// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
 *
 * Meant to be mostly used for locally generated traffic :
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of the Round Robin 'queues' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability :
 *
 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect rate limitation.
 *
 * enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist yet, create it and add it to the tree.
 *     Add skb to the per flow list of skb (fifo).
 *   - Use a special fifo for high prio packets
 *
 * dequeue() : serves flows in Round Robin
 * Note : When a flow becomes empty, we do not immediately remove it from
 * rb trees, for performance reasons (it is expected to send additional packets,
 * or the SLAB cache will reuse the socket for another flow)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
	u64	time_to_send;
	u8	band;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}
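
/* As described above, pacing spaces the packets of a flow according to
 * sk->sk_pacing_rate. For illustration (numbers are an example, not
 * tunables): a flow paced at 12500000 bytes/sec (100 Mbit/s) sending
 * 1500 byte packets is delayed by fq_dequeue() by roughly
 * 1500 * NSEC_PER_SEC / 12500000 = 120000 ns (120 usec) between packets.
 */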

/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 * in a linear list (head, tail), otherwise they are placed in a rbtree (t_root).
 */
struct fq_flow {
/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
	struct rb_root	t_root;
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* (jiffies | 1UL) when flow was emptied, for gc */
	};
	union {
		struct rb_node	fq_node;	/* anchor in fq_root[] trees */
		/* Following field is only used for q->internal,
		 * because q->internal is not hashed in fq_root[]
		 */
		u64		stat_fastpath_packets;
	};
	struct sock	*sk;
	u32		socket_hash;	/* sk_hash */
	int		qlen;		/* number of packets in flow queue */

/* Second cache line */
	int		credit;
	int		band;
	struct fq_flow	*next;		/* next pointer in RR lists */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_perband_flows {
	struct fq_flow_head new_flows;
	struct fq_flow_head old_flows;
	int		    credit;
	int		    quantum; /* based on band nr : 576KB, 192KB, 64KB */
};

#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)

struct fq_sched_data {
/* Read mostly cache line */

	u64		offload_horizon;
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_plimit;	/* max packets per flow */
	unsigned long	flow_max_rate;	/* optional max rate per flow */
	u64		ce_threshold;
	u64		horizon;	/* horizon in ns */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;
	u8		horizon_drop;
	u8		prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
	u32		timer_slack; /* hrtimer slack in ns */

/* Read/Write fields. */

	unsigned int	band_nr; /* band being serviced in fq_dequeue() */

	struct fq_perband_flows band_flows[FQ_BANDS];

	struct fq_flow	internal;	/* fastpath queue. */
	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	u32		band_pkt_count[FQ_BANDS];
	u32		flows;
	u32		inactive_flows; /* Flows with no packet to send. */
	u32		throttled_flows;

	u64		stat_throttled;
	struct qdisc_watchdog watchdog;
	u64		stat_gc_flows;

/* Seldom used fields. */

	u64		stat_band_drops[FQ_BANDS];
	u64		stat_ce_mark;
	u64		stat_horizon_drops;
	u64		stat_horizon_caps;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
};

/* return the i-th 2-bit value ("crumb") */
static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
{
	return (READ_ONCE(prio2band[prio / 4]) >> (2 * (prio & 0x3))) & 0x3;
}
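
/* Example of the crumb encoding (values chosen for illustration only):
 * if priorities 0..3 map to bands {1, 2, 2, 2}, then
 * prio2band[0] = 1 | 2 << 2 | 2 << 4 | 2 << 6 = 0xa9, and
 * fq_prio2band(prio2band, 2) = (0xa9 >> 4) & 0x3 = 2.
 */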

/*
 * f->tail and f->age share the same location.
 * We can use the low order bit to differentiate if this location points
 * to a sk_buff or contains a jiffies value, if we force this value to be odd.
 * This relies on f->tail low order bit being 0, since alignof(struct sk_buff) >= 2
 */
static void fq_flow_set_detached(struct fq_flow *f)
{
	f->age = jiffies | 1UL;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return !!(f->age & 1UL);
}

/* special value to mark a throttled flow (not on old/new list) */
static struct fq_flow throttled;

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

enum new_flow {
	NEW_FLOW,
	OLD_FLOW
};

static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
			     enum new_flow list_sel)
{
	struct fq_perband_flows *pband = &q->band_flows[flow->band];
	struct fq_flow_head *head = (list_sel == NEW_FLOW) ?
					&pband->new_flows :
					&pband->old_flows;

	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(q, f, OLD_FLOW);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree = NULL;
	struct rb_node **p, *parent;
	int fcnt;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			f->next = tofree;
			tofree = f;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	if (!tofree)
		return;

	fcnt = 0;
	while (tofree) {
		f = tofree;
		tofree = f->next;
		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
		fcnt++;
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}
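
/* Illustration of the lazy garbage collection above: a flow emptied at
 * jiffies J stores (J | 1UL) in ->age; the set low bit can not belong to
 * a valid ->tail pointer, so the flow is recognizable as detached.
 * Once jiffies passes roughly J + FQ_GC_AGE (3 seconds), the flow becomes
 * a gc candidate and is freed the next time fq_gc() walks its hash bucket.
 */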

/* Fast path can be used if :
 * 1) Packet tstamp is in the past, or within the pacing offload horizon.
 * 2) FQ qlen == 0 OR
 *    (no flow is currently eligible for transmit,
 *     AND fast path queue has less than 8 packets)
 * 3) No SO_MAX_PACING_RATE on the socket (if any).
 * 4) No @maxrate attribute on this qdisc,
 *
 * FQ can not use generic TCQ_F_CAN_BYPASS infrastructure.
 */
static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
			      u64 now)
{
	const struct fq_sched_data *q = qdisc_priv(sch);
	const struct sock *sk;

	if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
		return false;

	if (sch->q.qlen != 0) {
		/* Even if some packets are stored in this qdisc,
		 * we can still enable fast path if all of them are
		 * scheduled in the future (ie no flows are eligible)
		 * or in the fast path queue.
		 */
		if (q->flows != q->inactive_flows + q->throttled_flows)
			return false;

		/* Do not allow fast path queue to explode, we want Fair Queue mode
		 * under pressure.
		 */
		if (q->internal.qlen >= 8)
			return false;

		/* Ordering invariants fall apart if some delayed flows
		 * are ready but we haven't serviced them, yet.
		 */
		if (q->time_next_delayed_flow <= now + q->offload_horizon)
			return false;
	}

	sk = skb->sk;
	if (sk && sk_fullsock(sk) && !sk_is_tcp(sk) &&
	    sk->sk_max_pacing_rate != ~0UL)
		return false;

	if (q->flow_max_rate != ~0UL)
		return false;

	return true;
}

static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
				   u64 now)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 * TCP can also associate TIME_WAIT sockets with RST or ACK packets.
	 */
	if (!sk || sk_listener_or_tw(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	} else if (sk->sk_state == TCP_CLOSE) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
		/*
		 * Sockets in TCP_CLOSE are non connected.
		 * Typical use case is UDP sockets, they can send packets
		 * with sendto() to many different destinations.
		 * We probably could use a generic bit advertising
		 * non connected sockets, instead of sk_state == TCP_CLOSE,
		 * if we care enough.
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
	}
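
	/* Illustration of the hashing trick above (numbers are arbitrary):
	 * a packet hash of 0x2a masked by the default orphan_mask of 1023
	 * gives the pseudo "socket pointer" (0x2a << 1) | 1 = 0x55.
	 * Real struct sock pointers are at least word aligned, so their low
	 * bit is 0 and the two namespaces can not collide in the rb trees.
	 */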

	if (fq_fastpath_check(sch, skb, now)) {
		q->internal.stat_fastpath_packets++;
		if (skb->sk == sk && q->rate_enable &&
		    READ_ONCE(sk->sk_pacing_status) != SK_PACING_FQ)
			smp_store_release(&sk->sk_pacing_status,
					  SK_PACING_FQ);
		return &q->internal;
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk == sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (q->rate_enable)
					smp_store_release(&sk->sk_pacing_status,
							  SK_PACING_FQ);
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	/* f->t_root is already zeroed after kmem_cache_zalloc() */

	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk == sk) {
		f->socket_hash = sk->sk_hash;
		if (q->rate_enable)
			smp_store_release(&sk->sk_pacing_status,
					  SK_PACING_FQ);
	}
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}

static struct sk_buff *fq_peek(struct fq_flow *flow)
{
	struct sk_buff *skb = skb_rb_first(&flow->t_root);
	struct sk_buff *head = flow->head;

	if (!skb)
		return head;

	if (!head)
		return skb;

	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
		return skb;
	return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
			  struct sk_buff *skb)
{
	if (skb == flow->head) {
		struct sk_buff *next = skb->next;

		prefetch(next);
		flow->head = next;
	} else {
		rb_erase(&skb->rbnode, &flow->t_root);
		skb->dev = qdisc_dev(sch);
	}
}
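
/* Illustration of the per flow queueing used by flow_queue_add() and
 * fq_peek() (timestamps are arbitrary): packets with time_to_send 10, 20
 * and 30 arrive in order and are appended to the (head, tail) list in O(1);
 * a later packet with time_to_send 15 is out of order and goes into the
 * t_root rbtree instead. fq_peek() then returns whichever of the rbtree
 * minimum and the list head must be sent first.
 */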

/* Remove one skb from flow queue.
 * This skb must be the return value of prior fq_peek().
 */
static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
			   struct sk_buff *skb)
{
	fq_erase_head(sch, flow, skb);
	skb_mark_not_on_list(skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);
}

static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct rb_node **p, *parent;
	struct sk_buff *head, *aux;

	head = flow->head;
	if (!head ||
	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
		if (!head)
			flow->head = skb;
		else
			flow->tail->next = skb;
		flow->tail = skb;
		skb->next = NULL;
		return;
	}

	p = &flow->t_root.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		aux = rb_to_skb(parent);
		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &flow->t_root);
}

static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
				     const struct fq_sched_data *q, u64 now)
{
	return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
}

#define FQDR(reason) SKB_DROP_REASON_FQ_##reason

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;
	u64 now;
	u8 band;

	band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
	if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
		q->stat_band_drops[band]++;
		return qdisc_drop_reason(skb, sch, to_free,
					 FQDR(BAND_LIMIT));
	}

	now = ktime_get_ns();
	if (!skb->tstamp) {
		fq_skb_cb(skb)->time_to_send = now;
	} else {
		/* Check if packet timestamp is too far in the future. */
		if (fq_packet_beyond_horizon(skb, q, now)) {
			if (q->horizon_drop) {
				q->stat_horizon_drops++;
				return qdisc_drop_reason(skb, sch, to_free,
							 FQDR(HORIZON_LIMIT));
			}
			q->stat_horizon_caps++;
			skb->tstamp = now + q->horizon;
		}
		fq_skb_cb(skb)->time_to_send = skb->tstamp;
	}

	f = fq_classify(sch, skb, now);

	if (f != &q->internal) {
		if (unlikely(f->qlen >= q->flow_plimit)) {
			q->stat_flows_plimit++;
			return qdisc_drop_reason(skb, sch, to_free,
						 FQDR(FLOW_LIMIT));
		}

		if (fq_flow_is_detached(f)) {
			fq_flow_add_tail(q, f, NEW_FLOW);
			if (time_after(jiffies, f->age + q->flow_refill_delay))
				f->credit = max_t(u32, f->credit, q->quantum);
		}

		f->band = band;
		q->band_pkt_count[band]++;
		fq_skb_cb(skb)->band = band;
		if (f->qlen == 0)
			q->inactive_flows--;
	}

	f->qlen++;
	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
#undef FQDR

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now + q->offload_horizon)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	if ((long)sample > 0) {
		q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
		q->unthrottle_latency_ns += sample >> 3;
	}
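
	/* The update above is an exponentially weighted moving average with
	 * a 1/8 weight. For illustration (values are arbitrary): a previous
	 * unthrottle_latency_ns of 8000 and a sample of 16000 give
	 * 8000 - 8000/8 + 16000/8 = 9000 ns.
	 */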

	now += q->offload_horizon;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}

static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
{
	if (pband->credit <= 0)
		return NULL;

	if (pband->new_flows.first)
		return &pband->new_flows;

	return pband->old_flows.first ? &pband->old_flows : NULL;
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_perband_flows *pband;
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	unsigned long rate;
	int retry;
	u32 plen;
	u64 now;

	if (!sch->q.qlen)
		return NULL;

	skb = fq_peek(&q->internal);
	if (skb) {
		q->internal.qlen--;
		fq_dequeue_skb(sch, &q->internal, skb);
		goto out;
	}

	now = ktime_get_ns();
	fq_check_throttled(q, now);
	retry = 0;
	pband = &q->band_flows[q->band_nr];
begin:
	head = fq_pband_head_select(pband);
	if (!head) {
		while (++retry <= FQ_BANDS) {
			if (++q->band_nr == FQ_BANDS)
				q->band_nr = 0;
			pband = &q->band_flows[q->band_nr];
			pband->credit = min(pband->credit + pband->quantum,
					    pband->quantum);
			if (pband->credit > 0)
				goto begin;
			retry = 0;
		}
		if (q->time_next_delayed_flow != ~0ULL)
			qdisc_watchdog_schedule_range_ns(&q->watchdog,
						q->time_next_delayed_flow,
						q->timer_slack);
		return NULL;
	}
	f = head->first;
	retry = 0;
	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(q, f, OLD_FLOW);
		goto begin;
	}

	skb = fq_peek(f);
	if (skb) {
		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
					     f->time_next_packet);

		if (now + q->offload_horizon < time_next_packet) {
			head->first = f->next;
			f->time_next_packet = time_next_packet;
			fq_flow_set_throttled(q, f);
			goto begin;
		}
		prefetch(&skb->end);
		fq_dequeue_skb(sch, f, skb);
		if (unlikely((s64)(now - time_next_packet - q->ce_threshold) > 0)) {
			INET_ECN_set_ce(skb);
			q->stat_ce_mark++;
		}
		if (--f->qlen == 0)
			q->inactive_flows++;
		q->band_pkt_count[fq_skb_cb(skb)->band]--;
	} else {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if (head == &pband->new_flows) {
			fq_flow_add_tail(q, f, OLD_FLOW);
		} else {
			fq_flow_set_detached(f);
		}
		goto begin;
	}
	plen = qdisc_pkt_len(skb);
	f->credit -= plen;
	pband->credit -= plen;
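
	/* Round robin credit accounting: with the default quantum of
	 * 2 * psched_mtu() (see fq_init()), a flow can send roughly two
	 * full-size packets per pass before its credit goes negative and
	 * it is rotated to the tail of the old flows list.
	 */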

	if (!q->rate_enable)
		goto out;

	rate = q->flow_max_rate;

	/* If EDT time was provided for this skb, we need to
	 * update f->time_next_packet only if this qdisc enforces
	 * a flow max rate.
	 */
	if (!skb->tstamp) {
		if (skb->sk)
			rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);

		if (rate <= q->low_rate_threshold) {
			f->credit = 0;
		} else {
			plen = max(plen, q->quantum);
			if (f->credit > 0)
				goto out;
		}
	}
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			len = div64_ul(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
	struct rb_node *p = rb_first(&flow->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &flow->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	for (idx = 0; idx < FQ_BANDS; idx++) {
		q->band_flows[idx].new_flows.first = NULL;
		q->band_flows[idx].old_flows.first = NULL;
	}
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}
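
/* fq_resize() below allocates the new bucket array without the qdisc lock
 * held, then rehashes existing flows under sch_tree_lock(). As a rough
 * sizing illustration, growing fq_trees_log from 10 to 11 doubles the array
 * from 1024 to 2048 rb_root slots, i.e. from 8KB to 16KB on a 64bit build.
 */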

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	WRITE_ONCE(q->fq_trees_log, log);

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct netlink_range_validation iq_range = {
	.max = INT_MAX,
};

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },

	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
	[TCA_FQ_TIMER_SLACK]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON_DROP]		= { .type = NLA_U8 },
	[TCA_FQ_PRIOMAP]		= NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
	[TCA_FQ_WEIGHTS]		= NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
	[TCA_FQ_OFFLOAD_HORIZON]	= { .type = NLA_U32 },
};

/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
{
	const int num_elems = TC_PRIO_MAX + 1;
	u8 tmp[FQ_PRIO2BAND_CRUMB_SIZE];
	int i;

	memset(tmp, 0, sizeof(tmp));
	for (i = 0; i < num_elems; i++)
		tmp[i / 4] |= in[i] << (2 * (i & 0x3));

	for (i = 0; i < FQ_PRIO2BAND_CRUMB_SIZE; i++)
		WRITE_ONCE(out[i], tmp[i]);
}

static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
{
	const int num_elems = TC_PRIO_MAX + 1;
	int i;

	for (i = 0; i < num_elems; i++)
		out[i] = fq_prio2band(in, i);
}

static int fq_load_weights(struct fq_sched_data *q,
			   const struct nlattr *attr,
			   struct netlink_ext_ack *extack)
{
	s32 *weights = nla_data(attr);
	int i;

	for (i = 0; i < FQ_BANDS; i++) {
		if (weights[i] < FQ_MIN_WEIGHT) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Weight %d less than minimum allowed %d",
					       weights[i], FQ_MIN_WEIGHT);
			return -EINVAL;
		}
	}
	for (i = 0; i < FQ_BANDS; i++)
		WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
	return 0;
}
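
/* The weights loaded above are the per band quanta used by fq_dequeue().
 * The defaults set in fq_init() are 9 << 16, 3 << 16 and 1 << 16 bytes,
 * i.e. a 9:3:1 service ratio between bands 0, 1 and 2; TCA_FQ_WEIGHTS lets
 * userspace override them, bounded below by FQ_MIN_WEIGHT.
 */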

static int fq_load_priomap(struct fq_sched_data *q,
			   const struct nlattr *attr,
			   struct netlink_ext_ack *extack)
{
	const struct tc_prio_qopt *map = nla_data(attr);
	int i;

	if (map->bands != FQ_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "FQ only supports 3 bands");
		return -EINVAL;
	}
	for (i = 0; i < TC_PRIO_MAX + 1; i++) {
		if (map->priomap[i] >= FQ_BANDS) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "FQ priomap field %d maps to a too high band %d",
					       i, map->priomap[i]);
			return -EINVAL;
		}
	}
	fq_prio2band_compress_crumb(map->priomap, q->prio2band);
	return 0;
}

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	u32 fq_log;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_FQ_PLIMIT]));

	if (tb[TCA_FQ_FLOW_PLIMIT])
		WRITE_ONCE(q->flow_plimit,
			   nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0 && quantum <= (1 << 20)) {
			WRITE_ONCE(q->quantum, quantum);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			err = -EINVAL;
		}
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		WRITE_ONCE(q->initial_quantum,
			   nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]));

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		WRITE_ONCE(q->flow_max_rate,
			   (rate == ~0U) ? ~0UL : rate);
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		WRITE_ONCE(q->low_rate_threshold,
			   nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]));

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			WRITE_ONCE(q->rate_enable,
				   enable);
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		WRITE_ONCE(q->flow_refill_delay,
			   usecs_to_jiffies(usecs_delay));
	}

	if (!err && tb[TCA_FQ_PRIOMAP])
		err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);

	if (!err && tb[TCA_FQ_WEIGHTS])
		err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);

	if (tb[TCA_FQ_ORPHAN_MASK])
		WRITE_ONCE(q->orphan_mask,
			   nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]));

	if (tb[TCA_FQ_CE_THRESHOLD])
		WRITE_ONCE(q->ce_threshold,
			   (u64)NSEC_PER_USEC *
			   nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]));

	if (tb[TCA_FQ_TIMER_SLACK])
		WRITE_ONCE(q->timer_slack,
			   nla_get_u32(tb[TCA_FQ_TIMER_SLACK]));

	if (tb[TCA_FQ_HORIZON])
		WRITE_ONCE(q->horizon,
			   (u64)NSEC_PER_USEC *
			   nla_get_u32(tb[TCA_FQ_HORIZON]));

	if (tb[TCA_FQ_HORIZON_DROP])
		WRITE_ONCE(q->horizon_drop,
			   nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));

	if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
		u64 offload_horizon = (u64)NSEC_PER_USEC *
				      nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);

		if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
			WRITE_ONCE(q->offload_horizon, offload_horizon);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
			err = -EINVAL;
		}
	}
	if (!err) {

		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
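
	/* If the new sch->limit is lower than the current backlog, the loop
	 * below dequeues and drops the excess packets, then reports them to
	 * parent qdiscs via qdisc_tree_reduce_backlog().
	 */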

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int i, err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	for (i = 0; i < FQ_BANDS; i++) {
		q->band_flows[i].new_flows.first = NULL;
		q->band_flows[i].old_flows.first = NULL;
	}
	q->band_flows[0].quantum = 9 << 16;
	q->band_flows[1].quantum = 3 << 16;
	q->band_flows[2].quantum = 1 << 16;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;

	q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */

	q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
	q->horizon_drop = 1; /* by default, drop packets beyond horizon */

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;

	fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt prio = {
		.bands = FQ_BANDS,
	};
	struct nlattr *opts;
	u64 offload_horizon;
	u64 ce_threshold;
	s32 weights[3];
	u64 horizon;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	ce_threshold = READ_ONCE(q->ce_threshold);
	do_div(ce_threshold, NSEC_PER_USEC);

	horizon = READ_ONCE(q->horizon);
	do_div(horizon, NSEC_PER_USEC);

	offload_horizon = READ_ONCE(q->offload_horizon);
	do_div(offload_horizon, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
			READ_ONCE(q->flow_plimit)) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM,
			READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM,
			READ_ONCE(q->initial_quantum)) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE,
			READ_ONCE(q->rate_enable)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long,
			      READ_ONCE(q->flow_max_rate), ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK,
			READ_ONCE(q->orphan_mask)) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			READ_ONCE(q->low_rate_threshold)) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG,
			READ_ONCE(q->fq_trees_log)) ||
	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
			READ_ONCE(q->timer_slack)) ||
	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
	    nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
		       READ_ONCE(q->horizon_drop)))
		goto nla_put_failure;

	fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
	if (nla_put(skb, TCA_FQ_PRIOMAP, sizeof(prio), &prio))
		goto nla_put_failure;

	weights[0] = READ_ONCE(q->band_flows[0].quantum);
	weights[1] = READ_ONCE(q->band_flows[1].quantum);
	weights[2] = READ_ONCE(q->band_flows[2].quantum);
	if (nla_put(skb, TCA_FQ_WEIGHTS, sizeof(weights), &weights))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;
	int i;

	st.pad = 0;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = 0;
	st.fastpath_packets	  = q->internal.stat_fastpath_packets;
	st.tcp_retrans		  = 0;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
				    ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	st.ce_mark		  = q->stat_ce_mark;
	st.horizon_drops	  = q->stat_horizon_drops;
	st.horizon_caps		  = q->stat_horizon_caps;
	for (i = 0; i < FQ_BANDS; i++) {
		st.band_drops[i]     = q->stat_band_drops[i];
		st.band_pkt_count[i] = q->band_pkt_count[i];
	}
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq");

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue Packet Scheduler");