1 /* 2 * net/sched/sch_generic.c Generic packet scheduler routines. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 * 9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 10 * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 11 * - Ingress support 12 */ 13 14 #include <linux/bitops.h> 15 #include <linux/module.h> 16 #include <linux/types.h> 17 #include <linux/kernel.h> 18 #include <linux/sched.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/netdevice.h> 22 #include <linux/skbuff.h> 23 #include <linux/rtnetlink.h> 24 #include <linux/init.h> 25 #include <linux/rcupdate.h> 26 #include <linux/list.h> 27 #include <linux/slab.h> 28 #include <linux/if_vlan.h> 29 #include <linux/skb_array.h> 30 #include <linux/if_macvlan.h> 31 #include <net/sch_generic.h> 32 #include <net/pkt_sched.h> 33 #include <net/dst.h> 34 #include <trace/events/qdisc.h> 35 #include <net/xfrm.h> 36 37 /* Qdisc to use by default */ 38 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; 39 EXPORT_SYMBOL(default_qdisc_ops); 40 41 /* Main transmission queue. */ 42 43 /* Modifications to data participating in scheduling must be protected with 44 * qdisc_lock(qdisc) spinlock. 45 * 46 * The idea is the following: 47 * - enqueue, dequeue are serialized via qdisc root lock 48 * - ingress filtering is also serialized via qdisc root lock 49 * - updates to tree and tree walking are only done under the rtnl mutex. 50 */ 51 52 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) 53 { 54 const struct netdev_queue *txq = q->dev_queue; 55 spinlock_t *lock = NULL; 56 struct sk_buff *skb; 57 58 if (q->flags & TCQ_F_NOLOCK) { 59 lock = qdisc_lock(q); 60 spin_lock(lock); 61 } 62 63 skb = skb_peek(&q->skb_bad_txq); 64 if (skb) { 65 /* check the reason of requeuing without tx lock first */ 66 txq = skb_get_tx_queue(txq->dev, skb); 67 if (!netif_xmit_frozen_or_stopped(txq)) { 68 skb = __skb_dequeue(&q->skb_bad_txq); 69 if (qdisc_is_percpu_stats(q)) { 70 qdisc_qstats_cpu_backlog_dec(q, skb); 71 qdisc_qstats_atomic_qlen_dec(q); 72 } else { 73 qdisc_qstats_backlog_dec(q, skb); 74 q->q.qlen--; 75 } 76 } else { 77 skb = NULL; 78 } 79 } 80 81 if (lock) 82 spin_unlock(lock); 83 84 return skb; 85 } 86 87 static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q) 88 { 89 struct sk_buff *skb = skb_peek(&q->skb_bad_txq); 90 91 if (unlikely(skb)) 92 skb = __skb_dequeue_bad_txq(q); 93 94 return skb; 95 } 96 97 static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q, 98 struct sk_buff *skb) 99 { 100 spinlock_t *lock = NULL; 101 102 if (q->flags & TCQ_F_NOLOCK) { 103 lock = qdisc_lock(q); 104 spin_lock(lock); 105 } 106 107 __skb_queue_tail(&q->skb_bad_txq, skb); 108 109 if (qdisc_is_percpu_stats(q)) { 110 qdisc_qstats_cpu_backlog_inc(q, skb); 111 qdisc_qstats_atomic_qlen_inc(q); 112 } else { 113 qdisc_qstats_backlog_inc(q, skb); 114 q->q.qlen++; 115 } 116 117 if (lock) 118 spin_unlock(lock); 119 } 120 121 static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) 122 { 123 while (skb) { 124 struct sk_buff *next = skb->next; 125 126 __skb_queue_tail(&q->gso_skb, skb); 127 q->qstats.requeues++; 128 qdisc_qstats_backlog_inc(q, skb); 129 q->q.qlen++; /* it's still part of the queue */ 130 131 skb = next; 132 } 133 __netif_schedule(q); 134 135 return 0; 136 } 137 138 static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q) 139 { 140 spinlock_t *lock = qdisc_lock(q); 141 142 spin_lock(lock); 143 while (skb) { 144 struct sk_buff *next = skb->next; 145 146 __skb_queue_tail(&q->gso_skb, skb); 147 148 qdisc_qstats_cpu_requeues_inc(q); 149 qdisc_qstats_cpu_backlog_inc(q, skb); 150 qdisc_qstats_atomic_qlen_inc(q); 151 152 skb = next; 153 } 154 spin_unlock(lock); 155 156 __netif_schedule(q); 157 158 return 0; 159 } 160 161 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) 162 { 163 if (q->flags & TCQ_F_NOLOCK) 164 return dev_requeue_skb_locked(skb, q); 165 else 166 return __dev_requeue_skb(skb, q); 167 } 168 169 static void try_bulk_dequeue_skb(struct Qdisc *q, 170 struct sk_buff *skb, 171 const struct netdev_queue *txq, 172 int *packets) 173 { 174 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; 175 176 while (bytelimit > 0) { 177 struct sk_buff *nskb = q->dequeue(q); 178 179 if (!nskb) 180 break; 181 182 bytelimit -= nskb->len; /* covers GSO len */ 183 skb->next = nskb; 184 skb = nskb; 185 (*packets)++; /* GSO counts as one pkt */ 186 } 187 skb_mark_not_on_list(skb); 188 } 189 190 /* This variant of try_bulk_dequeue_skb() makes sure 191 * all skbs in the chain are for the same txq 192 */ 193 static void try_bulk_dequeue_skb_slow(struct Qdisc *q, 194 struct sk_buff *skb, 195 int *packets) 196 { 197 int mapping = skb_get_queue_mapping(skb); 198 struct sk_buff *nskb; 199 int cnt = 0; 200 201 do { 202 nskb = q->dequeue(q); 203 if (!nskb) 204 break; 205 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { 206 qdisc_enqueue_skb_bad_txq(q, nskb); 207 break; 208 } 209 skb->next = nskb; 210 skb = nskb; 211 } while (++cnt < 8); 212 (*packets) += cnt; 213 skb_mark_not_on_list(skb); 214 } 215 216 /* Note that dequeue_skb can possibly return a SKB list (via skb->next). 217 * A requeued skb (via q->gso_skb) can also be a SKB list. 218 */ 219 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, 220 int *packets) 221 { 222 const struct netdev_queue *txq = q->dev_queue; 223 struct sk_buff *skb = NULL; 224 225 *packets = 1; 226 if (unlikely(!skb_queue_empty(&q->gso_skb))) { 227 spinlock_t *lock = NULL; 228 229 if (q->flags & TCQ_F_NOLOCK) { 230 lock = qdisc_lock(q); 231 spin_lock(lock); 232 } 233 234 skb = skb_peek(&q->gso_skb); 235 236 /* skb may be null if another cpu pulls gso_skb off in between 237 * empty check and lock. 238 */ 239 if (!skb) { 240 if (lock) 241 spin_unlock(lock); 242 goto validate; 243 } 244 245 /* skb in gso_skb were already validated */ 246 *validate = false; 247 if (xfrm_offload(skb)) 248 *validate = true; 249 /* check the reason of requeuing without tx lock first */ 250 txq = skb_get_tx_queue(txq->dev, skb); 251 if (!netif_xmit_frozen_or_stopped(txq)) { 252 skb = __skb_dequeue(&q->gso_skb); 253 if (qdisc_is_percpu_stats(q)) { 254 qdisc_qstats_cpu_backlog_dec(q, skb); 255 qdisc_qstats_atomic_qlen_dec(q); 256 } else { 257 qdisc_qstats_backlog_dec(q, skb); 258 q->q.qlen--; 259 } 260 } else { 261 skb = NULL; 262 } 263 if (lock) 264 spin_unlock(lock); 265 goto trace; 266 } 267 validate: 268 *validate = true; 269 270 if ((q->flags & TCQ_F_ONETXQUEUE) && 271 netif_xmit_frozen_or_stopped(txq)) 272 return skb; 273 274 skb = qdisc_dequeue_skb_bad_txq(q); 275 if (unlikely(skb)) 276 goto bulk; 277 skb = q->dequeue(q); 278 if (skb) { 279 bulk: 280 if (qdisc_may_bulk(q)) 281 try_bulk_dequeue_skb(q, skb, txq, packets); 282 else 283 try_bulk_dequeue_skb_slow(q, skb, packets); 284 } 285 trace: 286 trace_qdisc_dequeue(q, txq, *packets, skb); 287 return skb; 288 } 289 290 /* 291 * Transmit possibly several skbs, and handle the return status as 292 * required. Owning running seqcount bit guarantees that 293 * only one CPU can execute this function. 294 * 295 * Returns to the caller: 296 * false - hardware queue frozen backoff 297 * true - feel free to send more pkts 298 */ 299 bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, 300 struct net_device *dev, struct netdev_queue *txq, 301 spinlock_t *root_lock, bool validate) 302 { 303 int ret = NETDEV_TX_BUSY; 304 bool again = false; 305 306 /* And release qdisc */ 307 if (root_lock) 308 spin_unlock(root_lock); 309 310 /* Note that we validate skb (GSO, checksum, ...) outside of locks */ 311 if (validate) 312 skb = validate_xmit_skb_list(skb, dev, &again); 313 314 #ifdef CONFIG_XFRM_OFFLOAD 315 if (unlikely(again)) { 316 if (root_lock) 317 spin_lock(root_lock); 318 319 dev_requeue_skb(skb, q); 320 return false; 321 } 322 #endif 323 324 if (likely(skb)) { 325 HARD_TX_LOCK(dev, txq, smp_processor_id()); 326 if (!netif_xmit_frozen_or_stopped(txq)) 327 skb = dev_hard_start_xmit(skb, dev, txq, &ret); 328 329 HARD_TX_UNLOCK(dev, txq); 330 } else { 331 if (root_lock) 332 spin_lock(root_lock); 333 return true; 334 } 335 336 if (root_lock) 337 spin_lock(root_lock); 338 339 if (!dev_xmit_complete(ret)) { 340 /* Driver returned NETDEV_TX_BUSY - requeue skb */ 341 if (unlikely(ret != NETDEV_TX_BUSY)) 342 net_warn_ratelimited("BUG %s code %d qlen %d\n", 343 dev->name, ret, q->q.qlen); 344 345 dev_requeue_skb(skb, q); 346 return false; 347 } 348 349 return true; 350 } 351 352 /* 353 * NOTE: Called under qdisc_lock(q) with locally disabled BH. 354 * 355 * running seqcount guarantees only one CPU can process 356 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for 357 * this queue. 358 * 359 * netif_tx_lock serializes accesses to device driver. 360 * 361 * qdisc_lock(q) and netif_tx_lock are mutually exclusive, 362 * if one is grabbed, another must be free. 363 * 364 * Note, that this procedure can be called by a watchdog timer 365 * 366 * Returns to the caller: 367 * 0 - queue is empty or throttled. 368 * >0 - queue is not empty. 369 * 370 */ 371 static inline bool qdisc_restart(struct Qdisc *q, int *packets) 372 { 373 spinlock_t *root_lock = NULL; 374 struct netdev_queue *txq; 375 struct net_device *dev; 376 struct sk_buff *skb; 377 bool validate; 378 379 /* Dequeue packet */ 380 skb = dequeue_skb(q, &validate, packets); 381 if (unlikely(!skb)) 382 return false; 383 384 if (!(q->flags & TCQ_F_NOLOCK)) 385 root_lock = qdisc_lock(q); 386 387 dev = qdisc_dev(q); 388 txq = skb_get_tx_queue(dev, skb); 389 390 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); 391 } 392 393 void __qdisc_run(struct Qdisc *q) 394 { 395 int quota = dev_tx_weight; 396 int packets; 397 398 while (qdisc_restart(q, &packets)) { 399 /* 400 * Ordered by possible occurrence: Postpone processing if 401 * 1. we've exceeded packet quota 402 * 2. another process needs the CPU; 403 */ 404 quota -= packets; 405 if (quota <= 0 || need_resched()) { 406 __netif_schedule(q); 407 break; 408 } 409 } 410 } 411 412 unsigned long dev_trans_start(struct net_device *dev) 413 { 414 unsigned long val, res; 415 unsigned int i; 416 417 if (is_vlan_dev(dev)) 418 dev = vlan_dev_real_dev(dev); 419 else if (netif_is_macvlan(dev)) 420 dev = macvlan_dev_real_dev(dev); 421 res = netdev_get_tx_queue(dev, 0)->trans_start; 422 for (i = 1; i < dev->num_tx_queues; i++) { 423 val = netdev_get_tx_queue(dev, i)->trans_start; 424 if (val && time_after(val, res)) 425 res = val; 426 } 427 428 return res; 429 } 430 EXPORT_SYMBOL(dev_trans_start); 431 432 static void dev_watchdog(struct timer_list *t) 433 { 434 struct net_device *dev = from_timer(dev, t, watchdog_timer); 435 436 netif_tx_lock(dev); 437 if (!qdisc_tx_is_noop(dev)) { 438 if (netif_device_present(dev) && 439 netif_running(dev) && 440 netif_carrier_ok(dev)) { 441 int some_queue_timedout = 0; 442 unsigned int i; 443 unsigned long trans_start; 444 445 for (i = 0; i < dev->num_tx_queues; i++) { 446 struct netdev_queue *txq; 447 448 txq = netdev_get_tx_queue(dev, i); 449 trans_start = txq->trans_start; 450 if (netif_xmit_stopped(txq) && 451 time_after(jiffies, (trans_start + 452 dev->watchdog_timeo))) { 453 some_queue_timedout = 1; 454 txq->trans_timeout++; 455 break; 456 } 457 } 458 459 if (some_queue_timedout) { 460 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", 461 dev->name, netdev_drivername(dev), i); 462 dev->netdev_ops->ndo_tx_timeout(dev); 463 } 464 if (!mod_timer(&dev->watchdog_timer, 465 round_jiffies(jiffies + 466 dev->watchdog_timeo))) 467 dev_hold(dev); 468 } 469 } 470 netif_tx_unlock(dev); 471 472 dev_put(dev); 473 } 474 475 void __netdev_watchdog_up(struct net_device *dev) 476 { 477 if (dev->netdev_ops->ndo_tx_timeout) { 478 if (dev->watchdog_timeo <= 0) 479 dev->watchdog_timeo = 5*HZ; 480 if (!mod_timer(&dev->watchdog_timer, 481 round_jiffies(jiffies + dev->watchdog_timeo))) 482 dev_hold(dev); 483 } 484 } 485 486 static void dev_watchdog_up(struct net_device *dev) 487 { 488 __netdev_watchdog_up(dev); 489 } 490 491 static void dev_watchdog_down(struct net_device *dev) 492 { 493 netif_tx_lock_bh(dev); 494 if (del_timer(&dev->watchdog_timer)) 495 dev_put(dev); 496 netif_tx_unlock_bh(dev); 497 } 498 499 /** 500 * netif_carrier_on - set carrier 501 * @dev: network device 502 * 503 * Device has detected acquisition of carrier. 504 */ 505 void netif_carrier_on(struct net_device *dev) 506 { 507 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 508 if (dev->reg_state == NETREG_UNINITIALIZED) 509 return; 510 atomic_inc(&dev->carrier_up_count); 511 linkwatch_fire_event(dev); 512 if (netif_running(dev)) 513 __netdev_watchdog_up(dev); 514 } 515 } 516 EXPORT_SYMBOL(netif_carrier_on); 517 518 /** 519 * netif_carrier_off - clear carrier 520 * @dev: network device 521 * 522 * Device has detected loss of carrier. 523 */ 524 void netif_carrier_off(struct net_device *dev) 525 { 526 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 527 if (dev->reg_state == NETREG_UNINITIALIZED) 528 return; 529 atomic_inc(&dev->carrier_down_count); 530 linkwatch_fire_event(dev); 531 } 532 } 533 EXPORT_SYMBOL(netif_carrier_off); 534 535 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces 536 under all circumstances. It is difficult to invent anything faster or 537 cheaper. 538 */ 539 540 static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, 541 struct sk_buff **to_free) 542 { 543 __qdisc_drop(skb, to_free); 544 return NET_XMIT_CN; 545 } 546 547 static struct sk_buff *noop_dequeue(struct Qdisc *qdisc) 548 { 549 return NULL; 550 } 551 552 struct Qdisc_ops noop_qdisc_ops __read_mostly = { 553 .id = "noop", 554 .priv_size = 0, 555 .enqueue = noop_enqueue, 556 .dequeue = noop_dequeue, 557 .peek = noop_dequeue, 558 .owner = THIS_MODULE, 559 }; 560 561 static struct netdev_queue noop_netdev_queue = { 562 RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), 563 .qdisc_sleeping = &noop_qdisc, 564 }; 565 566 struct Qdisc noop_qdisc = { 567 .enqueue = noop_enqueue, 568 .dequeue = noop_dequeue, 569 .flags = TCQ_F_BUILTIN, 570 .ops = &noop_qdisc_ops, 571 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 572 .dev_queue = &noop_netdev_queue, 573 .running = SEQCNT_ZERO(noop_qdisc.running), 574 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), 575 .gso_skb = { 576 .next = (struct sk_buff *)&noop_qdisc.gso_skb, 577 .prev = (struct sk_buff *)&noop_qdisc.gso_skb, 578 .qlen = 0, 579 .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock), 580 }, 581 .skb_bad_txq = { 582 .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq, 583 .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq, 584 .qlen = 0, 585 .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock), 586 }, 587 }; 588 EXPORT_SYMBOL(noop_qdisc); 589 590 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt, 591 struct netlink_ext_ack *extack) 592 { 593 /* register_qdisc() assigns a default of noop_enqueue if unset, 594 * but __dev_queue_xmit() treats noqueue only as such 595 * if this is NULL - so clear it here. */ 596 qdisc->enqueue = NULL; 597 return 0; 598 } 599 600 struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { 601 .id = "noqueue", 602 .priv_size = 0, 603 .init = noqueue_init, 604 .enqueue = noop_enqueue, 605 .dequeue = noop_dequeue, 606 .peek = noop_dequeue, 607 .owner = THIS_MODULE, 608 }; 609 610 static const u8 prio2band[TC_PRIO_MAX + 1] = { 611 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 612 }; 613 614 /* 3-band FIFO queue: old style, but should be a bit faster than 615 generic prio+fifo combination. 616 */ 617 618 #define PFIFO_FAST_BANDS 3 619 620 /* 621 * Private data for a pfifo_fast scheduler containing: 622 * - rings for priority bands 623 */ 624 struct pfifo_fast_priv { 625 struct skb_array q[PFIFO_FAST_BANDS]; 626 }; 627 628 static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, 629 int band) 630 { 631 return &priv->q[band]; 632 } 633 634 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, 635 struct sk_buff **to_free) 636 { 637 int band = prio2band[skb->priority & TC_PRIO_MAX]; 638 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 639 struct skb_array *q = band2list(priv, band); 640 unsigned int pkt_len = qdisc_pkt_len(skb); 641 int err; 642 643 err = skb_array_produce(q, skb); 644 645 if (unlikely(err)) 646 return qdisc_drop_cpu(skb, qdisc, to_free); 647 648 qdisc_qstats_atomic_qlen_inc(qdisc); 649 /* Note: skb can not be used after skb_array_produce(), 650 * so we better not use qdisc_qstats_cpu_backlog_inc() 651 */ 652 this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len); 653 return NET_XMIT_SUCCESS; 654 } 655 656 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) 657 { 658 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 659 struct sk_buff *skb = NULL; 660 int band; 661 662 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { 663 struct skb_array *q = band2list(priv, band); 664 665 if (__skb_array_empty(q)) 666 continue; 667 668 skb = __skb_array_consume(q); 669 } 670 if (likely(skb)) { 671 qdisc_qstats_cpu_backlog_dec(qdisc, skb); 672 qdisc_bstats_cpu_update(qdisc, skb); 673 qdisc_qstats_atomic_qlen_dec(qdisc); 674 } else { 675 qdisc->empty = true; 676 } 677 678 return skb; 679 } 680 681 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) 682 { 683 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 684 struct sk_buff *skb = NULL; 685 int band; 686 687 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { 688 struct skb_array *q = band2list(priv, band); 689 690 skb = __skb_array_peek(q); 691 } 692 693 return skb; 694 } 695 696 static void pfifo_fast_reset(struct Qdisc *qdisc) 697 { 698 int i, band; 699 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 700 701 for (band = 0; band < PFIFO_FAST_BANDS; band++) { 702 struct skb_array *q = band2list(priv, band); 703 struct sk_buff *skb; 704 705 /* NULL ring is possible if destroy path is due to a failed 706 * skb_array_init() in pfifo_fast_init() case. 707 */ 708 if (!q->ring.queue) 709 continue; 710 711 while ((skb = __skb_array_consume(q)) != NULL) 712 kfree_skb(skb); 713 } 714 715 for_each_possible_cpu(i) { 716 struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); 717 718 q->backlog = 0; 719 } 720 } 721 722 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) 723 { 724 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; 725 726 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); 727 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) 728 goto nla_put_failure; 729 return skb->len; 730 731 nla_put_failure: 732 return -1; 733 } 734 735 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt, 736 struct netlink_ext_ack *extack) 737 { 738 unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len; 739 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 740 int prio; 741 742 /* guard against zero length rings */ 743 if (!qlen) 744 return -EINVAL; 745 746 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 747 struct skb_array *q = band2list(priv, prio); 748 int err; 749 750 err = skb_array_init(q, qlen, GFP_KERNEL); 751 if (err) 752 return -ENOMEM; 753 } 754 755 /* Can by-pass the queue discipline */ 756 qdisc->flags |= TCQ_F_CAN_BYPASS; 757 return 0; 758 } 759 760 static void pfifo_fast_destroy(struct Qdisc *sch) 761 { 762 struct pfifo_fast_priv *priv = qdisc_priv(sch); 763 int prio; 764 765 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 766 struct skb_array *q = band2list(priv, prio); 767 768 /* NULL ring is possible if destroy path is due to a failed 769 * skb_array_init() in pfifo_fast_init() case. 770 */ 771 if (!q->ring.queue) 772 continue; 773 /* Destroy ring but no need to kfree_skb because a call to 774 * pfifo_fast_reset() has already done that work. 775 */ 776 ptr_ring_cleanup(&q->ring, NULL); 777 } 778 } 779 780 static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch, 781 unsigned int new_len) 782 { 783 struct pfifo_fast_priv *priv = qdisc_priv(sch); 784 struct skb_array *bands[PFIFO_FAST_BANDS]; 785 int prio; 786 787 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 788 struct skb_array *q = band2list(priv, prio); 789 790 bands[prio] = q; 791 } 792 793 return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len, 794 GFP_KERNEL); 795 } 796 797 struct Qdisc_ops pfifo_fast_ops __read_mostly = { 798 .id = "pfifo_fast", 799 .priv_size = sizeof(struct pfifo_fast_priv), 800 .enqueue = pfifo_fast_enqueue, 801 .dequeue = pfifo_fast_dequeue, 802 .peek = pfifo_fast_peek, 803 .init = pfifo_fast_init, 804 .destroy = pfifo_fast_destroy, 805 .reset = pfifo_fast_reset, 806 .dump = pfifo_fast_dump, 807 .change_tx_queue_len = pfifo_fast_change_tx_queue_len, 808 .owner = THIS_MODULE, 809 .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS, 810 }; 811 EXPORT_SYMBOL(pfifo_fast_ops); 812 813 static struct lock_class_key qdisc_tx_busylock; 814 static struct lock_class_key qdisc_running_key; 815 816 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 817 const struct Qdisc_ops *ops, 818 struct netlink_ext_ack *extack) 819 { 820 void *p; 821 struct Qdisc *sch; 822 unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size; 823 int err = -ENOBUFS; 824 struct net_device *dev; 825 826 if (!dev_queue) { 827 NL_SET_ERR_MSG(extack, "No device queue given"); 828 err = -EINVAL; 829 goto errout; 830 } 831 832 dev = dev_queue->dev; 833 p = kzalloc_node(size, GFP_KERNEL, 834 netdev_queue_numa_node_read(dev_queue)); 835 836 if (!p) 837 goto errout; 838 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 839 /* if we got non aligned memory, ask more and do alignment ourself */ 840 if (sch != p) { 841 kfree(p); 842 p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL, 843 netdev_queue_numa_node_read(dev_queue)); 844 if (!p) 845 goto errout; 846 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 847 sch->padded = (char *) sch - (char *) p; 848 } 849 __skb_queue_head_init(&sch->gso_skb); 850 __skb_queue_head_init(&sch->skb_bad_txq); 851 qdisc_skb_head_init(&sch->q); 852 spin_lock_init(&sch->q.lock); 853 854 if (ops->static_flags & TCQ_F_CPUSTATS) { 855 sch->cpu_bstats = 856 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); 857 if (!sch->cpu_bstats) 858 goto errout1; 859 860 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); 861 if (!sch->cpu_qstats) { 862 free_percpu(sch->cpu_bstats); 863 goto errout1; 864 } 865 } 866 867 spin_lock_init(&sch->busylock); 868 lockdep_set_class(&sch->busylock, 869 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 870 871 /* seqlock has the same scope of busylock, for NOLOCK qdisc */ 872 spin_lock_init(&sch->seqlock); 873 lockdep_set_class(&sch->busylock, 874 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 875 876 seqcount_init(&sch->running); 877 lockdep_set_class(&sch->running, 878 dev->qdisc_running_key ?: &qdisc_running_key); 879 880 sch->ops = ops; 881 sch->flags = ops->static_flags; 882 sch->enqueue = ops->enqueue; 883 sch->dequeue = ops->dequeue; 884 sch->dev_queue = dev_queue; 885 sch->empty = true; 886 dev_hold(dev); 887 refcount_set(&sch->refcnt, 1); 888 889 return sch; 890 errout1: 891 kfree(p); 892 errout: 893 return ERR_PTR(err); 894 } 895 896 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 897 const struct Qdisc_ops *ops, 898 unsigned int parentid, 899 struct netlink_ext_ack *extack) 900 { 901 struct Qdisc *sch; 902 903 if (!try_module_get(ops->owner)) { 904 NL_SET_ERR_MSG(extack, "Failed to increase module reference counter"); 905 return NULL; 906 } 907 908 sch = qdisc_alloc(dev_queue, ops, extack); 909 if (IS_ERR(sch)) { 910 module_put(ops->owner); 911 return NULL; 912 } 913 sch->parent = parentid; 914 915 if (!ops->init || ops->init(sch, NULL, extack) == 0) 916 return sch; 917 918 qdisc_put(sch); 919 return NULL; 920 } 921 EXPORT_SYMBOL(qdisc_create_dflt); 922 923 /* Under qdisc_lock(qdisc) and BH! */ 924 925 void qdisc_reset(struct Qdisc *qdisc) 926 { 927 const struct Qdisc_ops *ops = qdisc->ops; 928 struct sk_buff *skb, *tmp; 929 930 if (ops->reset) 931 ops->reset(qdisc); 932 933 skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) { 934 __skb_unlink(skb, &qdisc->gso_skb); 935 kfree_skb_list(skb); 936 } 937 938 skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) { 939 __skb_unlink(skb, &qdisc->skb_bad_txq); 940 kfree_skb_list(skb); 941 } 942 943 qdisc->q.qlen = 0; 944 qdisc->qstats.backlog = 0; 945 } 946 EXPORT_SYMBOL(qdisc_reset); 947 948 void qdisc_free(struct Qdisc *qdisc) 949 { 950 if (qdisc_is_percpu_stats(qdisc)) { 951 free_percpu(qdisc->cpu_bstats); 952 free_percpu(qdisc->cpu_qstats); 953 } 954 955 kfree((char *) qdisc - qdisc->padded); 956 } 957 958 static void qdisc_free_cb(struct rcu_head *head) 959 { 960 struct Qdisc *q = container_of(head, struct Qdisc, rcu); 961 962 qdisc_free(q); 963 } 964 965 static void qdisc_destroy(struct Qdisc *qdisc) 966 { 967 const struct Qdisc_ops *ops = qdisc->ops; 968 struct sk_buff *skb, *tmp; 969 970 #ifdef CONFIG_NET_SCHED 971 qdisc_hash_del(qdisc); 972 973 qdisc_put_stab(rtnl_dereference(qdisc->stab)); 974 #endif 975 gen_kill_estimator(&qdisc->rate_est); 976 if (ops->reset) 977 ops->reset(qdisc); 978 if (ops->destroy) 979 ops->destroy(qdisc); 980 981 module_put(ops->owner); 982 dev_put(qdisc_dev(qdisc)); 983 984 skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) { 985 __skb_unlink(skb, &qdisc->gso_skb); 986 kfree_skb_list(skb); 987 } 988 989 skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) { 990 __skb_unlink(skb, &qdisc->skb_bad_txq); 991 kfree_skb_list(skb); 992 } 993 994 call_rcu(&qdisc->rcu, qdisc_free_cb); 995 } 996 997 void qdisc_put(struct Qdisc *qdisc) 998 { 999 if (qdisc->flags & TCQ_F_BUILTIN || 1000 !refcount_dec_and_test(&qdisc->refcnt)) 1001 return; 1002 1003 qdisc_destroy(qdisc); 1004 } 1005 EXPORT_SYMBOL(qdisc_put); 1006 1007 /* Version of qdisc_put() that is called with rtnl mutex unlocked. 1008 * Intended to be used as optimization, this function only takes rtnl lock if 1009 * qdisc reference counter reached zero. 1010 */ 1011 1012 void qdisc_put_unlocked(struct Qdisc *qdisc) 1013 { 1014 if (qdisc->flags & TCQ_F_BUILTIN || 1015 !refcount_dec_and_rtnl_lock(&qdisc->refcnt)) 1016 return; 1017 1018 qdisc_destroy(qdisc); 1019 rtnl_unlock(); 1020 } 1021 EXPORT_SYMBOL(qdisc_put_unlocked); 1022 1023 /* Attach toplevel qdisc to device queue. */ 1024 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, 1025 struct Qdisc *qdisc) 1026 { 1027 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; 1028 spinlock_t *root_lock; 1029 1030 root_lock = qdisc_lock(oqdisc); 1031 spin_lock_bh(root_lock); 1032 1033 /* ... and graft new one */ 1034 if (qdisc == NULL) 1035 qdisc = &noop_qdisc; 1036 dev_queue->qdisc_sleeping = qdisc; 1037 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); 1038 1039 spin_unlock_bh(root_lock); 1040 1041 return oqdisc; 1042 } 1043 EXPORT_SYMBOL(dev_graft_qdisc); 1044 1045 static void attach_one_default_qdisc(struct net_device *dev, 1046 struct netdev_queue *dev_queue, 1047 void *_unused) 1048 { 1049 struct Qdisc *qdisc; 1050 const struct Qdisc_ops *ops = default_qdisc_ops; 1051 1052 if (dev->priv_flags & IFF_NO_QUEUE) 1053 ops = &noqueue_qdisc_ops; 1054 1055 qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL); 1056 if (!qdisc) { 1057 netdev_info(dev, "activation failed\n"); 1058 return; 1059 } 1060 if (!netif_is_multiqueue(dev)) 1061 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 1062 dev_queue->qdisc_sleeping = qdisc; 1063 } 1064 1065 static void attach_default_qdiscs(struct net_device *dev) 1066 { 1067 struct netdev_queue *txq; 1068 struct Qdisc *qdisc; 1069 1070 txq = netdev_get_tx_queue(dev, 0); 1071 1072 if (!netif_is_multiqueue(dev) || 1073 dev->priv_flags & IFF_NO_QUEUE) { 1074 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); 1075 dev->qdisc = txq->qdisc_sleeping; 1076 qdisc_refcount_inc(dev->qdisc); 1077 } else { 1078 qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); 1079 if (qdisc) { 1080 dev->qdisc = qdisc; 1081 qdisc->ops->attach(qdisc); 1082 } 1083 } 1084 #ifdef CONFIG_NET_SCHED 1085 if (dev->qdisc != &noop_qdisc) 1086 qdisc_hash_add(dev->qdisc, false); 1087 #endif 1088 } 1089 1090 static void transition_one_qdisc(struct net_device *dev, 1091 struct netdev_queue *dev_queue, 1092 void *_need_watchdog) 1093 { 1094 struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; 1095 int *need_watchdog_p = _need_watchdog; 1096 1097 if (!(new_qdisc->flags & TCQ_F_BUILTIN)) 1098 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); 1099 1100 rcu_assign_pointer(dev_queue->qdisc, new_qdisc); 1101 if (need_watchdog_p) { 1102 dev_queue->trans_start = 0; 1103 *need_watchdog_p = 1; 1104 } 1105 } 1106 1107 void dev_activate(struct net_device *dev) 1108 { 1109 int need_watchdog; 1110 1111 /* No queueing discipline is attached to device; 1112 * create default one for devices, which need queueing 1113 * and noqueue_qdisc for virtual interfaces 1114 */ 1115 1116 if (dev->qdisc == &noop_qdisc) 1117 attach_default_qdiscs(dev); 1118 1119 if (!netif_carrier_ok(dev)) 1120 /* Delay activation until next carrier-on event */ 1121 return; 1122 1123 need_watchdog = 0; 1124 netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog); 1125 if (dev_ingress_queue(dev)) 1126 transition_one_qdisc(dev, dev_ingress_queue(dev), NULL); 1127 1128 if (need_watchdog) { 1129 netif_trans_update(dev); 1130 dev_watchdog_up(dev); 1131 } 1132 } 1133 EXPORT_SYMBOL(dev_activate); 1134 1135 static void dev_deactivate_queue(struct net_device *dev, 1136 struct netdev_queue *dev_queue, 1137 void *_qdisc_default) 1138 { 1139 struct Qdisc *qdisc_default = _qdisc_default; 1140 struct Qdisc *qdisc; 1141 1142 qdisc = rtnl_dereference(dev_queue->qdisc); 1143 if (qdisc) { 1144 bool nolock = qdisc->flags & TCQ_F_NOLOCK; 1145 1146 if (nolock) 1147 spin_lock_bh(&qdisc->seqlock); 1148 spin_lock_bh(qdisc_lock(qdisc)); 1149 1150 if (!(qdisc->flags & TCQ_F_BUILTIN)) 1151 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); 1152 1153 rcu_assign_pointer(dev_queue->qdisc, qdisc_default); 1154 qdisc_reset(qdisc); 1155 1156 spin_unlock_bh(qdisc_lock(qdisc)); 1157 if (nolock) 1158 spin_unlock_bh(&qdisc->seqlock); 1159 } 1160 } 1161 1162 static bool some_qdisc_is_busy(struct net_device *dev) 1163 { 1164 unsigned int i; 1165 1166 for (i = 0; i < dev->num_tx_queues; i++) { 1167 struct netdev_queue *dev_queue; 1168 spinlock_t *root_lock; 1169 struct Qdisc *q; 1170 int val; 1171 1172 dev_queue = netdev_get_tx_queue(dev, i); 1173 q = dev_queue->qdisc_sleeping; 1174 1175 root_lock = qdisc_lock(q); 1176 spin_lock_bh(root_lock); 1177 1178 val = (qdisc_is_running(q) || 1179 test_bit(__QDISC_STATE_SCHED, &q->state)); 1180 1181 spin_unlock_bh(root_lock); 1182 1183 if (val) 1184 return true; 1185 } 1186 return false; 1187 } 1188 1189 static void dev_qdisc_reset(struct net_device *dev, 1190 struct netdev_queue *dev_queue, 1191 void *none) 1192 { 1193 struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1194 1195 if (qdisc) 1196 qdisc_reset(qdisc); 1197 } 1198 1199 /** 1200 * dev_deactivate_many - deactivate transmissions on several devices 1201 * @head: list of devices to deactivate 1202 * 1203 * This function returns only when all outstanding transmissions 1204 * have completed, unless all devices are in dismantle phase. 1205 */ 1206 void dev_deactivate_many(struct list_head *head) 1207 { 1208 struct net_device *dev; 1209 1210 list_for_each_entry(dev, head, close_list) { 1211 netdev_for_each_tx_queue(dev, dev_deactivate_queue, 1212 &noop_qdisc); 1213 if (dev_ingress_queue(dev)) 1214 dev_deactivate_queue(dev, dev_ingress_queue(dev), 1215 &noop_qdisc); 1216 1217 dev_watchdog_down(dev); 1218 } 1219 1220 /* Wait for outstanding qdisc-less dev_queue_xmit calls. 1221 * This is avoided if all devices are in dismantle phase : 1222 * Caller will call synchronize_net() for us 1223 */ 1224 synchronize_net(); 1225 1226 /* Wait for outstanding qdisc_run calls. */ 1227 list_for_each_entry(dev, head, close_list) { 1228 while (some_qdisc_is_busy(dev)) 1229 yield(); 1230 /* The new qdisc is assigned at this point so we can safely 1231 * unwind stale skb lists and qdisc statistics 1232 */ 1233 netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL); 1234 if (dev_ingress_queue(dev)) 1235 dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL); 1236 } 1237 } 1238 1239 void dev_deactivate(struct net_device *dev) 1240 { 1241 LIST_HEAD(single); 1242 1243 list_add(&dev->close_list, &single); 1244 dev_deactivate_many(&single); 1245 list_del(&single); 1246 } 1247 EXPORT_SYMBOL(dev_deactivate); 1248 1249 static int qdisc_change_tx_queue_len(struct net_device *dev, 1250 struct netdev_queue *dev_queue) 1251 { 1252 struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1253 const struct Qdisc_ops *ops = qdisc->ops; 1254 1255 if (ops->change_tx_queue_len) 1256 return ops->change_tx_queue_len(qdisc, dev->tx_queue_len); 1257 return 0; 1258 } 1259 1260 int dev_qdisc_change_tx_queue_len(struct net_device *dev) 1261 { 1262 bool up = dev->flags & IFF_UP; 1263 unsigned int i; 1264 int ret = 0; 1265 1266 if (up) 1267 dev_deactivate(dev); 1268 1269 for (i = 0; i < dev->num_tx_queues; i++) { 1270 ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]); 1271 1272 /* TODO: revert changes on a partial failure */ 1273 if (ret) 1274 break; 1275 } 1276 1277 if (up) 1278 dev_activate(dev); 1279 return ret; 1280 } 1281 1282 static void dev_init_scheduler_queue(struct net_device *dev, 1283 struct netdev_queue *dev_queue, 1284 void *_qdisc) 1285 { 1286 struct Qdisc *qdisc = _qdisc; 1287 1288 rcu_assign_pointer(dev_queue->qdisc, qdisc); 1289 dev_queue->qdisc_sleeping = qdisc; 1290 } 1291 1292 void dev_init_scheduler(struct net_device *dev) 1293 { 1294 dev->qdisc = &noop_qdisc; 1295 netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); 1296 if (dev_ingress_queue(dev)) 1297 dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); 1298 1299 timer_setup(&dev->watchdog_timer, dev_watchdog, 0); 1300 } 1301 1302 static void shutdown_scheduler_queue(struct net_device *dev, 1303 struct netdev_queue *dev_queue, 1304 void *_qdisc_default) 1305 { 1306 struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1307 struct Qdisc *qdisc_default = _qdisc_default; 1308 1309 if (qdisc) { 1310 rcu_assign_pointer(dev_queue->qdisc, qdisc_default); 1311 dev_queue->qdisc_sleeping = qdisc_default; 1312 1313 qdisc_put(qdisc); 1314 } 1315 } 1316 1317 void dev_shutdown(struct net_device *dev) 1318 { 1319 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); 1320 if (dev_ingress_queue(dev)) 1321 shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); 1322 qdisc_put(dev->qdisc); 1323 dev->qdisc = &noop_qdisc; 1324 1325 WARN_ON(timer_pending(&dev->watchdog_timer)); 1326 } 1327 1328 void psched_ratecfg_precompute(struct psched_ratecfg *r, 1329 const struct tc_ratespec *conf, 1330 u64 rate64) 1331 { 1332 memset(r, 0, sizeof(*r)); 1333 r->overhead = conf->overhead; 1334 r->rate_bytes_ps = max_t(u64, conf->rate, rate64); 1335 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); 1336 r->mult = 1; 1337 /* 1338 * The deal here is to replace a divide by a reciprocal one 1339 * in fast path (a reciprocal divide is a multiply and a shift) 1340 * 1341 * Normal formula would be : 1342 * time_in_ns = (NSEC_PER_SEC * len) / rate_bps 1343 * 1344 * We compute mult/shift to use instead : 1345 * time_in_ns = (len * mult) >> shift; 1346 * 1347 * We try to get the highest possible mult value for accuracy, 1348 * but have to make sure no overflows will ever happen. 1349 */ 1350 if (r->rate_bytes_ps > 0) { 1351 u64 factor = NSEC_PER_SEC; 1352 1353 for (;;) { 1354 r->mult = div64_u64(factor, r->rate_bytes_ps); 1355 if (r->mult & (1U << 31) || factor & (1ULL << 63)) 1356 break; 1357 factor <<= 1; 1358 r->shift++; 1359 } 1360 } 1361 } 1362 EXPORT_SYMBOL(psched_ratecfg_precompute); 1363 1364 static void mini_qdisc_rcu_func(struct rcu_head *head) 1365 { 1366 } 1367 1368 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, 1369 struct tcf_proto *tp_head) 1370 { 1371 /* Protected with chain0->filter_chain_lock. 1372 * Can't access chain directly because tp_head can be NULL. 1373 */ 1374 struct mini_Qdisc *miniq_old = 1375 rcu_dereference_protected(*miniqp->p_miniq, 1); 1376 struct mini_Qdisc *miniq; 1377 1378 if (!tp_head) { 1379 RCU_INIT_POINTER(*miniqp->p_miniq, NULL); 1380 /* Wait for flying RCU callback before it is freed. */ 1381 rcu_barrier(); 1382 return; 1383 } 1384 1385 miniq = !miniq_old || miniq_old == &miniqp->miniq2 ? 1386 &miniqp->miniq1 : &miniqp->miniq2; 1387 1388 /* We need to make sure that readers won't see the miniq 1389 * we are about to modify. So wait until previous call_rcu callback 1390 * is done. 1391 */ 1392 rcu_barrier(); 1393 miniq->filter_list = tp_head; 1394 rcu_assign_pointer(*miniqp->p_miniq, miniq); 1395 1396 if (miniq_old) 1397 /* This is counterpart of the rcu barriers above. We need to 1398 * block potential new user of miniq_old until all readers 1399 * are not seeing it. 1400 */ 1401 call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func); 1402 } 1403 EXPORT_SYMBOL(mini_qdisc_pair_swap); 1404 1405 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, 1406 struct mini_Qdisc __rcu **p_miniq) 1407 { 1408 miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats; 1409 miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; 1410 miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; 1411 miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; 1412 miniqp->p_miniq = p_miniq; 1413 } 1414 EXPORT_SYMBOL(mini_qdisc_pair_init); 1415