/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	skb_dst_force(skb);
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason for requeuing without tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
				   dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}
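/*
 * Illustrative sketch (not part of the original file): the "dead loop"
 * above fires when a driver's transmit routine re-enters the stack on
 * the same device while still holding the tx lock, e.g. a hypothetical
 * driver doing:
 *
 *	static netdev_tx_t bad_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		dev_queue_xmit(skb);	// re-enters the qdisc on the same dev
 *		return NETDEV_TX_OK;
 *	}
 *
 * The recursing CPU already owns dev_queue->_xmit_lock, so
 * xmit_lock_owner == smp_processor_id() and the skb is dropped instead
 * of deadlocking.
 */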
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_frozen_or_stopped(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			pr_warning("BUG %s code %d qlen %d\n",
				   dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_tx_queue_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;
	WARN_ON_ONCE(skb_dst_is_noref(skb));
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = weight_p;

	while (qdisc_restart(q)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		if (--quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
EXPORT_SYMBOL(dev_trans_start);
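/*
 * For context (a sketch, not part of this file): the core reaches
 * __qdisc_run() through a small inline wrapper, e.g. from
 * net_tx_action(), that takes __QDISC_STATE_RUNNING first, roughly:
 *
 *	static inline void qdisc_run(struct Qdisc *q)
 *	{
 *		if (qdisc_run_begin(q))		// try to become the owner
 *			__qdisc_run(q);		// drains up to weight_p skbs
 *	}
 *
 * so losing the qdisc_run_begin() race is harmless: the current owner
 * is already draining the queue.
 */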
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netif_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netif_notify_peers);
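/*
 * Usage sketch (hypothetical driver, not part of this file): the
 * watchdog above only runs for drivers that provide ndo_tx_timeout,
 * and the carrier helpers are what a link-state handler calls:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_start_xmit	= foo_start_xmit,
 *		.ndo_tx_timeout	= foo_tx_timeout,	// reset stuck hardware
 *	};
 *
 *	dev->watchdog_timeo = 2 * HZ;	// else __netdev_watchdog_up() uses 5*HZ
 *
 *	static void foo_link_change(struct net_device *dev, bool link_up)
 *	{
 *		if (link_up)
 *			netif_carrier_on(dev);	// also rearms the watchdog
 *		else
 *			netif_carrier_off(dev);
 *	}
 */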
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
	.qdisc_sleeping	=	&noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};


static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
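/*
 * Worked example (illustrative, not from the original file): an skb with
 * skb->priority == TC_PRIO_INTERACTIVE (6) maps via prio2band[6] to
 * band 0, the band dequeued first; TC_PRIO_BESTEFFORT (0) maps to
 * band 1 and TC_PRIO_BULK (2) to band 2. If bands 1 and 2 are both
 * non-empty, bitmap == (1 << 1) | (1 << 2) == 6 and bitmap2band[6] == 1,
 * i.e. the table picks the lowest set bit, which is the
 * highest-priority non-empty band.
 */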
static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(band2list(priv, prio));

	/* Can bypass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);
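/*
 * Note (illustrative, assuming QDISC_ALIGNTO is 32 as in kernels of this
 * vintage): QDISC_ALIGN() below rounds a size or address up to the next
 * aligned boundary so the private area starts cache-friendly, e.g.
 *
 *	QDISC_ALIGN(40) == (40 + 31) & ~31 == 64
 *
 * The second kzalloc_node() with "+ QDISC_ALIGNTO - 1" guarantees an
 * aligned address exists inside the allocation even if the allocator
 * returned an unaligned pointer; sch->padded records the offset so
 * qdisc_rcu_free() can recover the original pointer.
 */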
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non aligned memory, ask more and do alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	spin_lock_init(&sch->busylock);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops, unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait an RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);
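/*
 * Lifecycle sketch (illustrative): a qdisc from qdisc_create_dflt()
 * starts with refcnt == 1 and is freed on the final qdisc_destroy(),
 * one RCU grace period later:
 *
 *	q = qdisc_create_dflt(txq, &pfifo_fast_ops, TC_H_ROOT);
 *	if (q)
 *		...			// use q; atomic_inc(&q->refcnt) to share
 *	qdisc_destroy(q);		// refcnt 1 -> 0, call_rcu() frees it
 *
 * The builtin qdiscs (&noop_qdisc, noqueue_qdisc) carry TCQ_F_BUILTIN
 * and are never freed by this path.
 */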
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc = &noqueue_qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			netdev_info(dev, "activation failed\n");
			return;
		}
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}
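/*
 * Example of the default-qdisc choice above (illustrative): a
 * hypothetical 4-queue NIC with tx_queue_len == 1000 takes the mq
 * branch and gets an mq root qdisc; a single-queue NIC gets pfifo_fast
 * on its one queue; a virtual device with tx_queue_len == 0 (e.g.
 * loopback) gets the builtin noqueue_qdisc on every queue.
 */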
void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for
	   virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}
/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, unreg_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		sync_needed |= !dev->dismantle;
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	if (sync_needed)
		synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, unreg_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
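/*
 * Lifecycle summary (illustrative, as the entry points above are used
 * by the core): a device moves through them roughly as follows:
 *
 *	dev_init_scheduler(dev);	// register: everything -> &noop_qdisc
 *	dev_activate(dev);		// open: attach defaults, start watchdog
 *	dev_deactivate(dev);		// close: swap in noop, drain qdisc_run
 *	dev_shutdown(dev);		// unregister: destroy attached qdiscs
 */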