/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
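/*
 * Illustrative sketch (not part of this file) of the reader-side rules
 * described above, assuming a hypothetical helper that only walks the list.
 * A pure reader may take either rcu_read_lock() or dev_base_lock for
 * reading; writers are serialized by the rtnl semaphore:
 *
 *	static int count_running_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int count = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			if (dev->flags & IFF_UP)
 *				count++;
 *		rcu_read_unlock();
 *		return count;
 *	}
 */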
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

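/*
 * Illustrative sketch (not part of this file): an out-of-tree module
 * registering a tap for every incoming frame via dev_add_pack().  The
 * handler and variable names are hypothetical; only the packet_type setup
 * and the add/remove calls reflect the API above:
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// Inspect the frame here; we own this reference, so free it.
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	// receive everything
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);	// may sleep: waits for a grace period
 */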
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

			Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

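/*
 * The entries added here come from the "netdev=" kernel command line
 * option, parsed by netdev_boot_setup() further down in this file.  An
 * illustrative (hypothetical) command line for a legacy adapter:
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * which get_options() splits into irq=9, base_addr=0x300, mem_start=0,
 * mem_end=0, leaving "eth1" as the name passed to netdev_boot_setup_add().
 */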
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

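/*
 * Illustrative sketch (not part of this file) contrasting the two lookup
 * flavours above.  The "eth0" name and ifindex variable are hypothetical:
 *
 *	// Takes a reference; usable from process context, release with dev_put().
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 *
 *	// No reference taken; only valid inside the RCU read-side section.
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(&init_net, ifindex);
 *	if (dev)
 *		...
 *	rcu_read_unlock();
 */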
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

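/*
 * Illustrative sketch (not part of this file): validating a name that came
 * from userspace before using it for a lookup.  The req structure and the
 * surrounding context are hypothetical:
 *
 *	if (!dev_valid_name(req->ifname))
 *		return -EINVAL;
 *	dev = dev_get_by_name(net, req->ifname);
 *	if (!dev)
 *		return -ENODEV;
 */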
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

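/*
 * Illustrative sketch (not part of this file): a driver asking for the next
 * free instance of a name pattern before registering its device.  The
 * "foo%d" pattern is hypothetical:
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto out_free;
 *	// dev->name now holds e.g. "foo0"; err is the unit number chosen.
 */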
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	ret = netpoll_rx_disable(dev);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

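/*
 * Illustrative sketch (not part of this file): bringing an interface up
 * from kernel context.  dev_open() must run under the rtnl semaphore (see
 * the ASSERT_RTNL() in __dev_open()); the "eth0" name is hypothetical:
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, "eth0");
 *	if (dev)
 *		err = dev_open(dev);
 *	rtnl_unlock();
 */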
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	retval = netpoll_rx_disable(dev);
	if (retval)
		return retval;

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	int ret = 0;
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		ret = netpoll_rx_disable(dev);
		if (ret)
			return ret;

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return ret;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

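/*
 * Illustrative sketch (not part of this file): a module reacting to device
 * registration events through the chain above.  The callback and block
 * names are hypothetical; note that in this code base the void pointer
 * handed to the callback is the struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_REGISTER)
 *			pr_info("%s registered\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 *	...
 *	unregister_netdevice_notifier(&my_netdev_nb);
 */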
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

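/*
 * Illustrative sketch (not part of this file): how a virtual pair device
 * might use dev_forward_skb() above to hand a transmitted skb to its peer's
 * receive path, as described in that function's kernel-doc.  The peer
 * lookup helper and function name are hypothetical:
 *
 *	static netdev_tx_t pair_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = get_peer_somehow(dev);
 *		unsigned int len = skb->len;	// record before skb is consumed
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS) {
 *			dev->stats.tx_packets++;
 *			dev->stats.tx_bytes += len;
 *		} else {
 *			dev->stats.tx_dropped++;
 *		}
 *		return NETDEV_TX_OK;
 *	}
 */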
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
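/*
 * Illustrative sketch (not part of this file): a driver pinning transmit
 * queue i to the CPU with the same index via netif_set_xps_queue() above
 * (only available with CONFIG_XPS).  The loop variable and error handling
 * are hypothetical:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(i, mask);
 *	err = netif_set_xps_queue(dev, mask, i);
 *	free_cpumask_var(mask);
 */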
1989 */ 1990 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 1991 { 1992 int rc; 1993 1994 if (txq < 1 || txq > dev->num_tx_queues) 1995 return -EINVAL; 1996 1997 if (dev->reg_state == NETREG_REGISTERED || 1998 dev->reg_state == NETREG_UNREGISTERING) { 1999 ASSERT_RTNL(); 2000 2001 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2002 txq); 2003 if (rc) 2004 return rc; 2005 2006 if (dev->num_tc) 2007 netif_setup_tc(dev, txq); 2008 2009 if (txq < dev->real_num_tx_queues) { 2010 qdisc_reset_all_tx_gt(dev, txq); 2011 #ifdef CONFIG_XPS 2012 netif_reset_xps_queues_gt(dev, txq); 2013 #endif 2014 } 2015 } 2016 2017 dev->real_num_tx_queues = txq; 2018 return 0; 2019 } 2020 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2021 2022 #ifdef CONFIG_RPS 2023 /** 2024 * netif_set_real_num_rx_queues - set actual number of RX queues used 2025 * @dev: Network device 2026 * @rxq: Actual number of RX queues 2027 * 2028 * This must be called either with the rtnl_lock held or before 2029 * registration of the net device. Returns 0 on success, or a 2030 * negative error code. If called before registration, it always 2031 * succeeds. 2032 */ 2033 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2034 { 2035 int rc; 2036 2037 if (rxq < 1 || rxq > dev->num_rx_queues) 2038 return -EINVAL; 2039 2040 if (dev->reg_state == NETREG_REGISTERED) { 2041 ASSERT_RTNL(); 2042 2043 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2044 rxq); 2045 if (rc) 2046 return rc; 2047 } 2048 2049 dev->real_num_rx_queues = rxq; 2050 return 0; 2051 } 2052 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2053 #endif 2054 2055 /** 2056 * netif_get_num_default_rss_queues - default number of RSS queues 2057 * 2058 * This routine should set an upper limit on the number of RSS queues 2059 * used by default by multiqueue devices. 2060 */ 2061 int netif_get_num_default_rss_queues(void) 2062 { 2063 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2064 } 2065 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2066 2067 static inline void __netif_reschedule(struct Qdisc *q) 2068 { 2069 struct softnet_data *sd; 2070 unsigned long flags; 2071 2072 local_irq_save(flags); 2073 sd = &__get_cpu_var(softnet_data); 2074 q->next_sched = NULL; 2075 *sd->output_queue_tailp = q; 2076 sd->output_queue_tailp = &q->next_sched; 2077 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2078 local_irq_restore(flags); 2079 } 2080 2081 void __netif_schedule(struct Qdisc *q) 2082 { 2083 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2084 __netif_reschedule(q); 2085 } 2086 EXPORT_SYMBOL(__netif_schedule); 2087 2088 void dev_kfree_skb_irq(struct sk_buff *skb) 2089 { 2090 if (atomic_dec_and_test(&skb->users)) { 2091 struct softnet_data *sd; 2092 unsigned long flags; 2093 2094 local_irq_save(flags); 2095 sd = &__get_cpu_var(softnet_data); 2096 skb->next = sd->completion_queue; 2097 sd->completion_queue = skb; 2098 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2099 local_irq_restore(flags); 2100 } 2101 } 2102 EXPORT_SYMBOL(dev_kfree_skb_irq); 2103 2104 void dev_kfree_skb_any(struct sk_buff *skb) 2105 { 2106 if (in_irq() || irqs_disabled()) 2107 dev_kfree_skb_irq(skb); 2108 else 2109 dev_kfree_skb(skb); 2110 } 2111 EXPORT_SYMBOL(dev_kfree_skb_any); 2112 2113 2114 /** 2115 * netif_device_detach - mark device as removed 2116 * @dev: network device 2117 * 2118 * Mark device as removed from system and therefore no longer available. 
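 *
 * A typical (purely illustrative, hypothetical) caller is a driver's
 * suspend handler, which detaches the device before quiescing the
 * hardware and calls netif_device_attach() again on resume:
 *
 *	netif_device_detach(netdev);
 *	foo_hw_stop(priv);	(hypothetical hardware shutdown helper)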
2119 */ 2120 void netif_device_detach(struct net_device *dev) 2121 { 2122 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 2123 netif_running(dev)) { 2124 netif_tx_stop_all_queues(dev); 2125 } 2126 } 2127 EXPORT_SYMBOL(netif_device_detach); 2128 2129 /** 2130 * netif_device_attach - mark device as attached 2131 * @dev: network device 2132 * 2133 * Mark device as attached from system and restart if needed. 2134 */ 2135 void netif_device_attach(struct net_device *dev) 2136 { 2137 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 2138 netif_running(dev)) { 2139 netif_tx_wake_all_queues(dev); 2140 __netdev_watchdog_up(dev); 2141 } 2142 } 2143 EXPORT_SYMBOL(netif_device_attach); 2144 2145 static void skb_warn_bad_offload(const struct sk_buff *skb) 2146 { 2147 static const netdev_features_t null_features = 0; 2148 struct net_device *dev = skb->dev; 2149 const char *driver = ""; 2150 2151 if (!net_ratelimit()) 2152 return; 2153 2154 if (dev && dev->dev.parent) 2155 driver = dev_driver_string(dev->dev.parent); 2156 2157 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2158 "gso_type=%d ip_summed=%d\n", 2159 driver, dev ? &dev->features : &null_features, 2160 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2161 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2162 skb_shinfo(skb)->gso_type, skb->ip_summed); 2163 } 2164 2165 /* 2166 * Invalidate hardware checksum when packet is to be mangled, and 2167 * complete checksum manually on outgoing path. 2168 */ 2169 int skb_checksum_help(struct sk_buff *skb) 2170 { 2171 __wsum csum; 2172 int ret = 0, offset; 2173 2174 if (skb->ip_summed == CHECKSUM_COMPLETE) 2175 goto out_set_summed; 2176 2177 if (unlikely(skb_shinfo(skb)->gso_size)) { 2178 skb_warn_bad_offload(skb); 2179 return -EINVAL; 2180 } 2181 2182 /* Before computing a checksum, we should make sure no frag could 2183 * be modified by an external entity : checksum could be wrong. 2184 */ 2185 if (skb_has_shared_frag(skb)) { 2186 ret = __skb_linearize(skb); 2187 if (ret) 2188 goto out; 2189 } 2190 2191 offset = skb_checksum_start_offset(skb); 2192 BUG_ON(offset >= skb_headlen(skb)); 2193 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2194 2195 offset += skb->csum_offset; 2196 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2197 2198 if (skb_cloned(skb) && 2199 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 2200 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2201 if (ret) 2202 goto out; 2203 } 2204 2205 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2206 out_set_summed: 2207 skb->ip_summed = CHECKSUM_NONE; 2208 out: 2209 return ret; 2210 } 2211 EXPORT_SYMBOL(skb_checksum_help); 2212 2213 /** 2214 * skb_mac_gso_segment - mac layer segmentation handler. 
2215 * @skb: buffer to segment 2216 * @features: features for the output path (see dev->features) 2217 */ 2218 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 2219 netdev_features_t features) 2220 { 2221 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2222 struct packet_offload *ptype; 2223 __be16 type = skb->protocol; 2224 int vlan_depth = ETH_HLEN; 2225 2226 while (type == htons(ETH_P_8021Q)) { 2227 struct vlan_hdr *vh; 2228 2229 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2230 return ERR_PTR(-EINVAL); 2231 2232 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2233 type = vh->h_vlan_encapsulated_proto; 2234 vlan_depth += VLAN_HLEN; 2235 } 2236 2237 __skb_pull(skb, skb->mac_len); 2238 2239 rcu_read_lock(); 2240 list_for_each_entry_rcu(ptype, &offload_base, list) { 2241 if (ptype->type == type && ptype->callbacks.gso_segment) { 2242 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2243 int err; 2244 2245 err = ptype->callbacks.gso_send_check(skb); 2246 segs = ERR_PTR(err); 2247 if (err || skb_gso_ok(skb, features)) 2248 break; 2249 __skb_push(skb, (skb->data - 2250 skb_network_header(skb))); 2251 } 2252 segs = ptype->callbacks.gso_segment(skb, features); 2253 break; 2254 } 2255 } 2256 rcu_read_unlock(); 2257 2258 __skb_push(skb, skb->data - skb_mac_header(skb)); 2259 2260 return segs; 2261 } 2262 EXPORT_SYMBOL(skb_mac_gso_segment); 2263 2264 2265 /* openvswitch calls this on rx path, so we need a different check. 2266 */ 2267 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 2268 { 2269 if (tx_path) 2270 return skb->ip_summed != CHECKSUM_PARTIAL; 2271 else 2272 return skb->ip_summed == CHECKSUM_NONE; 2273 } 2274 2275 /** 2276 * __skb_gso_segment - Perform segmentation on skb. 2277 * @skb: buffer to segment 2278 * @features: features for the output path (see dev->features) 2279 * @tx_path: whether it is called in TX path 2280 * 2281 * This function segments the given skb and returns a list of segments. 2282 * 2283 * It may return NULL if the skb requires no segmentation. This is 2284 * only possible when GSO is used for verifying header integrity. 2285 */ 2286 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2287 netdev_features_t features, bool tx_path) 2288 { 2289 if (unlikely(skb_needs_check(skb, tx_path))) { 2290 int err; 2291 2292 skb_warn_bad_offload(skb); 2293 2294 if (skb_header_cloned(skb) && 2295 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 2296 return ERR_PTR(err); 2297 } 2298 2299 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 2300 skb_reset_mac_header(skb); 2301 skb_reset_mac_len(skb); 2302 2303 return skb_mac_gso_segment(skb, features); 2304 } 2305 EXPORT_SYMBOL(__skb_gso_segment); 2306 2307 /* Take action when hardware reception checksum errors are detected. */ 2308 #ifdef CONFIG_BUG 2309 void netdev_rx_csum_fault(struct net_device *dev) 2310 { 2311 if (net_ratelimit()) { 2312 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 2313 dump_stack(); 2314 } 2315 } 2316 EXPORT_SYMBOL(netdev_rx_csum_fault); 2317 #endif 2318 2319 /* Actually, we should eliminate this check as soon as we know, that: 2320 * 1. IOMMU is present and allows to map all the memory. 2321 * 2. No high memory really exists on this machine. 
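 *
 * Until then: a fragment sitting in high memory on a device without
 * NETIF_F_HIGHDMA, or (when PCI_DMA_BUS_IS_PHYS) a fragment whose
 * physical address lies beyond the parent device's dma_mask, makes the
 * skb unsuitable for direct DMA; harmonize_features() uses this check
 * to clear NETIF_F_SG so that the skb gets linearized before transmit.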
2322 */ 2323 2324 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2325 { 2326 #ifdef CONFIG_HIGHMEM 2327 int i; 2328 if (!(dev->features & NETIF_F_HIGHDMA)) { 2329 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2330 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2331 if (PageHighMem(skb_frag_page(frag))) 2332 return 1; 2333 } 2334 } 2335 2336 if (PCI_DMA_BUS_IS_PHYS) { 2337 struct device *pdev = dev->dev.parent; 2338 2339 if (!pdev) 2340 return 0; 2341 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2342 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2343 dma_addr_t addr = page_to_phys(skb_frag_page(frag)); 2344 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2345 return 1; 2346 } 2347 } 2348 #endif 2349 return 0; 2350 } 2351 2352 struct dev_gso_cb { 2353 void (*destructor)(struct sk_buff *skb); 2354 }; 2355 2356 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 2357 2358 static void dev_gso_skb_destructor(struct sk_buff *skb) 2359 { 2360 struct dev_gso_cb *cb; 2361 2362 do { 2363 struct sk_buff *nskb = skb->next; 2364 2365 skb->next = nskb->next; 2366 nskb->next = NULL; 2367 kfree_skb(nskb); 2368 } while (skb->next); 2369 2370 cb = DEV_GSO_CB(skb); 2371 if (cb->destructor) 2372 cb->destructor(skb); 2373 } 2374 2375 /** 2376 * dev_gso_segment - Perform emulated hardware segmentation on skb. 2377 * @skb: buffer to segment 2378 * @features: device features as applicable to this skb 2379 * 2380 * This function segments the given skb and stores the list of segments 2381 * in skb->next. 2382 */ 2383 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) 2384 { 2385 struct sk_buff *segs; 2386 2387 segs = skb_gso_segment(skb, features); 2388 2389 /* Verifying header integrity only. */ 2390 if (!segs) 2391 return 0; 2392 2393 if (IS_ERR(segs)) 2394 return PTR_ERR(segs); 2395 2396 skb->next = segs; 2397 DEV_GSO_CB(skb)->destructor = skb->destructor; 2398 skb->destructor = dev_gso_skb_destructor; 2399 2400 return 0; 2401 } 2402 2403 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) 2404 { 2405 return ((features & NETIF_F_GEN_CSUM) || 2406 ((features & NETIF_F_V4_CSUM) && 2407 protocol == htons(ETH_P_IP)) || 2408 ((features & NETIF_F_V6_CSUM) && 2409 protocol == htons(ETH_P_IPV6)) || 2410 ((features & NETIF_F_FCOE_CRC) && 2411 protocol == htons(ETH_P_FCOE))); 2412 } 2413 2414 static netdev_features_t harmonize_features(struct sk_buff *skb, 2415 __be16 protocol, netdev_features_t features) 2416 { 2417 if (skb->ip_summed != CHECKSUM_NONE && 2418 !can_checksum_protocol(features, protocol)) { 2419 features &= ~NETIF_F_ALL_CSUM; 2420 features &= ~NETIF_F_SG; 2421 } else if (illegal_highdma(skb->dev, skb)) { 2422 features &= ~NETIF_F_SG; 2423 } 2424 2425 return features; 2426 } 2427 2428 netdev_features_t netif_skb_features(struct sk_buff *skb) 2429 { 2430 __be16 protocol = skb->protocol; 2431 netdev_features_t features = skb->dev->features; 2432 2433 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2434 features &= ~NETIF_F_GSO_MASK; 2435 2436 if (protocol == htons(ETH_P_8021Q)) { 2437 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2438 protocol = veh->h_vlan_encapsulated_proto; 2439 } else if (!vlan_tx_tag_present(skb)) { 2440 return harmonize_features(skb, protocol, features); 2441 } 2442 2443 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); 2444 2445 if (protocol != htons(ETH_P_8021Q)) { 2446 return harmonize_features(skb, protocol, features); 2447 } else { 2448 features &= 
NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 2449 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; 2450 return harmonize_features(skb, protocol, features); 2451 } 2452 } 2453 EXPORT_SYMBOL(netif_skb_features); 2454 2455 /* 2456 * Returns true if either: 2457 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2458 * 2. skb is fragmented and the device does not support SG. 2459 */ 2460 static inline int skb_needs_linearize(struct sk_buff *skb, 2461 int features) 2462 { 2463 return skb_is_nonlinear(skb) && 2464 ((skb_has_frag_list(skb) && 2465 !(features & NETIF_F_FRAGLIST)) || 2466 (skb_shinfo(skb)->nr_frags && 2467 !(features & NETIF_F_SG))); 2468 } 2469 2470 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2471 struct netdev_queue *txq) 2472 { 2473 const struct net_device_ops *ops = dev->netdev_ops; 2474 int rc = NETDEV_TX_OK; 2475 unsigned int skb_len; 2476 2477 if (likely(!skb->next)) { 2478 netdev_features_t features; 2479 2480 /* 2481 * If device doesn't need skb->dst, release it right now while 2482 * its hot in this cpu cache 2483 */ 2484 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2485 skb_dst_drop(skb); 2486 2487 features = netif_skb_features(skb); 2488 2489 if (vlan_tx_tag_present(skb) && 2490 !(features & NETIF_F_HW_VLAN_TX)) { 2491 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2492 if (unlikely(!skb)) 2493 goto out; 2494 2495 skb->vlan_tci = 0; 2496 } 2497 2498 /* If encapsulation offload request, verify we are testing 2499 * hardware encapsulation features instead of standard 2500 * features for the netdev 2501 */ 2502 if (skb->encapsulation) 2503 features &= dev->hw_enc_features; 2504 2505 if (netif_needs_gso(skb, features)) { 2506 if (unlikely(dev_gso_segment(skb, features))) 2507 goto out_kfree_skb; 2508 if (skb->next) 2509 goto gso; 2510 } else { 2511 if (skb_needs_linearize(skb, features) && 2512 __skb_linearize(skb)) 2513 goto out_kfree_skb; 2514 2515 /* If packet is not checksummed and device does not 2516 * support checksumming for this protocol, complete 2517 * checksumming here. 
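		 *
		 * (CHECKSUM_PARTIAL means the stack filled in only the
		 * pseudo-header checksum and recorded where the device
		 * should write the result via csum_start/csum_offset;
		 * skb_checksum_help() finishes the job in software when
		 * the device cannot.)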
2518 */ 2519 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2520 if (skb->encapsulation) 2521 skb_set_inner_transport_header(skb, 2522 skb_checksum_start_offset(skb)); 2523 else 2524 skb_set_transport_header(skb, 2525 skb_checksum_start_offset(skb)); 2526 if (!(features & NETIF_F_ALL_CSUM) && 2527 skb_checksum_help(skb)) 2528 goto out_kfree_skb; 2529 } 2530 } 2531 2532 if (!list_empty(&ptype_all)) 2533 dev_queue_xmit_nit(skb, dev); 2534 2535 skb_len = skb->len; 2536 rc = ops->ndo_start_xmit(skb, dev); 2537 trace_net_dev_xmit(skb, rc, dev, skb_len); 2538 if (rc == NETDEV_TX_OK) 2539 txq_trans_update(txq); 2540 return rc; 2541 } 2542 2543 gso: 2544 do { 2545 struct sk_buff *nskb = skb->next; 2546 2547 skb->next = nskb->next; 2548 nskb->next = NULL; 2549 2550 /* 2551 * If device doesn't need nskb->dst, release it right now while 2552 * its hot in this cpu cache 2553 */ 2554 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2555 skb_dst_drop(nskb); 2556 2557 if (!list_empty(&ptype_all)) 2558 dev_queue_xmit_nit(nskb, dev); 2559 2560 skb_len = nskb->len; 2561 rc = ops->ndo_start_xmit(nskb, dev); 2562 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2563 if (unlikely(rc != NETDEV_TX_OK)) { 2564 if (rc & ~NETDEV_TX_MASK) 2565 goto out_kfree_gso_skb; 2566 nskb->next = skb->next; 2567 skb->next = nskb; 2568 return rc; 2569 } 2570 txq_trans_update(txq); 2571 if (unlikely(netif_xmit_stopped(txq) && skb->next)) 2572 return NETDEV_TX_BUSY; 2573 } while (skb->next); 2574 2575 out_kfree_gso_skb: 2576 if (likely(skb->next == NULL)) 2577 skb->destructor = DEV_GSO_CB(skb)->destructor; 2578 out_kfree_skb: 2579 kfree_skb(skb); 2580 out: 2581 return rc; 2582 } 2583 2584 static void qdisc_pkt_len_init(struct sk_buff *skb) 2585 { 2586 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2587 2588 qdisc_skb_cb(skb)->pkt_len = skb->len; 2589 2590 /* To get more precise estimation of bytes sent on wire, 2591 * we add to pkt_len the headers size of all segments 2592 */ 2593 if (shinfo->gso_size) { 2594 unsigned int hdr_len; 2595 2596 /* mac layer + network layer */ 2597 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 2598 2599 /* + transport layer */ 2600 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 2601 hdr_len += tcp_hdrlen(skb); 2602 else 2603 hdr_len += sizeof(struct udphdr); 2604 qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len; 2605 } 2606 } 2607 2608 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2609 struct net_device *dev, 2610 struct netdev_queue *txq) 2611 { 2612 spinlock_t *root_lock = qdisc_lock(q); 2613 bool contended; 2614 int rc; 2615 2616 qdisc_pkt_len_init(skb); 2617 qdisc_calculate_pkt_len(skb, q); 2618 /* 2619 * Heuristic to force contended enqueues to serialize on a 2620 * separate lock before trying to get qdisc main lock. 2621 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2622 * and dequeue packets faster. 2623 */ 2624 contended = qdisc_is_running(q); 2625 if (unlikely(contended)) 2626 spin_lock(&q->busylock); 2627 2628 spin_lock(root_lock); 2629 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2630 kfree_skb(skb); 2631 rc = NET_XMIT_DROP; 2632 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2633 qdisc_run_begin(q)) { 2634 /* 2635 * This is a work-conserving queue; there are no old skbs 2636 * waiting to be sent out; and the qdisc is not running - 2637 * xmit the skb directly. 
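	 *
	 * This is the TCQ_F_CAN_BYPASS fast path: the enqueue()/dequeue()
	 * pair is skipped, the packet is only accounted for with
	 * qdisc_bstats_update(), and if sch_direct_xmit() reports more
	 * pending work we let __qdisc_run() drain whatever is left.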
2638 */ 2639 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2640 skb_dst_force(skb); 2641 2642 qdisc_bstats_update(q, skb); 2643 2644 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2645 if (unlikely(contended)) { 2646 spin_unlock(&q->busylock); 2647 contended = false; 2648 } 2649 __qdisc_run(q); 2650 } else 2651 qdisc_run_end(q); 2652 2653 rc = NET_XMIT_SUCCESS; 2654 } else { 2655 skb_dst_force(skb); 2656 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 2657 if (qdisc_run_begin(q)) { 2658 if (unlikely(contended)) { 2659 spin_unlock(&q->busylock); 2660 contended = false; 2661 } 2662 __qdisc_run(q); 2663 } 2664 } 2665 spin_unlock(root_lock); 2666 if (unlikely(contended)) 2667 spin_unlock(&q->busylock); 2668 return rc; 2669 } 2670 2671 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 2672 static void skb_update_prio(struct sk_buff *skb) 2673 { 2674 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2675 2676 if (!skb->priority && skb->sk && map) { 2677 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; 2678 2679 if (prioidx < map->priomap_len) 2680 skb->priority = map->priomap[prioidx]; 2681 } 2682 } 2683 #else 2684 #define skb_update_prio(skb) 2685 #endif 2686 2687 static DEFINE_PER_CPU(int, xmit_recursion); 2688 #define RECURSION_LIMIT 10 2689 2690 /** 2691 * dev_loopback_xmit - loop back @skb 2692 * @skb: buffer to transmit 2693 */ 2694 int dev_loopback_xmit(struct sk_buff *skb) 2695 { 2696 skb_reset_mac_header(skb); 2697 __skb_pull(skb, skb_network_offset(skb)); 2698 skb->pkt_type = PACKET_LOOPBACK; 2699 skb->ip_summed = CHECKSUM_UNNECESSARY; 2700 WARN_ON(!skb_dst(skb)); 2701 skb_dst_force(skb); 2702 netif_rx_ni(skb); 2703 return 0; 2704 } 2705 EXPORT_SYMBOL(dev_loopback_xmit); 2706 2707 /** 2708 * dev_queue_xmit - transmit a buffer 2709 * @skb: buffer to transmit 2710 * 2711 * Queue a buffer for transmission to a network device. The caller must 2712 * have set the device and priority and built the buffer before calling 2713 * this function. The function can be called from an interrupt. 2714 * 2715 * A negative errno code is returned on a failure. A success does not 2716 * guarantee the frame will be transmitted as it may be dropped due 2717 * to congestion or traffic shaping. 2718 * 2719 * ----------------------------------------------------------------------------------- 2720 * I notice this method can also return errors from the queue disciplines, 2721 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2722 * be positive. 2723 * 2724 * Regardless of the return value, the skb is consumed, so it is currently 2725 * difficult to retry a send to this method. (You can bump the ref count 2726 * before sending to hold a reference for retry if you are careful.) 2727 * 2728 * When calling this method, interrupts MUST be enabled. This is because 2729 * the BH enable code must have IRQs enabled so that it will not deadlock. 2730 * --BLG 2731 */ 2732 int dev_queue_xmit(struct sk_buff *skb) 2733 { 2734 struct net_device *dev = skb->dev; 2735 struct netdev_queue *txq; 2736 struct Qdisc *q; 2737 int rc = -ENOMEM; 2738 2739 skb_reset_mac_header(skb); 2740 2741 /* Disable soft irqs for various locks below. Also 2742 * stops preemption for RCU. 
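	 * rcu_read_lock_bh() gives us both: with BHs off we stay on this
	 * CPU (so smp_processor_id() further down is stable), and we are
	 * inside an RCU read-side section for the txq->qdisc dereference.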
2743 */ 2744 rcu_read_lock_bh(); 2745 2746 skb_update_prio(skb); 2747 2748 txq = netdev_pick_tx(dev, skb); 2749 q = rcu_dereference_bh(txq->qdisc); 2750 2751 #ifdef CONFIG_NET_CLS_ACT 2752 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2753 #endif 2754 trace_net_dev_queue(skb); 2755 if (q->enqueue) { 2756 rc = __dev_xmit_skb(skb, q, dev, txq); 2757 goto out; 2758 } 2759 2760 /* The device has no queue. Common case for software devices: 2761 loopback, all the sorts of tunnels... 2762 2763 Really, it is unlikely that netif_tx_lock protection is necessary 2764 here. (f.e. loopback and IP tunnels are clean ignoring statistics 2765 counters.) 2766 However, it is possible, that they rely on protection 2767 made by us here. 2768 2769 Check this and shot the lock. It is not prone from deadlocks. 2770 Either shot noqueue qdisc, it is even simpler 8) 2771 */ 2772 if (dev->flags & IFF_UP) { 2773 int cpu = smp_processor_id(); /* ok because BHs are off */ 2774 2775 if (txq->xmit_lock_owner != cpu) { 2776 2777 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) 2778 goto recursion_alert; 2779 2780 HARD_TX_LOCK(dev, txq, cpu); 2781 2782 if (!netif_xmit_stopped(txq)) { 2783 __this_cpu_inc(xmit_recursion); 2784 rc = dev_hard_start_xmit(skb, dev, txq); 2785 __this_cpu_dec(xmit_recursion); 2786 if (dev_xmit_complete(rc)) { 2787 HARD_TX_UNLOCK(dev, txq); 2788 goto out; 2789 } 2790 } 2791 HARD_TX_UNLOCK(dev, txq); 2792 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 2793 dev->name); 2794 } else { 2795 /* Recursion is detected! It is possible, 2796 * unfortunately 2797 */ 2798 recursion_alert: 2799 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 2800 dev->name); 2801 } 2802 } 2803 2804 rc = -ENETDOWN; 2805 rcu_read_unlock_bh(); 2806 2807 kfree_skb(skb); 2808 return rc; 2809 out: 2810 rcu_read_unlock_bh(); 2811 return rc; 2812 } 2813 EXPORT_SYMBOL(dev_queue_xmit); 2814 2815 2816 /*======================================================================= 2817 Receiver routines 2818 =======================================================================*/ 2819 2820 int netdev_max_backlog __read_mostly = 1000; 2821 EXPORT_SYMBOL(netdev_max_backlog); 2822 2823 int netdev_tstamp_prequeue __read_mostly = 1; 2824 int netdev_budget __read_mostly = 300; 2825 int weight_p __read_mostly = 64; /* old backlog weight */ 2826 2827 /* Called with irq disabled */ 2828 static inline void ____napi_schedule(struct softnet_data *sd, 2829 struct napi_struct *napi) 2830 { 2831 list_add_tail(&napi->poll_list, &sd->poll_list); 2832 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2833 } 2834 2835 #ifdef CONFIG_RPS 2836 2837 /* One global table that all flow-based protocols share. */ 2838 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2839 EXPORT_SYMBOL(rps_sock_flow_table); 2840 2841 struct static_key rps_needed __read_mostly; 2842 2843 static struct rps_dev_flow * 2844 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2845 struct rps_dev_flow *rflow, u16 next_cpu) 2846 { 2847 if (next_cpu != RPS_NO_CPU) { 2848 #ifdef CONFIG_RFS_ACCEL 2849 struct netdev_rx_queue *rxqueue; 2850 struct rps_dev_flow_table *flow_table; 2851 struct rps_dev_flow *old_rflow; 2852 u32 flow_id; 2853 u16 rxq_index; 2854 int rc; 2855 2856 /* Should we steer this flow to a different hardware queue? 
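	 * This is the accelerated RFS path: if the device exposes an
	 * interrupt affinity reverse-map (rx_cpu_rmap) and advertises
	 * NETIF_F_NTUPLE, ask the driver via ndo_rx_flow_steer() to move
	 * the hardware flow onto an RX queue whose interrupt is affine
	 * to next_cpu.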
*/ 2857 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 2858 !(dev->features & NETIF_F_NTUPLE)) 2859 goto out; 2860 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 2861 if (rxq_index == skb_get_rx_queue(skb)) 2862 goto out; 2863 2864 rxqueue = dev->_rx + rxq_index; 2865 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2866 if (!flow_table) 2867 goto out; 2868 flow_id = skb->rxhash & flow_table->mask; 2869 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 2870 rxq_index, flow_id); 2871 if (rc < 0) 2872 goto out; 2873 old_rflow = rflow; 2874 rflow = &flow_table->flows[flow_id]; 2875 rflow->filter = rc; 2876 if (old_rflow->filter == rflow->filter) 2877 old_rflow->filter = RPS_NO_FILTER; 2878 out: 2879 #endif 2880 rflow->last_qtail = 2881 per_cpu(softnet_data, next_cpu).input_queue_head; 2882 } 2883 2884 rflow->cpu = next_cpu; 2885 return rflow; 2886 } 2887 2888 /* 2889 * get_rps_cpu is called from netif_receive_skb and returns the target 2890 * CPU from the RPS map of the receiving queue for a given skb. 2891 * rcu_read_lock must be held on entry. 2892 */ 2893 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2894 struct rps_dev_flow **rflowp) 2895 { 2896 struct netdev_rx_queue *rxqueue; 2897 struct rps_map *map; 2898 struct rps_dev_flow_table *flow_table; 2899 struct rps_sock_flow_table *sock_flow_table; 2900 int cpu = -1; 2901 u16 tcpu; 2902 2903 if (skb_rx_queue_recorded(skb)) { 2904 u16 index = skb_get_rx_queue(skb); 2905 if (unlikely(index >= dev->real_num_rx_queues)) { 2906 WARN_ONCE(dev->real_num_rx_queues > 1, 2907 "%s received packet on queue %u, but number " 2908 "of RX queues is %u\n", 2909 dev->name, index, dev->real_num_rx_queues); 2910 goto done; 2911 } 2912 rxqueue = dev->_rx + index; 2913 } else 2914 rxqueue = dev->_rx; 2915 2916 map = rcu_dereference(rxqueue->rps_map); 2917 if (map) { 2918 if (map->len == 1 && 2919 !rcu_access_pointer(rxqueue->rps_flow_table)) { 2920 tcpu = map->cpus[0]; 2921 if (cpu_online(tcpu)) 2922 cpu = tcpu; 2923 goto done; 2924 } 2925 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { 2926 goto done; 2927 } 2928 2929 skb_reset_network_header(skb); 2930 if (!skb_get_rxhash(skb)) 2931 goto done; 2932 2933 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2934 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2935 if (flow_table && sock_flow_table) { 2936 u16 next_cpu; 2937 struct rps_dev_flow *rflow; 2938 2939 rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; 2940 tcpu = rflow->cpu; 2941 2942 next_cpu = sock_flow_table->ents[skb->rxhash & 2943 sock_flow_table->mask]; 2944 2945 /* 2946 * If the desired CPU (where last recvmsg was done) is 2947 * different from current CPU (one in the rx-queue flow 2948 * table entry), switch if one of the following holds: 2949 * - Current CPU is unset (equal to RPS_NO_CPU). 2950 * - Current CPU is offline. 2951 * - The current CPU's queue tail has advanced beyond the 2952 * last packet that was enqueued using this table entry. 2953 * This guarantees that all previous packets for the flow 2954 * have been dequeued, thus preserving in order delivery. 
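	 *
	 * (The queue-head comparison below uses a signed difference,
	 * (int)(input_queue_head - last_qtail) >= 0, so it remains
	 * correct when the unsigned counters wrap.)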
2955 */ 2956 if (unlikely(tcpu != next_cpu) && 2957 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 2958 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 2959 rflow->last_qtail)) >= 0)) { 2960 tcpu = next_cpu; 2961 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 2962 } 2963 2964 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 2965 *rflowp = rflow; 2966 cpu = tcpu; 2967 goto done; 2968 } 2969 } 2970 2971 if (map) { 2972 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2973 2974 if (cpu_online(tcpu)) { 2975 cpu = tcpu; 2976 goto done; 2977 } 2978 } 2979 2980 done: 2981 return cpu; 2982 } 2983 2984 #ifdef CONFIG_RFS_ACCEL 2985 2986 /** 2987 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 2988 * @dev: Device on which the filter was set 2989 * @rxq_index: RX queue index 2990 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 2991 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 2992 * 2993 * Drivers that implement ndo_rx_flow_steer() should periodically call 2994 * this function for each installed filter and remove the filters for 2995 * which it returns %true. 2996 */ 2997 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 2998 u32 flow_id, u16 filter_id) 2999 { 3000 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 3001 struct rps_dev_flow_table *flow_table; 3002 struct rps_dev_flow *rflow; 3003 bool expire = true; 3004 int cpu; 3005 3006 rcu_read_lock(); 3007 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3008 if (flow_table && flow_id <= flow_table->mask) { 3009 rflow = &flow_table->flows[flow_id]; 3010 cpu = ACCESS_ONCE(rflow->cpu); 3011 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 3012 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 3013 rflow->last_qtail) < 3014 (int)(10 * flow_table->mask))) 3015 expire = false; 3016 } 3017 rcu_read_unlock(); 3018 return expire; 3019 } 3020 EXPORT_SYMBOL(rps_may_expire_flow); 3021 3022 #endif /* CONFIG_RFS_ACCEL */ 3023 3024 /* Called from hardirq (IPI) context */ 3025 static void rps_trigger_softirq(void *data) 3026 { 3027 struct softnet_data *sd = data; 3028 3029 ____napi_schedule(sd, &sd->backlog); 3030 sd->received_rps++; 3031 } 3032 3033 #endif /* CONFIG_RPS */ 3034 3035 /* 3036 * Check if this softnet_data structure is another cpu one 3037 * If yes, queue it to our IPI list and return 1 3038 * If no, return 0 3039 */ 3040 static int rps_ipi_queued(struct softnet_data *sd) 3041 { 3042 #ifdef CONFIG_RPS 3043 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 3044 3045 if (sd != mysd) { 3046 sd->rps_ipi_next = mysd->rps_ipi_list; 3047 mysd->rps_ipi_list = sd; 3048 3049 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3050 return 1; 3051 } 3052 #endif /* CONFIG_RPS */ 3053 return 0; 3054 } 3055 3056 /* 3057 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 3058 * queue (may be a remote CPU queue). 
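 *
 * With the per-cpu backlog lock held and irqs off, the skb is appended to
 * input_pkt_queue unless that queue already holds more than
 * netdev_max_backlog packets, in which case it is dropped and the device's
 * rx_dropped counter is bumped. If the queue was empty, the backlog NAPI
 * is scheduled, either locally or through an IPI queued for the owning
 * CPU by rps_ipi_queued().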
3059 */ 3060 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 3061 unsigned int *qtail) 3062 { 3063 struct softnet_data *sd; 3064 unsigned long flags; 3065 3066 sd = &per_cpu(softnet_data, cpu); 3067 3068 local_irq_save(flags); 3069 3070 rps_lock(sd); 3071 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { 3072 if (skb_queue_len(&sd->input_pkt_queue)) { 3073 enqueue: 3074 __skb_queue_tail(&sd->input_pkt_queue, skb); 3075 input_queue_tail_incr_save(sd, qtail); 3076 rps_unlock(sd); 3077 local_irq_restore(flags); 3078 return NET_RX_SUCCESS; 3079 } 3080 3081 /* Schedule NAPI for backlog device 3082 * We can use non atomic operation since we own the queue lock 3083 */ 3084 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 3085 if (!rps_ipi_queued(sd)) 3086 ____napi_schedule(sd, &sd->backlog); 3087 } 3088 goto enqueue; 3089 } 3090 3091 sd->dropped++; 3092 rps_unlock(sd); 3093 3094 local_irq_restore(flags); 3095 3096 atomic_long_inc(&skb->dev->rx_dropped); 3097 kfree_skb(skb); 3098 return NET_RX_DROP; 3099 } 3100 3101 /** 3102 * netif_rx - post buffer to the network code 3103 * @skb: buffer to post 3104 * 3105 * This function receives a packet from a device driver and queues it for 3106 * the upper (protocol) levels to process. It always succeeds. The buffer 3107 * may be dropped during processing for congestion control or by the 3108 * protocol layers. 3109 * 3110 * return values: 3111 * NET_RX_SUCCESS (no congestion) 3112 * NET_RX_DROP (packet was dropped) 3113 * 3114 */ 3115 3116 int netif_rx(struct sk_buff *skb) 3117 { 3118 int ret; 3119 3120 /* if netpoll wants it, pretend we never saw it */ 3121 if (netpoll_rx(skb)) 3122 return NET_RX_DROP; 3123 3124 net_timestamp_check(netdev_tstamp_prequeue, skb); 3125 3126 trace_netif_rx(skb); 3127 #ifdef CONFIG_RPS 3128 if (static_key_false(&rps_needed)) { 3129 struct rps_dev_flow voidflow, *rflow = &voidflow; 3130 int cpu; 3131 3132 preempt_disable(); 3133 rcu_read_lock(); 3134 3135 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3136 if (cpu < 0) 3137 cpu = smp_processor_id(); 3138 3139 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3140 3141 rcu_read_unlock(); 3142 preempt_enable(); 3143 } else 3144 #endif 3145 { 3146 unsigned int qtail; 3147 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 3148 put_cpu(); 3149 } 3150 return ret; 3151 } 3152 EXPORT_SYMBOL(netif_rx); 3153 3154 int netif_rx_ni(struct sk_buff *skb) 3155 { 3156 int err; 3157 3158 preempt_disable(); 3159 err = netif_rx(skb); 3160 if (local_softirq_pending()) 3161 do_softirq(); 3162 preempt_enable(); 3163 3164 return err; 3165 } 3166 EXPORT_SYMBOL(netif_rx_ni); 3167 3168 static void net_tx_action(struct softirq_action *h) 3169 { 3170 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3171 3172 if (sd->completion_queue) { 3173 struct sk_buff *clist; 3174 3175 local_irq_disable(); 3176 clist = sd->completion_queue; 3177 sd->completion_queue = NULL; 3178 local_irq_enable(); 3179 3180 while (clist) { 3181 struct sk_buff *skb = clist; 3182 clist = clist->next; 3183 3184 WARN_ON(atomic_read(&skb->users)); 3185 trace_kfree_skb(skb, net_tx_action); 3186 __kfree_skb(skb); 3187 } 3188 } 3189 3190 if (sd->output_queue) { 3191 struct Qdisc *head; 3192 3193 local_irq_disable(); 3194 head = sd->output_queue; 3195 sd->output_queue = NULL; 3196 sd->output_queue_tailp = &sd->output_queue; 3197 local_irq_enable(); 3198 3199 while (head) { 3200 struct Qdisc *q = head; 3201 spinlock_t *root_lock; 3202 3203 head = head->next_sched; 3204 3205 root_lock = 
qdisc_lock(q); 3206 if (spin_trylock(root_lock)) { 3207 smp_mb__before_clear_bit(); 3208 clear_bit(__QDISC_STATE_SCHED, 3209 &q->state); 3210 qdisc_run(q); 3211 spin_unlock(root_lock); 3212 } else { 3213 if (!test_bit(__QDISC_STATE_DEACTIVATED, 3214 &q->state)) { 3215 __netif_reschedule(q); 3216 } else { 3217 smp_mb__before_clear_bit(); 3218 clear_bit(__QDISC_STATE_SCHED, 3219 &q->state); 3220 } 3221 } 3222 } 3223 } 3224 } 3225 3226 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ 3227 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) 3228 /* This hook is defined here for ATM LANE */ 3229 int (*br_fdb_test_addr_hook)(struct net_device *dev, 3230 unsigned char *addr) __read_mostly; 3231 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 3232 #endif 3233 3234 #ifdef CONFIG_NET_CLS_ACT 3235 /* TODO: Maybe we should just force sch_ingress to be compiled in 3236 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 3237 * a compare and 2 stores extra right now if we dont have it on 3238 * but have CONFIG_NET_CLS_ACT 3239 * NOTE: This doesn't stop any functionality; if you dont have 3240 * the ingress scheduler, you just can't add policies on ingress. 3241 * 3242 */ 3243 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 3244 { 3245 struct net_device *dev = skb->dev; 3246 u32 ttl = G_TC_RTTL(skb->tc_verd); 3247 int result = TC_ACT_OK; 3248 struct Qdisc *q; 3249 3250 if (unlikely(MAX_RED_LOOP < ttl++)) { 3251 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n", 3252 skb->skb_iif, dev->ifindex); 3253 return TC_ACT_SHOT; 3254 } 3255 3256 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 3257 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 3258 3259 q = rxq->qdisc; 3260 if (q != &noop_qdisc) { 3261 spin_lock(qdisc_lock(q)); 3262 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 3263 result = qdisc_enqueue_root(skb, q); 3264 spin_unlock(qdisc_lock(q)); 3265 } 3266 3267 return result; 3268 } 3269 3270 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 3271 struct packet_type **pt_prev, 3272 int *ret, struct net_device *orig_dev) 3273 { 3274 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3275 3276 if (!rxq || rxq->qdisc == &noop_qdisc) 3277 goto out; 3278 3279 if (*pt_prev) { 3280 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3281 *pt_prev = NULL; 3282 } 3283 3284 switch (ing_filter(skb, rxq)) { 3285 case TC_ACT_SHOT: 3286 case TC_ACT_STOLEN: 3287 kfree_skb(skb); 3288 return NULL; 3289 } 3290 3291 out: 3292 skb->tc_verd = 0; 3293 return skb; 3294 } 3295 #endif 3296 3297 /** 3298 * netdev_rx_handler_register - register receive handler 3299 * @dev: device to register a handler for 3300 * @rx_handler: receive handler to register 3301 * @rx_handler_data: data pointer that is used by rx handler 3302 * 3303 * Register a receive hander for a device. This handler will then be 3304 * called from __netif_receive_skb. A negative errno code is returned 3305 * on a failure. 3306 * 3307 * The caller must hold the rtnl_mutex. 3308 * 3309 * For a general description of rx_handler, see enum rx_handler_result. 
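 *
 * Illustrative only (hypothetical caller, e.g. a bridge-like module
 * enslaving a port device, with rtnl held):
 *
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 *	if (err)
 *		goto err_unslave;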
3310 */ 3311 int netdev_rx_handler_register(struct net_device *dev, 3312 rx_handler_func_t *rx_handler, 3313 void *rx_handler_data) 3314 { 3315 ASSERT_RTNL(); 3316 3317 if (dev->rx_handler) 3318 return -EBUSY; 3319 3320 /* Note: rx_handler_data must be set before rx_handler */ 3321 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3322 rcu_assign_pointer(dev->rx_handler, rx_handler); 3323 3324 return 0; 3325 } 3326 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 3327 3328 /** 3329 * netdev_rx_handler_unregister - unregister receive handler 3330 * @dev: device to unregister a handler from 3331 * 3332 * Unregister a receive hander from a device. 3333 * 3334 * The caller must hold the rtnl_mutex. 3335 */ 3336 void netdev_rx_handler_unregister(struct net_device *dev) 3337 { 3338 3339 ASSERT_RTNL(); 3340 RCU_INIT_POINTER(dev->rx_handler, NULL); 3341 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 3342 * section has a guarantee to see a non NULL rx_handler_data 3343 * as well. 3344 */ 3345 synchronize_net(); 3346 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3347 } 3348 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3349 3350 /* 3351 * Limit the use of PFMEMALLOC reserves to those protocols that implement 3352 * the special handling of PFMEMALLOC skbs. 3353 */ 3354 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 3355 { 3356 switch (skb->protocol) { 3357 case __constant_htons(ETH_P_ARP): 3358 case __constant_htons(ETH_P_IP): 3359 case __constant_htons(ETH_P_IPV6): 3360 case __constant_htons(ETH_P_8021Q): 3361 return true; 3362 default: 3363 return false; 3364 } 3365 } 3366 3367 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) 3368 { 3369 struct packet_type *ptype, *pt_prev; 3370 rx_handler_func_t *rx_handler; 3371 struct net_device *orig_dev; 3372 struct net_device *null_or_dev; 3373 bool deliver_exact = false; 3374 int ret = NET_RX_DROP; 3375 __be16 type; 3376 3377 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3378 3379 trace_netif_receive_skb(skb); 3380 3381 /* if we've gotten here through NAPI, check netpoll */ 3382 if (netpoll_receive_skb(skb)) 3383 goto out; 3384 3385 orig_dev = skb->dev; 3386 3387 skb_reset_network_header(skb); 3388 if (!skb_transport_header_was_set(skb)) 3389 skb_reset_transport_header(skb); 3390 skb_reset_mac_len(skb); 3391 3392 pt_prev = NULL; 3393 3394 rcu_read_lock(); 3395 3396 another_round: 3397 skb->skb_iif = skb->dev->ifindex; 3398 3399 __this_cpu_inc(softnet_data.processed); 3400 3401 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { 3402 skb = vlan_untag(skb); 3403 if (unlikely(!skb)) 3404 goto unlock; 3405 } 3406 3407 #ifdef CONFIG_NET_CLS_ACT 3408 if (skb->tc_verd & TC_NCLS) { 3409 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3410 goto ncls; 3411 } 3412 #endif 3413 3414 if (pfmemalloc) 3415 goto skip_taps; 3416 3417 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3418 if (!ptype->dev || ptype->dev == skb->dev) { 3419 if (pt_prev) 3420 ret = deliver_skb(skb, pt_prev, orig_dev); 3421 pt_prev = ptype; 3422 } 3423 } 3424 3425 skip_taps: 3426 #ifdef CONFIG_NET_CLS_ACT 3427 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3428 if (!skb) 3429 goto unlock; 3430 ncls: 3431 #endif 3432 3433 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 3434 goto drop; 3435 3436 if (vlan_tx_tag_present(skb)) { 3437 if (pt_prev) { 3438 ret = deliver_skb(skb, pt_prev, orig_dev); 3439 pt_prev = NULL; 3440 } 3441 if (vlan_do_receive(&skb)) 3442 goto another_round; 3443 else if (unlikely(!skb)) 3444 goto unlock; 3445 } 3446 3447 
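	/* Hand the skb to the device's rx_handler (bridge, bonding,
	 * macvlan, team, ...) if one is registered. The handler may
	 * consume the skb, ask for another round with a new skb->dev,
	 * restrict delivery to exact-match taps, or pass it through
	 * unchanged (see enum rx_handler_result).
	 */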
rx_handler = rcu_dereference(skb->dev->rx_handler); 3448 if (rx_handler) { 3449 if (pt_prev) { 3450 ret = deliver_skb(skb, pt_prev, orig_dev); 3451 pt_prev = NULL; 3452 } 3453 switch (rx_handler(&skb)) { 3454 case RX_HANDLER_CONSUMED: 3455 ret = NET_RX_SUCCESS; 3456 goto unlock; 3457 case RX_HANDLER_ANOTHER: 3458 goto another_round; 3459 case RX_HANDLER_EXACT: 3460 deliver_exact = true; 3461 case RX_HANDLER_PASS: 3462 break; 3463 default: 3464 BUG(); 3465 } 3466 } 3467 3468 if (vlan_tx_nonzero_tag_present(skb)) 3469 skb->pkt_type = PACKET_OTHERHOST; 3470 3471 /* deliver only exact match when indicated */ 3472 null_or_dev = deliver_exact ? skb->dev : NULL; 3473 3474 type = skb->protocol; 3475 list_for_each_entry_rcu(ptype, 3476 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3477 if (ptype->type == type && 3478 (ptype->dev == null_or_dev || ptype->dev == skb->dev || 3479 ptype->dev == orig_dev)) { 3480 if (pt_prev) 3481 ret = deliver_skb(skb, pt_prev, orig_dev); 3482 pt_prev = ptype; 3483 } 3484 } 3485 3486 if (pt_prev) { 3487 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 3488 goto drop; 3489 else 3490 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3491 } else { 3492 drop: 3493 atomic_long_inc(&skb->dev->rx_dropped); 3494 kfree_skb(skb); 3495 /* Jamal, now you will not able to escape explaining 3496 * me how you were going to use this. :-) 3497 */ 3498 ret = NET_RX_DROP; 3499 } 3500 3501 unlock: 3502 rcu_read_unlock(); 3503 out: 3504 return ret; 3505 } 3506 3507 static int __netif_receive_skb(struct sk_buff *skb) 3508 { 3509 int ret; 3510 3511 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 3512 unsigned long pflags = current->flags; 3513 3514 /* 3515 * PFMEMALLOC skbs are special, they should 3516 * - be delivered to SOCK_MEMALLOC sockets only 3517 * - stay away from userspace 3518 * - have bounded memory usage 3519 * 3520 * Use PF_MEMALLOC as this saves us from propagating the allocation 3521 * context down to all allocation sites. 3522 */ 3523 current->flags |= PF_MEMALLOC; 3524 ret = __netif_receive_skb_core(skb, true); 3525 tsk_restore_flags(current, pflags, PF_MEMALLOC); 3526 } else 3527 ret = __netif_receive_skb_core(skb, false); 3528 3529 return ret; 3530 } 3531 3532 /** 3533 * netif_receive_skb - process receive buffer from network 3534 * @skb: buffer to process 3535 * 3536 * netif_receive_skb() is the main receive data processing function. 3537 * It always succeeds. The buffer may be dropped during processing 3538 * for congestion control or by the protocol layers. 3539 * 3540 * This function may only be called from softirq context and interrupts 3541 * should be enabled. 3542 * 3543 * Return values (usually ignored): 3544 * NET_RX_SUCCESS: no congestion 3545 * NET_RX_DROP: packet was dropped 3546 */ 3547 int netif_receive_skb(struct sk_buff *skb) 3548 { 3549 net_timestamp_check(netdev_tstamp_prequeue, skb); 3550 3551 if (skb_defer_rx_timestamp(skb)) 3552 return NET_RX_SUCCESS; 3553 3554 #ifdef CONFIG_RPS 3555 if (static_key_false(&rps_needed)) { 3556 struct rps_dev_flow voidflow, *rflow = &voidflow; 3557 int cpu, ret; 3558 3559 rcu_read_lock(); 3560 3561 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3562 3563 if (cpu >= 0) { 3564 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3565 rcu_read_unlock(); 3566 return ret; 3567 } 3568 rcu_read_unlock(); 3569 } 3570 #endif 3571 return __netif_receive_skb(skb); 3572 } 3573 EXPORT_SYMBOL(netif_receive_skb); 3574 3575 /* Network device is going away, flush any packets still pending 3576 * Called with irqs disabled. 
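 * It walks both input_pkt_queue and process_queue of this CPU's
 * softnet_data and frees every skb whose skb->dev points at the dying
 * device, advancing the queue head counter for each one removed.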
3577 */ 3578 static void flush_backlog(void *arg) 3579 { 3580 struct net_device *dev = arg; 3581 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3582 struct sk_buff *skb, *tmp; 3583 3584 rps_lock(sd); 3585 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3586 if (skb->dev == dev) { 3587 __skb_unlink(skb, &sd->input_pkt_queue); 3588 kfree_skb(skb); 3589 input_queue_head_incr(sd); 3590 } 3591 } 3592 rps_unlock(sd); 3593 3594 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3595 if (skb->dev == dev) { 3596 __skb_unlink(skb, &sd->process_queue); 3597 kfree_skb(skb); 3598 input_queue_head_incr(sd); 3599 } 3600 } 3601 } 3602 3603 static int napi_gro_complete(struct sk_buff *skb) 3604 { 3605 struct packet_offload *ptype; 3606 __be16 type = skb->protocol; 3607 struct list_head *head = &offload_base; 3608 int err = -ENOENT; 3609 3610 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 3611 3612 if (NAPI_GRO_CB(skb)->count == 1) { 3613 skb_shinfo(skb)->gso_size = 0; 3614 goto out; 3615 } 3616 3617 rcu_read_lock(); 3618 list_for_each_entry_rcu(ptype, head, list) { 3619 if (ptype->type != type || !ptype->callbacks.gro_complete) 3620 continue; 3621 3622 err = ptype->callbacks.gro_complete(skb); 3623 break; 3624 } 3625 rcu_read_unlock(); 3626 3627 if (err) { 3628 WARN_ON(&ptype->list == head); 3629 kfree_skb(skb); 3630 return NET_RX_SUCCESS; 3631 } 3632 3633 out: 3634 return netif_receive_skb(skb); 3635 } 3636 3637 /* napi->gro_list contains packets ordered by age. 3638 * youngest packets at the head of it. 3639 * Complete skbs in reverse order to reduce latencies. 3640 */ 3641 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 3642 { 3643 struct sk_buff *skb, *prev = NULL; 3644 3645 /* scan list and build reverse chain */ 3646 for (skb = napi->gro_list; skb != NULL; skb = skb->next) { 3647 skb->prev = prev; 3648 prev = skb; 3649 } 3650 3651 for (skb = prev; skb; skb = prev) { 3652 skb->next = NULL; 3653 3654 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 3655 return; 3656 3657 prev = skb->prev; 3658 napi_gro_complete(skb); 3659 napi->gro_count--; 3660 } 3661 3662 napi->gro_list = NULL; 3663 } 3664 EXPORT_SYMBOL(napi_gro_flush); 3665 3666 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) 3667 { 3668 struct sk_buff *p; 3669 unsigned int maclen = skb->dev->hard_header_len; 3670 3671 for (p = napi->gro_list; p; p = p->next) { 3672 unsigned long diffs; 3673 3674 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3675 diffs |= p->vlan_tci ^ skb->vlan_tci; 3676 if (maclen == ETH_HLEN) 3677 diffs |= compare_ether_header(skb_mac_header(p), 3678 skb_gro_mac_header(skb)); 3679 else if (!diffs) 3680 diffs = memcmp(skb_mac_header(p), 3681 skb_gro_mac_header(skb), 3682 maclen); 3683 NAPI_GRO_CB(p)->same_flow = !diffs; 3684 NAPI_GRO_CB(p)->flush = 0; 3685 } 3686 } 3687 3688 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3689 { 3690 struct sk_buff **pp = NULL; 3691 struct packet_offload *ptype; 3692 __be16 type = skb->protocol; 3693 struct list_head *head = &offload_base; 3694 int same_flow; 3695 enum gro_result ret; 3696 3697 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3698 goto normal; 3699 3700 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3701 goto normal; 3702 3703 gro_list_prepare(napi, skb); 3704 3705 rcu_read_lock(); 3706 list_for_each_entry_rcu(ptype, head, list) { 3707 if (ptype->type != type || !ptype->callbacks.gro_receive) 3708 continue; 3709 3710 skb_set_network_header(skb, 
skb_gro_offset(skb)); 3711 skb_reset_mac_len(skb); 3712 NAPI_GRO_CB(skb)->same_flow = 0; 3713 NAPI_GRO_CB(skb)->flush = 0; 3714 NAPI_GRO_CB(skb)->free = 0; 3715 3716 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); 3717 break; 3718 } 3719 rcu_read_unlock(); 3720 3721 if (&ptype->list == head) 3722 goto normal; 3723 3724 same_flow = NAPI_GRO_CB(skb)->same_flow; 3725 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; 3726 3727 if (pp) { 3728 struct sk_buff *nskb = *pp; 3729 3730 *pp = nskb->next; 3731 nskb->next = NULL; 3732 napi_gro_complete(nskb); 3733 napi->gro_count--; 3734 } 3735 3736 if (same_flow) 3737 goto ok; 3738 3739 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) 3740 goto normal; 3741 3742 napi->gro_count++; 3743 NAPI_GRO_CB(skb)->count = 1; 3744 NAPI_GRO_CB(skb)->age = jiffies; 3745 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3746 skb->next = napi->gro_list; 3747 napi->gro_list = skb; 3748 ret = GRO_HELD; 3749 3750 pull: 3751 if (skb_headlen(skb) < skb_gro_offset(skb)) { 3752 int grow = skb_gro_offset(skb) - skb_headlen(skb); 3753 3754 BUG_ON(skb->end - skb->tail < grow); 3755 3756 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3757 3758 skb->tail += grow; 3759 skb->data_len -= grow; 3760 3761 skb_shinfo(skb)->frags[0].page_offset += grow; 3762 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); 3763 3764 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { 3765 skb_frag_unref(skb, 0); 3766 memmove(skb_shinfo(skb)->frags, 3767 skb_shinfo(skb)->frags + 1, 3768 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3769 } 3770 } 3771 3772 ok: 3773 return ret; 3774 3775 normal: 3776 ret = GRO_NORMAL; 3777 goto pull; 3778 } 3779 3780 3781 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 3782 { 3783 switch (ret) { 3784 case GRO_NORMAL: 3785 if (netif_receive_skb(skb)) 3786 ret = GRO_DROP; 3787 break; 3788 3789 case GRO_DROP: 3790 kfree_skb(skb); 3791 break; 3792 3793 case GRO_MERGED_FREE: 3794 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 3795 kmem_cache_free(skbuff_head_cache, skb); 3796 else 3797 __kfree_skb(skb); 3798 break; 3799 3800 case GRO_HELD: 3801 case GRO_MERGED: 3802 break; 3803 } 3804 3805 return ret; 3806 } 3807 3808 static void skb_gro_reset_offset(struct sk_buff *skb) 3809 { 3810 const struct skb_shared_info *pinfo = skb_shinfo(skb); 3811 const skb_frag_t *frag0 = &pinfo->frags[0]; 3812 3813 NAPI_GRO_CB(skb)->data_offset = 0; 3814 NAPI_GRO_CB(skb)->frag0 = NULL; 3815 NAPI_GRO_CB(skb)->frag0_len = 0; 3816 3817 if (skb->mac_header == skb->tail && 3818 pinfo->nr_frags && 3819 !PageHighMem(skb_frag_page(frag0))) { 3820 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 3821 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); 3822 } 3823 } 3824 3825 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3826 { 3827 skb_gro_reset_offset(skb); 3828 3829 return napi_skb_finish(dev_gro_receive(napi, skb), skb); 3830 } 3831 EXPORT_SYMBOL(napi_gro_receive); 3832 3833 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3834 { 3835 __skb_pull(skb, skb_headlen(skb)); 3836 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 3837 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 3838 skb->vlan_tci = 0; 3839 skb->dev = napi->dev; 3840 skb->skb_iif = 0; 3841 3842 napi->skb = skb; 3843 } 3844 3845 struct sk_buff *napi_get_frags(struct napi_struct *napi) 3846 { 3847 struct sk_buff *skb = napi->skb; 3848 3849 if (!skb) { 
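		/* No skb left over from a previous aborted GRO pass:
		 * allocate one sized for the largest header GRO may pull
		 * (GRO_MAX_HEAD). The driver then attaches page fragments
		 * to it and feeds it back through napi_gro_frags().
		 */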
3850 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 3851 if (skb) 3852 napi->skb = skb; 3853 } 3854 return skb; 3855 } 3856 EXPORT_SYMBOL(napi_get_frags); 3857 3858 static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 3859 gro_result_t ret) 3860 { 3861 switch (ret) { 3862 case GRO_NORMAL: 3863 case GRO_HELD: 3864 skb->protocol = eth_type_trans(skb, skb->dev); 3865 3866 if (ret == GRO_HELD) 3867 skb_gro_pull(skb, -ETH_HLEN); 3868 else if (netif_receive_skb(skb)) 3869 ret = GRO_DROP; 3870 break; 3871 3872 case GRO_DROP: 3873 case GRO_MERGED_FREE: 3874 napi_reuse_skb(napi, skb); 3875 break; 3876 3877 case GRO_MERGED: 3878 break; 3879 } 3880 3881 return ret; 3882 } 3883 3884 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3885 { 3886 struct sk_buff *skb = napi->skb; 3887 struct ethhdr *eth; 3888 unsigned int hlen; 3889 unsigned int off; 3890 3891 napi->skb = NULL; 3892 3893 skb_reset_mac_header(skb); 3894 skb_gro_reset_offset(skb); 3895 3896 off = skb_gro_offset(skb); 3897 hlen = off + sizeof(*eth); 3898 eth = skb_gro_header_fast(skb, off); 3899 if (skb_gro_header_hard(skb, hlen)) { 3900 eth = skb_gro_header_slow(skb, hlen, off); 3901 if (unlikely(!eth)) { 3902 napi_reuse_skb(napi, skb); 3903 skb = NULL; 3904 goto out; 3905 } 3906 } 3907 3908 skb_gro_pull(skb, sizeof(*eth)); 3909 3910 /* 3911 * This works because the only protocols we care about don't require 3912 * special handling. We'll fix it up properly at the end. 3913 */ 3914 skb->protocol = eth->h_proto; 3915 3916 out: 3917 return skb; 3918 } 3919 3920 gro_result_t napi_gro_frags(struct napi_struct *napi) 3921 { 3922 struct sk_buff *skb = napi_frags_skb(napi); 3923 3924 if (!skb) 3925 return GRO_DROP; 3926 3927 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 3928 } 3929 EXPORT_SYMBOL(napi_gro_frags); 3930 3931 /* 3932 * net_rps_action sends any pending IPI's for rps. 3933 * Note: called with local irq disabled, but exits with local irq enabled. 3934 */ 3935 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 3936 { 3937 #ifdef CONFIG_RPS 3938 struct softnet_data *remsd = sd->rps_ipi_list; 3939 3940 if (remsd) { 3941 sd->rps_ipi_list = NULL; 3942 3943 local_irq_enable(); 3944 3945 /* Send pending IPI's to kick RPS processing on remote cpus. */ 3946 while (remsd) { 3947 struct softnet_data *next = remsd->rps_ipi_next; 3948 3949 if (cpu_online(remsd->cpu)) 3950 __smp_call_function_single(remsd->cpu, 3951 &remsd->csd, 0); 3952 remsd = next; 3953 } 3954 } else 3955 #endif 3956 local_irq_enable(); 3957 } 3958 3959 static int process_backlog(struct napi_struct *napi, int quota) 3960 { 3961 int work = 0; 3962 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 3963 3964 #ifdef CONFIG_RPS 3965 /* Check if we have pending ipi, its better to send them now, 3966 * not waiting net_rx_action() end. 
3967 */ 3968 if (sd->rps_ipi_list) { 3969 local_irq_disable(); 3970 net_rps_action_and_irq_enable(sd); 3971 } 3972 #endif 3973 napi->weight = weight_p; 3974 local_irq_disable(); 3975 while (work < quota) { 3976 struct sk_buff *skb; 3977 unsigned int qlen; 3978 3979 while ((skb = __skb_dequeue(&sd->process_queue))) { 3980 local_irq_enable(); 3981 __netif_receive_skb(skb); 3982 local_irq_disable(); 3983 input_queue_head_incr(sd); 3984 if (++work >= quota) { 3985 local_irq_enable(); 3986 return work; 3987 } 3988 } 3989 3990 rps_lock(sd); 3991 qlen = skb_queue_len(&sd->input_pkt_queue); 3992 if (qlen) 3993 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3994 &sd->process_queue); 3995 3996 if (qlen < quota - work) { 3997 /* 3998 * Inline a custom version of __napi_complete(). 3999 * only current cpu owns and manipulates this napi, 4000 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 4001 * we can use a plain write instead of clear_bit(), 4002 * and we dont need an smp_mb() memory barrier. 4003 */ 4004 list_del(&napi->poll_list); 4005 napi->state = 0; 4006 4007 quota = work + qlen; 4008 } 4009 rps_unlock(sd); 4010 } 4011 local_irq_enable(); 4012 4013 return work; 4014 } 4015 4016 /** 4017 * __napi_schedule - schedule for receive 4018 * @n: entry to schedule 4019 * 4020 * The entry's receive function will be scheduled to run 4021 */ 4022 void __napi_schedule(struct napi_struct *n) 4023 { 4024 unsigned long flags; 4025 4026 local_irq_save(flags); 4027 ____napi_schedule(&__get_cpu_var(softnet_data), n); 4028 local_irq_restore(flags); 4029 } 4030 EXPORT_SYMBOL(__napi_schedule); 4031 4032 void __napi_complete(struct napi_struct *n) 4033 { 4034 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 4035 BUG_ON(n->gro_list); 4036 4037 list_del(&n->poll_list); 4038 smp_mb__before_clear_bit(); 4039 clear_bit(NAPI_STATE_SCHED, &n->state); 4040 } 4041 EXPORT_SYMBOL(__napi_complete); 4042 4043 void napi_complete(struct napi_struct *n) 4044 { 4045 unsigned long flags; 4046 4047 /* 4048 * don't let napi dequeue from the cpu poll list 4049 * just in case its running on a different cpu 4050 */ 4051 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 4052 return; 4053 4054 napi_gro_flush(n, false); 4055 local_irq_save(flags); 4056 __napi_complete(n); 4057 local_irq_restore(flags); 4058 } 4059 EXPORT_SYMBOL(napi_complete); 4060 4061 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 4062 int (*poll)(struct napi_struct *, int), int weight) 4063 { 4064 INIT_LIST_HEAD(&napi->poll_list); 4065 napi->gro_count = 0; 4066 napi->gro_list = NULL; 4067 napi->skb = NULL; 4068 napi->poll = poll; 4069 napi->weight = weight; 4070 list_add(&napi->dev_list, &dev->napi_list); 4071 napi->dev = dev; 4072 #ifdef CONFIG_NETPOLL 4073 spin_lock_init(&napi->poll_lock); 4074 napi->poll_owner = -1; 4075 #endif 4076 set_bit(NAPI_STATE_SCHED, &napi->state); 4077 } 4078 EXPORT_SYMBOL(netif_napi_add); 4079 4080 void netif_napi_del(struct napi_struct *napi) 4081 { 4082 struct sk_buff *skb, *next; 4083 4084 list_del_init(&napi->dev_list); 4085 napi_free_frags(napi); 4086 4087 for (skb = napi->gro_list; skb; skb = next) { 4088 next = skb->next; 4089 skb->next = NULL; 4090 kfree_skb(skb); 4091 } 4092 4093 napi->gro_list = NULL; 4094 napi->gro_count = 0; 4095 } 4096 EXPORT_SYMBOL(netif_napi_del); 4097 4098 static void net_rx_action(struct softirq_action *h) 4099 { 4100 struct softnet_data *sd = &__get_cpu_var(softnet_data); 4101 unsigned long time_limit = jiffies + 2; 4102 int budget = netdev_budget; 4103 void *have; 
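	/* Poll the scheduled NAPI instances in order until either the
	 * netdev_budget packet budget is spent or two jiffies have
	 * elapsed; anything left over is deferred to the next
	 * NET_RX_SOFTIRQ round via softnet_break below.
	 */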
4104 4105 local_irq_disable(); 4106 4107 while (!list_empty(&sd->poll_list)) { 4108 struct napi_struct *n; 4109 int work, weight; 4110
4111 /* If the softirq window is exhausted then punt. 4112 * Allow this to run for 2 jiffies, which allows 4113 * an average latency of 1.5/HZ. 4114 */ 4115 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) 4116 goto softnet_break; 4117 4118 local_irq_enable(); 4119
4120 /* Even though interrupts have been re-enabled, this 4121 * access is safe because interrupts can only add new 4122 * entries to the tail of this list, and only ->poll() 4123 * calls can remove this head entry from the list. 4124 */ 4125 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); 4126 4127 have = netpoll_poll_lock(n); 4128 4129 weight = n->weight; 4130
4131 /* This NAPI_STATE_SCHED test is for avoiding a race 4132 * with netpoll's poll_napi(). Only the entity which 4133 * obtains the lock and sees NAPI_STATE_SCHED set will 4134 * actually make the ->poll() call. Therefore we avoid 4135 * accidentally calling ->poll() when NAPI is not scheduled. 4136 */ 4137 work = 0; 4138 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 4139 work = n->poll(n, weight); 4140 trace_napi_poll(n); 4141 } 4142 4143 WARN_ON_ONCE(work > weight); 4144 4145 budget -= work; 4146 4147 local_irq_disable(); 4148
4149 /* Drivers must not modify the NAPI state if they 4150 * consume the entire weight. In such cases this code 4151 * still "owns" the NAPI instance and therefore can 4152 * move the instance around on the list at will. 4153 */ 4154 if (unlikely(work == weight)) { 4155 if (unlikely(napi_disable_pending(n))) { 4156 local_irq_enable(); 4157 napi_complete(n); 4158 local_irq_disable(); 4159 } else { 4160 if (n->gro_list) { 4161 /* Flush packets that are too old. 4162 * If HZ < 1000, flush all packets.
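				 * (with HZ < 1000 a jiffy is too long a
				 * time to keep a packet queued, so flush
				 * everything rather than only old packets)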
4163 */ 4164 local_irq_enable(); 4165 napi_gro_flush(n, HZ >= 1000); 4166 local_irq_disable(); 4167 } 4168 list_move_tail(&n->poll_list, &sd->poll_list); 4169 } 4170 } 4171 4172 netpoll_poll_unlock(have); 4173 } 4174 out: 4175 net_rps_action_and_irq_enable(sd); 4176 4177 #ifdef CONFIG_NET_DMA 4178 /* 4179 * There may not be any more sk_buffs coming right now, so push 4180 * any pending DMA copies to hardware 4181 */ 4182 dma_issue_pending_all(); 4183 #endif 4184 4185 return; 4186 4187 softnet_break: 4188 sd->time_squeeze++; 4189 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4190 goto out; 4191 } 4192 4193 struct netdev_upper { 4194 struct net_device *dev; 4195 bool master; 4196 struct list_head list; 4197 struct rcu_head rcu; 4198 struct list_head search_list; 4199 }; 4200 4201 static void __append_search_uppers(struct list_head *search_list, 4202 struct net_device *dev) 4203 { 4204 struct netdev_upper *upper; 4205 4206 list_for_each_entry(upper, &dev->upper_dev_list, list) { 4207 /* check if this upper is not already in search list */ 4208 if (list_empty(&upper->search_list)) 4209 list_add_tail(&upper->search_list, search_list); 4210 } 4211 } 4212 4213 static bool __netdev_search_upper_dev(struct net_device *dev, 4214 struct net_device *upper_dev) 4215 { 4216 LIST_HEAD(search_list); 4217 struct netdev_upper *upper; 4218 struct netdev_upper *tmp; 4219 bool ret = false; 4220 4221 __append_search_uppers(&search_list, dev); 4222 list_for_each_entry(upper, &search_list, search_list) { 4223 if (upper->dev == upper_dev) { 4224 ret = true; 4225 break; 4226 } 4227 __append_search_uppers(&search_list, upper->dev); 4228 } 4229 list_for_each_entry_safe(upper, tmp, &search_list, search_list) 4230 INIT_LIST_HEAD(&upper->search_list); 4231 return ret; 4232 } 4233 4234 static struct netdev_upper *__netdev_find_upper(struct net_device *dev, 4235 struct net_device *upper_dev) 4236 { 4237 struct netdev_upper *upper; 4238 4239 list_for_each_entry(upper, &dev->upper_dev_list, list) { 4240 if (upper->dev == upper_dev) 4241 return upper; 4242 } 4243 return NULL; 4244 } 4245 4246 /** 4247 * netdev_has_upper_dev - Check if device is linked to an upper device 4248 * @dev: device 4249 * @upper_dev: upper device to check 4250 * 4251 * Find out if a device is linked to specified upper device and return true 4252 * in case it is. Note that this checks only immediate upper device, 4253 * not through a complete stack of devices. The caller must hold the RTNL lock. 4254 */ 4255 bool netdev_has_upper_dev(struct net_device *dev, 4256 struct net_device *upper_dev) 4257 { 4258 ASSERT_RTNL(); 4259 4260 return __netdev_find_upper(dev, upper_dev); 4261 } 4262 EXPORT_SYMBOL(netdev_has_upper_dev); 4263 4264 /** 4265 * netdev_has_any_upper_dev - Check if device is linked to some device 4266 * @dev: device 4267 * 4268 * Find out if a device is linked to an upper device and return true in case 4269 * it is. The caller must hold the RTNL lock. 4270 */ 4271 bool netdev_has_any_upper_dev(struct net_device *dev) 4272 { 4273 ASSERT_RTNL(); 4274 4275 return !list_empty(&dev->upper_dev_list); 4276 } 4277 EXPORT_SYMBOL(netdev_has_any_upper_dev); 4278 4279 /** 4280 * netdev_master_upper_dev_get - Get master upper device 4281 * @dev: device 4282 * 4283 * Find a master upper device and return pointer to it or NULL in case 4284 * it's not there. The caller must hold the RTNL lock. 
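 *
 * Illustrative sketch (hypothetical "slave_dev"), with RTNL held:
 *
 *	struct net_device *master;
 *
 *	master = netdev_master_upper_dev_get(slave_dev);
 *	if (master)
 *		netdev_info(slave_dev, "master is %s\n", master->name);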
4285 */ 4286 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 4287 { 4288 struct netdev_upper *upper; 4289 4290 ASSERT_RTNL(); 4291 4292 if (list_empty(&dev->upper_dev_list)) 4293 return NULL; 4294 4295 upper = list_first_entry(&dev->upper_dev_list, 4296 struct netdev_upper, list); 4297 if (likely(upper->master)) 4298 return upper->dev; 4299 return NULL; 4300 } 4301 EXPORT_SYMBOL(netdev_master_upper_dev_get); 4302 4303 /** 4304 * netdev_master_upper_dev_get_rcu - Get master upper device 4305 * @dev: device 4306 * 4307 * Find a master upper device and return pointer to it or NULL in case 4308 * it's not there. The caller must hold the RCU read lock. 4309 */ 4310 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 4311 { 4312 struct netdev_upper *upper; 4313 4314 upper = list_first_or_null_rcu(&dev->upper_dev_list, 4315 struct netdev_upper, list); 4316 if (upper && likely(upper->master)) 4317 return upper->dev; 4318 return NULL; 4319 } 4320 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 4321 4322 static int __netdev_upper_dev_link(struct net_device *dev, 4323 struct net_device *upper_dev, bool master) 4324 { 4325 struct netdev_upper *upper; 4326 4327 ASSERT_RTNL(); 4328 4329 if (dev == upper_dev) 4330 return -EBUSY; 4331 4332 /* To prevent loops, check if dev is not upper device to upper_dev. */ 4333 if (__netdev_search_upper_dev(upper_dev, dev)) 4334 return -EBUSY; 4335 4336 if (__netdev_find_upper(dev, upper_dev)) 4337 return -EEXIST; 4338 4339 if (master && netdev_master_upper_dev_get(dev)) 4340 return -EBUSY; 4341 4342 upper = kmalloc(sizeof(*upper), GFP_KERNEL); 4343 if (!upper) 4344 return -ENOMEM; 4345 4346 upper->dev = upper_dev; 4347 upper->master = master; 4348 INIT_LIST_HEAD(&upper->search_list); 4349 4350 /* Ensure that master upper link is always the first item in list. */ 4351 if (master) 4352 list_add_rcu(&upper->list, &dev->upper_dev_list); 4353 else 4354 list_add_tail_rcu(&upper->list, &dev->upper_dev_list); 4355 dev_hold(upper_dev); 4356 4357 return 0; 4358 } 4359 4360 /** 4361 * netdev_upper_dev_link - Add a link to the upper device 4362 * @dev: device 4363 * @upper_dev: new upper device 4364 * 4365 * Adds a link to device which is upper to this one. The caller must hold 4366 * the RTNL lock. On a failure a negative errno code is returned. 4367 * On success the reference counts are adjusted and the function 4368 * returns zero. 4369 */ 4370 int netdev_upper_dev_link(struct net_device *dev, 4371 struct net_device *upper_dev) 4372 { 4373 return __netdev_upper_dev_link(dev, upper_dev, false); 4374 } 4375 EXPORT_SYMBOL(netdev_upper_dev_link); 4376 4377 /** 4378 * netdev_master_upper_dev_link - Add a master link to the upper device 4379 * @dev: device 4380 * @upper_dev: new upper device 4381 * 4382 * Adds a link to device which is upper to this one. In this case, only 4383 * one master upper device can be linked, although other non-master devices 4384 * might be linked as well. The caller must hold the RTNL lock. 4385 * On a failure a negative errno code is returned. On success the reference 4386 * counts are adjusted and the function returns zero. 
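 *
 * Illustrative sketch (hypothetical "bond_dev" and "slave_dev"), under RTNL:
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);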
4387 */ 4388 int netdev_master_upper_dev_link(struct net_device *dev, 4389 struct net_device *upper_dev) 4390 { 4391 return __netdev_upper_dev_link(dev, upper_dev, true); 4392 } 4393 EXPORT_SYMBOL(netdev_master_upper_dev_link); 4394 4395 /** 4396 * netdev_upper_dev_unlink - Removes a link to upper device 4397 * @dev: device 4398 * @upper_dev: new upper device 4399 * 4400 * Removes a link to device which is upper to this one. The caller must hold 4401 * the RTNL lock. 4402 */ 4403 void netdev_upper_dev_unlink(struct net_device *dev, 4404 struct net_device *upper_dev) 4405 { 4406 struct netdev_upper *upper; 4407 4408 ASSERT_RTNL(); 4409 4410 upper = __netdev_find_upper(dev, upper_dev); 4411 if (!upper) 4412 return; 4413 list_del_rcu(&upper->list); 4414 dev_put(upper_dev); 4415 kfree_rcu(upper, rcu); 4416 } 4417 EXPORT_SYMBOL(netdev_upper_dev_unlink); 4418 4419 static void dev_change_rx_flags(struct net_device *dev, int flags) 4420 { 4421 const struct net_device_ops *ops = dev->netdev_ops; 4422 4423 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) 4424 ops->ndo_change_rx_flags(dev, flags); 4425 } 4426 4427 static int __dev_set_promiscuity(struct net_device *dev, int inc) 4428 { 4429 unsigned int old_flags = dev->flags; 4430 kuid_t uid; 4431 kgid_t gid; 4432 4433 ASSERT_RTNL(); 4434 4435 dev->flags |= IFF_PROMISC; 4436 dev->promiscuity += inc; 4437 if (dev->promiscuity == 0) { 4438 /* 4439 * Avoid overflow. 4440 * If inc causes overflow, untouch promisc and return error. 4441 */ 4442 if (inc < 0) 4443 dev->flags &= ~IFF_PROMISC; 4444 else { 4445 dev->promiscuity -= inc; 4446 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 4447 dev->name); 4448 return -EOVERFLOW; 4449 } 4450 } 4451 if (dev->flags != old_flags) { 4452 pr_info("device %s %s promiscuous mode\n", 4453 dev->name, 4454 dev->flags & IFF_PROMISC ? "entered" : "left"); 4455 if (audit_enabled) { 4456 current_uid_gid(&uid, &gid); 4457 audit_log(current->audit_context, GFP_ATOMIC, 4458 AUDIT_ANOM_PROMISCUOUS, 4459 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 4460 dev->name, (dev->flags & IFF_PROMISC), 4461 (old_flags & IFF_PROMISC), 4462 from_kuid(&init_user_ns, audit_get_loginuid(current)), 4463 from_kuid(&init_user_ns, uid), 4464 from_kgid(&init_user_ns, gid), 4465 audit_get_sessionid(current)); 4466 } 4467 4468 dev_change_rx_flags(dev, IFF_PROMISC); 4469 } 4470 return 0; 4471 } 4472 4473 /** 4474 * dev_set_promiscuity - update promiscuity count on a device 4475 * @dev: device 4476 * @inc: modifier 4477 * 4478 * Add or remove promiscuity from a device. While the count in the device 4479 * remains above zero the interface remains promiscuous. Once it hits zero 4480 * the device reverts back to normal filtering operation. A negative inc 4481 * value is used to drop promiscuity on the device. 4482 * Return 0 if successful or a negative errno code on error. 4483 */ 4484 int dev_set_promiscuity(struct net_device *dev, int inc) 4485 { 4486 unsigned int old_flags = dev->flags; 4487 int err; 4488 4489 err = __dev_set_promiscuity(dev, inc); 4490 if (err < 0) 4491 return err; 4492 if (dev->flags != old_flags) 4493 dev_set_rx_mode(dev); 4494 return err; 4495 } 4496 EXPORT_SYMBOL(dev_set_promiscuity); 4497 4498 /** 4499 * dev_set_allmulti - update allmulti count on a device 4500 * @dev: device 4501 * @inc: modifier 4502 * 4503 * Add or remove reception of all multicast frames to a device. 
While the 4504 * count in the device remains above zero the interface remains listening 4505 * to all interfaces. Once it hits zero the device reverts back to normal 4506 * filtering operation. A negative @inc value is used to drop the counter 4507 * when releasing a resource needing all multicasts. 4508 * Return 0 if successful or a negative errno code on error. 4509 */ 4510 4511 int dev_set_allmulti(struct net_device *dev, int inc) 4512 { 4513 unsigned int old_flags = dev->flags; 4514 4515 ASSERT_RTNL(); 4516 4517 dev->flags |= IFF_ALLMULTI; 4518 dev->allmulti += inc; 4519 if (dev->allmulti == 0) { 4520 /* 4521 * Avoid overflow. 4522 * If inc causes overflow, untouch allmulti and return error. 4523 */ 4524 if (inc < 0) 4525 dev->flags &= ~IFF_ALLMULTI; 4526 else { 4527 dev->allmulti -= inc; 4528 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 4529 dev->name); 4530 return -EOVERFLOW; 4531 } 4532 } 4533 if (dev->flags ^ old_flags) { 4534 dev_change_rx_flags(dev, IFF_ALLMULTI); 4535 dev_set_rx_mode(dev); 4536 } 4537 return 0; 4538 } 4539 EXPORT_SYMBOL(dev_set_allmulti); 4540 4541 /* 4542 * Upload unicast and multicast address lists to device and 4543 * configure RX filtering. When the device doesn't support unicast 4544 * filtering it is put in promiscuous mode while unicast addresses 4545 * are present. 4546 */ 4547 void __dev_set_rx_mode(struct net_device *dev) 4548 { 4549 const struct net_device_ops *ops = dev->netdev_ops; 4550 4551 /* dev_open will call this function so the list will stay sane. */ 4552 if (!(dev->flags&IFF_UP)) 4553 return; 4554 4555 if (!netif_device_present(dev)) 4556 return; 4557 4558 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 4559 /* Unicast addresses changes may only happen under the rtnl, 4560 * therefore calling __dev_set_promiscuity here is safe. 4561 */ 4562 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 4563 __dev_set_promiscuity(dev, 1); 4564 dev->uc_promisc = true; 4565 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 4566 __dev_set_promiscuity(dev, -1); 4567 dev->uc_promisc = false; 4568 } 4569 } 4570 4571 if (ops->ndo_set_rx_mode) 4572 ops->ndo_set_rx_mode(dev); 4573 } 4574 4575 void dev_set_rx_mode(struct net_device *dev) 4576 { 4577 netif_addr_lock_bh(dev); 4578 __dev_set_rx_mode(dev); 4579 netif_addr_unlock_bh(dev); 4580 } 4581 4582 /** 4583 * dev_get_flags - get flags reported to userspace 4584 * @dev: device 4585 * 4586 * Get the combination of flag bits exported through APIs to userspace. 4587 */ 4588 unsigned int dev_get_flags(const struct net_device *dev) 4589 { 4590 unsigned int flags; 4591 4592 flags = (dev->flags & ~(IFF_PROMISC | 4593 IFF_ALLMULTI | 4594 IFF_RUNNING | 4595 IFF_LOWER_UP | 4596 IFF_DORMANT)) | 4597 (dev->gflags & (IFF_PROMISC | 4598 IFF_ALLMULTI)); 4599 4600 if (netif_running(dev)) { 4601 if (netif_oper_up(dev)) 4602 flags |= IFF_RUNNING; 4603 if (netif_carrier_ok(dev)) 4604 flags |= IFF_LOWER_UP; 4605 if (netif_dormant(dev)) 4606 flags |= IFF_DORMANT; 4607 } 4608 4609 return flags; 4610 } 4611 EXPORT_SYMBOL(dev_get_flags); 4612 4613 int __dev_change_flags(struct net_device *dev, unsigned int flags) 4614 { 4615 unsigned int old_flags = dev->flags; 4616 int ret; 4617 4618 ASSERT_RTNL(); 4619 4620 /* 4621 * Set the flags on our device. 
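	 * Only the user-settable bits are taken from @flags here;
	 * IFF_UP, IFF_VOLATILE, IFF_PROMISC and IFF_ALLMULTI are
	 * preserved from the current dev->flags (IFF_UP, IFF_PROMISC
	 * and IFF_ALLMULTI are then handled explicitly below).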
4622 */ 4623 4624 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 4625 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 4626 IFF_AUTOMEDIA)) | 4627 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 4628 IFF_ALLMULTI)); 4629 4630 /* 4631 * Load in the correct multicast list now the flags have changed. 4632 */ 4633 4634 if ((old_flags ^ flags) & IFF_MULTICAST) 4635 dev_change_rx_flags(dev, IFF_MULTICAST); 4636 4637 dev_set_rx_mode(dev); 4638 4639 /* 4640 * Have we downed the interface. We handle IFF_UP ourselves 4641 * according to user attempts to set it, rather than blindly 4642 * setting it. 4643 */ 4644 4645 ret = 0; 4646 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ 4647 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); 4648 4649 if (!ret) 4650 dev_set_rx_mode(dev); 4651 } 4652 4653 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4654 int inc = (flags & IFF_PROMISC) ? 1 : -1; 4655 4656 dev->gflags ^= IFF_PROMISC; 4657 dev_set_promiscuity(dev, inc); 4658 } 4659 4660 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 4661 is important. Some (broken) drivers set IFF_PROMISC, when 4662 IFF_ALLMULTI is requested not asking us and not reporting. 4663 */ 4664 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 4665 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 4666 4667 dev->gflags ^= IFF_ALLMULTI; 4668 dev_set_allmulti(dev, inc); 4669 } 4670 4671 return ret; 4672 } 4673 4674 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) 4675 { 4676 unsigned int changes = dev->flags ^ old_flags; 4677 4678 if (changes & IFF_UP) { 4679 if (dev->flags & IFF_UP) 4680 call_netdevice_notifiers(NETDEV_UP, dev); 4681 else 4682 call_netdevice_notifiers(NETDEV_DOWN, dev); 4683 } 4684 4685 if (dev->flags & IFF_UP && 4686 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) 4687 call_netdevice_notifiers(NETDEV_CHANGE, dev); 4688 } 4689 4690 /** 4691 * dev_change_flags - change device settings 4692 * @dev: device 4693 * @flags: device state flags 4694 * 4695 * Change settings on device based state flags. The flags are 4696 * in the userspace exported format. 4697 */ 4698 int dev_change_flags(struct net_device *dev, unsigned int flags) 4699 { 4700 int ret; 4701 unsigned int changes, old_flags = dev->flags; 4702 4703 ret = __dev_change_flags(dev, flags); 4704 if (ret < 0) 4705 return ret; 4706 4707 changes = old_flags ^ dev->flags; 4708 if (changes) 4709 rtmsg_ifinfo(RTM_NEWLINK, dev, changes); 4710 4711 __dev_notify_flags(dev, old_flags); 4712 return ret; 4713 } 4714 EXPORT_SYMBOL(dev_change_flags); 4715 4716 /** 4717 * dev_set_mtu - Change maximum transfer unit 4718 * @dev: device 4719 * @new_mtu: new transfer unit 4720 * 4721 * Change the maximum transfer size of the network device. 4722 */ 4723 int dev_set_mtu(struct net_device *dev, int new_mtu) 4724 { 4725 const struct net_device_ops *ops = dev->netdev_ops; 4726 int err; 4727 4728 if (new_mtu == dev->mtu) 4729 return 0; 4730 4731 /* MTU must be positive. 
*/ 4732 if (new_mtu < 0) 4733 return -EINVAL; 4734 4735 if (!netif_device_present(dev)) 4736 return -ENODEV; 4737 4738 err = 0; 4739 if (ops->ndo_change_mtu) 4740 err = ops->ndo_change_mtu(dev, new_mtu); 4741 else 4742 dev->mtu = new_mtu; 4743 4744 if (!err) 4745 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4746 return err; 4747 } 4748 EXPORT_SYMBOL(dev_set_mtu); 4749 4750 /** 4751 * dev_set_group - Change group this device belongs to 4752 * @dev: device 4753 * @new_group: group this device should belong to 4754 */ 4755 void dev_set_group(struct net_device *dev, int new_group) 4756 { 4757 dev->group = new_group; 4758 } 4759 EXPORT_SYMBOL(dev_set_group); 4760 4761 /** 4762 * dev_set_mac_address - Change Media Access Control Address 4763 * @dev: device 4764 * @sa: new address 4765 * 4766 * Change the hardware (MAC) address of the device 4767 */ 4768 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 4769 { 4770 const struct net_device_ops *ops = dev->netdev_ops; 4771 int err; 4772 4773 if (!ops->ndo_set_mac_address) 4774 return -EOPNOTSUPP; 4775 if (sa->sa_family != dev->type) 4776 return -EINVAL; 4777 if (!netif_device_present(dev)) 4778 return -ENODEV; 4779 err = ops->ndo_set_mac_address(dev, sa); 4780 if (err) 4781 return err; 4782 dev->addr_assign_type = NET_ADDR_SET; 4783 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4784 add_device_randomness(dev->dev_addr, dev->addr_len); 4785 return 0; 4786 } 4787 EXPORT_SYMBOL(dev_set_mac_address); 4788 4789 /** 4790 * dev_change_carrier - Change device carrier 4791 * @dev: device 4792 * @new_carrier: new value 4793 * 4794 * Change device carrier 4795 */ 4796 int dev_change_carrier(struct net_device *dev, bool new_carrier) 4797 { 4798 const struct net_device_ops *ops = dev->netdev_ops; 4799 4800 if (!ops->ndo_change_carrier) 4801 return -EOPNOTSUPP; 4802 if (!netif_device_present(dev)) 4803 return -ENODEV; 4804 return ops->ndo_change_carrier(dev, new_carrier); 4805 } 4806 EXPORT_SYMBOL(dev_change_carrier); 4807 4808 /** 4809 * dev_new_index - allocate an ifindex 4810 * @net: the applicable net namespace 4811 * 4812 * Returns a suitable unique value for a new device interface 4813 * number. The caller must hold the rtnl semaphore or the 4814 * dev_base_lock to be sure it remains unique. 4815 */ 4816 static int dev_new_index(struct net *net) 4817 { 4818 int ifindex = net->ifindex; 4819 for (;;) { 4820 if (++ifindex <= 0) 4821 ifindex = 1; 4822 if (!__dev_get_by_index(net, ifindex)) 4823 return net->ifindex = ifindex; 4824 } 4825 } 4826 4827 /* Delayed registration/unregisteration */ 4828 static LIST_HEAD(net_todo_list); 4829 4830 static void net_set_todo(struct net_device *dev) 4831 { 4832 list_add_tail(&dev->todo_list, &net_todo_list); 4833 } 4834 4835 static void rollback_registered_many(struct list_head *head) 4836 { 4837 struct net_device *dev, *tmp; 4838 4839 BUG_ON(dev_boot_phase); 4840 ASSERT_RTNL(); 4841 4842 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 4843 /* Some devices call without registering 4844 * for initialization unwind. Remove those 4845 * devices and proceed with the remaining. 4846 */ 4847 if (dev->reg_state == NETREG_UNINITIALIZED) { 4848 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 4849 dev->name, dev); 4850 4851 WARN_ON(1); 4852 list_del(&dev->unreg_list); 4853 continue; 4854 } 4855 dev->dismantle = true; 4856 BUG_ON(dev->reg_state != NETREG_REGISTERED); 4857 } 4858 4859 /* If device is running, close it first. 
*/ 4860 dev_close_many(head); 4861 4862 list_for_each_entry(dev, head, unreg_list) { 4863 /* And unlink it from device chain. */ 4864 unlist_netdevice(dev); 4865 4866 dev->reg_state = NETREG_UNREGISTERING; 4867 } 4868 4869 synchronize_net(); 4870 4871 list_for_each_entry(dev, head, unreg_list) { 4872 /* Shutdown queueing discipline. */ 4873 dev_shutdown(dev); 4874 4875 4876 /* Notify protocols, that we are about to destroy 4877 this device. They should clean all the things. 4878 */ 4879 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 4880 4881 if (!dev->rtnl_link_ops || 4882 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 4883 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 4884 4885 /* 4886 * Flush the unicast and multicast chains 4887 */ 4888 dev_uc_flush(dev); 4889 dev_mc_flush(dev); 4890 4891 if (dev->netdev_ops->ndo_uninit) 4892 dev->netdev_ops->ndo_uninit(dev); 4893 4894 /* Notifier chain MUST detach us all upper devices. */ 4895 WARN_ON(netdev_has_any_upper_dev(dev)); 4896 4897 /* Remove entries from kobject tree */ 4898 netdev_unregister_kobject(dev); 4899 #ifdef CONFIG_XPS 4900 /* Remove XPS queueing entries */ 4901 netif_reset_xps_queues_gt(dev, 0); 4902 #endif 4903 } 4904 4905 synchronize_net(); 4906 4907 list_for_each_entry(dev, head, unreg_list) 4908 dev_put(dev); 4909 } 4910 4911 static void rollback_registered(struct net_device *dev) 4912 { 4913 LIST_HEAD(single); 4914 4915 list_add(&dev->unreg_list, &single); 4916 rollback_registered_many(&single); 4917 list_del(&single); 4918 } 4919 4920 static netdev_features_t netdev_fix_features(struct net_device *dev, 4921 netdev_features_t features) 4922 { 4923 /* Fix illegal checksum combinations */ 4924 if ((features & NETIF_F_HW_CSUM) && 4925 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 4926 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 4927 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4928 } 4929 4930 /* Fix illegal SG+CSUM combinations. */ 4931 if ((features & NETIF_F_SG) && 4932 !(features & NETIF_F_ALL_CSUM)) { 4933 netdev_dbg(dev, 4934 "Dropping NETIF_F_SG since no checksum feature.\n"); 4935 features &= ~NETIF_F_SG; 4936 } 4937 4938 /* TSO requires that SG is present as well. */ 4939 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 4940 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 4941 features &= ~NETIF_F_ALL_TSO; 4942 } 4943 4944 /* TSO ECN requires that TSO is present as well. */ 4945 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 4946 features &= ~NETIF_F_TSO_ECN; 4947 4948 /* Software GSO depends on SG. */ 4949 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 4950 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 4951 features &= ~NETIF_F_GSO; 4952 } 4953 4954 /* UFO needs SG and checksumming */ 4955 if (features & NETIF_F_UFO) { 4956 /* maybe split UFO into V4 and V6? 
*/ 4957 if (!((features & NETIF_F_GEN_CSUM) || 4958 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 4959 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 4960 netdev_dbg(dev, 4961 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 4962 features &= ~NETIF_F_UFO; 4963 } 4964 4965 if (!(features & NETIF_F_SG)) { 4966 netdev_dbg(dev, 4967 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 4968 features &= ~NETIF_F_UFO; 4969 } 4970 } 4971 4972 return features; 4973 } 4974 4975 int __netdev_update_features(struct net_device *dev) 4976 { 4977 netdev_features_t features; 4978 int err = 0; 4979 4980 ASSERT_RTNL(); 4981 4982 features = netdev_get_wanted_features(dev); 4983 4984 if (dev->netdev_ops->ndo_fix_features) 4985 features = dev->netdev_ops->ndo_fix_features(dev, features); 4986 4987 /* driver might be less strict about feature dependencies */ 4988 features = netdev_fix_features(dev, features); 4989 4990 if (dev->features == features) 4991 return 0; 4992 4993 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 4994 &dev->features, &features); 4995 4996 if (dev->netdev_ops->ndo_set_features) 4997 err = dev->netdev_ops->ndo_set_features(dev, features); 4998 4999 if (unlikely(err < 0)) { 5000 netdev_err(dev, 5001 "set_features() failed (%d); wanted %pNF, left %pNF\n", 5002 err, &features, &dev->features); 5003 return -1; 5004 } 5005 5006 if (!err) 5007 dev->features = features; 5008 5009 return 1; 5010 } 5011 5012 /** 5013 * netdev_update_features - recalculate device features 5014 * @dev: the device to check 5015 * 5016 * Recalculate dev->features set and send notifications if it 5017 * has changed. Should be called after driver or hardware dependent 5018 * conditions might have changed that influence the features. 5019 */ 5020 void netdev_update_features(struct net_device *dev) 5021 { 5022 if (__netdev_update_features(dev)) 5023 netdev_features_change(dev); 5024 } 5025 EXPORT_SYMBOL(netdev_update_features); 5026 5027 /** 5028 * netdev_change_features - recalculate device features 5029 * @dev: the device to check 5030 * 5031 * Recalculate dev->features set and send notifications even 5032 * if they have not changed. Should be called instead of 5033 * netdev_update_features() if also dev->vlan_features might 5034 * have changed to allow the changes to be propagated to stacked 5035 * VLAN devices. 5036 */ 5037 void netdev_change_features(struct net_device *dev) 5038 { 5039 __netdev_update_features(dev); 5040 netdev_features_change(dev); 5041 } 5042 EXPORT_SYMBOL(netdev_change_features); 5043 5044 /** 5045 * netif_stacked_transfer_operstate - transfer operstate 5046 * @rootdev: the root or lower level device to transfer state from 5047 * @dev: the device to transfer operstate to 5048 * 5049 * Transfer operational state from root to device. This is normally 5050 * called when a stacking relationship exists between the root 5051 * device and the device(a leaf device). 
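 *
 * Illustrative sketch (hypothetical VLAN-style pairing, typically done
 * when the lower device changes state):
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);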
5052 */ 5053 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5054 struct net_device *dev) 5055 { 5056 if (rootdev->operstate == IF_OPER_DORMANT) 5057 netif_dormant_on(dev); 5058 else 5059 netif_dormant_off(dev); 5060 5061 if (netif_carrier_ok(rootdev)) { 5062 if (!netif_carrier_ok(dev)) 5063 netif_carrier_on(dev); 5064 } else { 5065 if (netif_carrier_ok(dev)) 5066 netif_carrier_off(dev); 5067 } 5068 } 5069 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5070 5071 #ifdef CONFIG_RPS 5072 static int netif_alloc_rx_queues(struct net_device *dev) 5073 { 5074 unsigned int i, count = dev->num_rx_queues; 5075 struct netdev_rx_queue *rx; 5076 5077 BUG_ON(count < 1); 5078 5079 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5080 if (!rx) 5081 return -ENOMEM; 5082 5083 dev->_rx = rx; 5084 5085 for (i = 0; i < count; i++) 5086 rx[i].dev = dev; 5087 return 0; 5088 } 5089 #endif 5090 5091 static void netdev_init_one_queue(struct net_device *dev, 5092 struct netdev_queue *queue, void *_unused) 5093 { 5094 /* Initialize queue lock */ 5095 spin_lock_init(&queue->_xmit_lock); 5096 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 5097 queue->xmit_lock_owner = -1; 5098 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 5099 queue->dev = dev; 5100 #ifdef CONFIG_BQL 5101 dql_init(&queue->dql, HZ); 5102 #endif 5103 } 5104 5105 static int netif_alloc_netdev_queues(struct net_device *dev) 5106 { 5107 unsigned int count = dev->num_tx_queues; 5108 struct netdev_queue *tx; 5109 5110 BUG_ON(count < 1); 5111 5112 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5113 if (!tx) 5114 return -ENOMEM; 5115 5116 dev->_tx = tx; 5117 5118 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5119 spin_lock_init(&dev->tx_global_lock); 5120 5121 return 0; 5122 } 5123 5124 /** 5125 * register_netdevice - register a network device 5126 * @dev: device to register 5127 * 5128 * Take a completed network device structure and add it to the kernel 5129 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5130 * chain. 0 is returned on success. A negative errno code is returned 5131 * on a failure to set up the device, or if the name is a duplicate. 5132 * 5133 * Callers must hold the rtnl semaphore. You may want 5134 * register_netdev() instead of this. 5135 * 5136 * BUGS: 5137 * The locking appears insufficient to guarantee two parallel registers 5138 * will not get the same name. 5139 */ 5140 5141 int register_netdevice(struct net_device *dev) 5142 { 5143 int ret; 5144 struct net *net = dev_net(dev); 5145 5146 BUG_ON(dev_boot_phase); 5147 ASSERT_RTNL(); 5148 5149 might_sleep(); 5150 5151 /* When net_device's are persistent, this will be fatal. 
*/ 5152 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 5153 BUG_ON(!net); 5154 5155 spin_lock_init(&dev->addr_list_lock); 5156 netdev_set_addr_lockdep_class(dev); 5157 5158 dev->iflink = -1; 5159 5160 ret = dev_get_valid_name(net, dev, dev->name); 5161 if (ret < 0) 5162 goto out; 5163 5164 /* Init, if this function is available */ 5165 if (dev->netdev_ops->ndo_init) { 5166 ret = dev->netdev_ops->ndo_init(dev); 5167 if (ret) { 5168 if (ret > 0) 5169 ret = -EIO; 5170 goto out; 5171 } 5172 } 5173 5174 if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) && 5175 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 5176 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 5177 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 5178 ret = -EINVAL; 5179 goto err_uninit; 5180 } 5181 5182 ret = -EBUSY; 5183 if (!dev->ifindex) 5184 dev->ifindex = dev_new_index(net); 5185 else if (__dev_get_by_index(net, dev->ifindex)) 5186 goto err_uninit; 5187 5188 if (dev->iflink == -1) 5189 dev->iflink = dev->ifindex; 5190 5191 /* Transfer changeable features to wanted_features and enable 5192 * software offloads (GSO and GRO). 5193 */ 5194 dev->hw_features |= NETIF_F_SOFT_FEATURES; 5195 dev->features |= NETIF_F_SOFT_FEATURES; 5196 dev->wanted_features = dev->features & dev->hw_features; 5197 5198 /* Turn on no cache copy if HW is doing checksum */ 5199 if (!(dev->flags & IFF_LOOPBACK)) { 5200 dev->hw_features |= NETIF_F_NOCACHE_COPY; 5201 if (dev->features & NETIF_F_ALL_CSUM) { 5202 dev->wanted_features |= NETIF_F_NOCACHE_COPY; 5203 dev->features |= NETIF_F_NOCACHE_COPY; 5204 } 5205 } 5206 5207 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 5208 */ 5209 dev->vlan_features |= NETIF_F_HIGHDMA; 5210 5211 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5212 ret = notifier_to_errno(ret); 5213 if (ret) 5214 goto err_uninit; 5215 5216 ret = netdev_register_kobject(dev); 5217 if (ret) 5218 goto err_uninit; 5219 dev->reg_state = NETREG_REGISTERED; 5220 5221 __netdev_update_features(dev); 5222 5223 /* 5224 * Default initial state at registry is that the 5225 * device is present. 5226 */ 5227 5228 set_bit(__LINK_STATE_PRESENT, &dev->state); 5229 5230 linkwatch_init_dev(dev); 5231 5232 dev_init_scheduler(dev); 5233 dev_hold(dev); 5234 list_netdevice(dev); 5235 add_device_randomness(dev->dev_addr, dev->addr_len); 5236 5237 /* If the device has permanent device address, driver should 5238 * set dev_addr and also addr_assign_type should be set to 5239 * NET_ADDR_PERM (default value). 5240 */ 5241 if (dev->addr_assign_type == NET_ADDR_PERM) 5242 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5243 5244 /* Notify protocols, that a new device appeared. */ 5245 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 5246 ret = notifier_to_errno(ret); 5247 if (ret) { 5248 rollback_registered(dev); 5249 dev->reg_state = NETREG_UNREGISTERED; 5250 } 5251 /* 5252 * Prevent userspace races by waiting until the network 5253 * device is fully setup before sending notifications. 
5254 */ 5255 if (!dev->rtnl_link_ops || 5256 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5257 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); 5258 5259 out: 5260 return ret; 5261 5262 err_uninit: 5263 if (dev->netdev_ops->ndo_uninit) 5264 dev->netdev_ops->ndo_uninit(dev); 5265 goto out; 5266 } 5267 EXPORT_SYMBOL(register_netdevice); 5268
5269 /** 5270 * init_dummy_netdev - init a dummy network device for NAPI 5271 * @dev: device to init 5272 * 5273 * This takes a network device structure and initializes the minimum 5274 * number of fields so it can be used to schedule NAPI polls without 5275 * registering a full blown interface. This is to be used by drivers 5276 * that need to tie several hardware interfaces to a single NAPI 5277 * poll scheduler due to HW limitations. 5278 */ 5279 int init_dummy_netdev(struct net_device *dev) 5280 { 5281 /* Clear everything. Note we don't initialize spinlocks, 5282 * as they aren't supposed to be taken by any of the 5283 * NAPI code and this dummy netdev is supposed to be 5284 * used only for NAPI polls. 5285 */ 5286 memset(dev, 0, sizeof(struct net_device)); 5287
5288 /* Make sure we BUG if trying to hit standard 5289 * register/unregister code path. 5290 */ 5291 dev->reg_state = NETREG_DUMMY; 5292 5293 /* NAPI wants this */ 5294 INIT_LIST_HEAD(&dev->napi_list); 5295 5296 /* a dummy interface is started by default */ 5297 set_bit(__LINK_STATE_PRESENT, &dev->state); 5298 set_bit(__LINK_STATE_START, &dev->state); 5299
5300 /* Note : We don't allocate pcpu_refcnt for dummy devices, 5301 * because users of this 'device' don't need to change 5302 * its refcount. 5303 */ 5304 5305 return 0; 5306 } 5307 EXPORT_SYMBOL_GPL(init_dummy_netdev); 5308 5309
5310 /** 5311 * register_netdev - register a network device 5312 * @dev: device to register 5313 * 5314 * Take a completed network device structure and add it to the kernel 5315 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5316 * chain. 0 is returned on success. A negative errno code is returned 5317 * on a failure to set up the device, or if the name is a duplicate. 5318 * 5319 * This is a wrapper around register_netdevice that takes the rtnl semaphore 5320 * and expands the device name if you passed a format string to 5321 * alloc_netdev. 5322 */ 5323 int register_netdev(struct net_device *dev) 5324 { 5325 int err; 5326 5327 rtnl_lock(); 5328 err = register_netdevice(dev); 5329 rtnl_unlock(); 5330 return err; 5331 } 5332 EXPORT_SYMBOL(register_netdev); 5333
5334 int netdev_refcnt_read(const struct net_device *dev) 5335 { 5336 int i, refcnt = 0; 5337 5338 for_each_possible_cpu(i) 5339 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 5340 return refcnt; 5341 } 5342 EXPORT_SYMBOL(netdev_refcnt_read); 5343
5344 /** 5345 * netdev_wait_allrefs - wait until all references are gone. 5346 * @dev: target net_device 5347 * 5348 * This is called when unregistering network devices. 5349 * 5350 * Any protocol or device that holds a reference should register 5351 * for netdevice notification, and clean up and put back the 5352 * reference if they receive an UNREGISTER event. 5353 * We can get stuck here if buggy protocols don't correctly 5354 * call dev_put.
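 *
 * Illustrative sketch of such cleanup in a notifier (hypothetical
 * "my_cache" that holds a device reference):
 *
 *	case NETDEV_UNREGISTER:
 *		if (my_cache->dev == dev) {
 *			dev_put(my_cache->dev);
 *			my_cache->dev = NULL;
 *		}
 *		break;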
5355 */ 5356 static void netdev_wait_allrefs(struct net_device *dev) 5357 { 5358 unsigned long rebroadcast_time, warning_time; 5359 int refcnt; 5360 5361 linkwatch_forget_dev(dev); 5362 5363 rebroadcast_time = warning_time = jiffies; 5364 refcnt = netdev_refcnt_read(dev); 5365 5366 while (refcnt != 0) { 5367 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 5368 rtnl_lock(); 5369 5370 /* Rebroadcast unregister notification */ 5371 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5372 5373 __rtnl_unlock(); 5374 rcu_barrier(); 5375 rtnl_lock(); 5376 5377 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5378 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5379 &dev->state)) { 5380 /* We must not have linkwatch events 5381 * pending on unregister. If this 5382 * happens, we simply run the queue 5383 * unscheduled, resulting in a noop 5384 * for this device. 5385 */ 5386 linkwatch_run_queue(); 5387 } 5388 5389 __rtnl_unlock(); 5390 5391 rebroadcast_time = jiffies; 5392 } 5393 5394 msleep(250); 5395 5396 refcnt = netdev_refcnt_read(dev); 5397 5398 if (time_after(jiffies, warning_time + 10 * HZ)) { 5399 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 5400 dev->name, refcnt); 5401 warning_time = jiffies; 5402 } 5403 } 5404 } 5405 5406 /* The sequence is: 5407 * 5408 * rtnl_lock(); 5409 * ... 5410 * register_netdevice(x1); 5411 * register_netdevice(x2); 5412 * ... 5413 * unregister_netdevice(y1); 5414 * unregister_netdevice(y2); 5415 * ... 5416 * rtnl_unlock(); 5417 * free_netdev(y1); 5418 * free_netdev(y2); 5419 * 5420 * We are invoked by rtnl_unlock(). 5421 * This allows us to deal with problems: 5422 * 1) We can delete sysfs objects which invoke hotplug 5423 * without deadlocking with linkwatch via keventd. 5424 * 2) Since we run with the RTNL semaphore not held, we can sleep 5425 * safely in order to wait for the netdev refcnt to drop to zero. 5426 * 5427 * We must not return until all unregister events added during 5428 * the interval the lock was held have been completed. 5429 */ 5430 void netdev_run_todo(void) 5431 { 5432 struct list_head list; 5433 5434 /* Snapshot list, allow later requests */ 5435 list_replace_init(&net_todo_list, &list); 5436 5437 __rtnl_unlock(); 5438 5439 5440 /* Wait for rcu callbacks to finish before next phase */ 5441 if (!list_empty(&list)) 5442 rcu_barrier(); 5443 5444 while (!list_empty(&list)) { 5445 struct net_device *dev 5446 = list_first_entry(&list, struct net_device, todo_list); 5447 list_del(&dev->todo_list); 5448 5449 rtnl_lock(); 5450 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5451 __rtnl_unlock(); 5452 5453 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5454 pr_err("network todo '%s' but state %d\n", 5455 dev->name, dev->reg_state); 5456 dump_stack(); 5457 continue; 5458 } 5459 5460 dev->reg_state = NETREG_UNREGISTERED; 5461 5462 on_each_cpu(flush_backlog, dev, 1); 5463 5464 netdev_wait_allrefs(dev); 5465 5466 /* paranoia */ 5467 BUG_ON(netdev_refcnt_read(dev)); 5468 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 5469 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 5470 WARN_ON(dev->dn_ptr); 5471 5472 if (dev->destructor) 5473 dev->destructor(dev); 5474 5475 /* Free network device */ 5476 kobject_put(&dev->dev.kobj); 5477 } 5478 } 5479 5480 /* Convert net_device_stats to rtnl_link_stats64. They have the same 5481 * fields in the same order, with only the type differing. 
5482 */ 5483 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 5484 const struct net_device_stats *netdev_stats) 5485 { 5486 #if BITS_PER_LONG == 64 5487 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 5488 memcpy(stats64, netdev_stats, sizeof(*stats64)); 5489 #else 5490 size_t i, n = sizeof(*stats64) / sizeof(u64); 5491 const unsigned long *src = (const unsigned long *)netdev_stats; 5492 u64 *dst = (u64 *)stats64; 5493 5494 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != 5495 sizeof(*stats64) / sizeof(u64)); 5496 for (i = 0; i < n; i++) 5497 dst[i] = src[i]; 5498 #endif 5499 } 5500 EXPORT_SYMBOL(netdev_stats_to_stats64); 5501 5502 /** 5503 * dev_get_stats - get network device statistics 5504 * @dev: device to get statistics from 5505 * @storage: place to store stats 5506 * 5507 * Get network statistics from device. Return @storage. 5508 * The device driver may provide its own method by setting 5509 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 5510 * otherwise the internal statistics structure is used. 5511 */ 5512 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 5513 struct rtnl_link_stats64 *storage) 5514 { 5515 const struct net_device_ops *ops = dev->netdev_ops; 5516 5517 if (ops->ndo_get_stats64) { 5518 memset(storage, 0, sizeof(*storage)); 5519 ops->ndo_get_stats64(dev, storage); 5520 } else if (ops->ndo_get_stats) { 5521 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 5522 } else { 5523 netdev_stats_to_stats64(storage, &dev->stats); 5524 } 5525 storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 5526 return storage; 5527 } 5528 EXPORT_SYMBOL(dev_get_stats); 5529 5530 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 5531 { 5532 struct netdev_queue *queue = dev_ingress_queue(dev); 5533 5534 #ifdef CONFIG_NET_CLS_ACT 5535 if (queue) 5536 return queue; 5537 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 5538 if (!queue) 5539 return NULL; 5540 netdev_init_one_queue(dev, queue, NULL); 5541 queue->qdisc = &noop_qdisc; 5542 queue->qdisc_sleeping = &noop_qdisc; 5543 rcu_assign_pointer(dev->ingress_queue, queue); 5544 #endif 5545 return queue; 5546 } 5547 5548 static const struct ethtool_ops default_ethtool_ops; 5549 5550 void netdev_set_default_ethtool_ops(struct net_device *dev, 5551 const struct ethtool_ops *ops) 5552 { 5553 if (dev->ethtool_ops == &default_ethtool_ops) 5554 dev->ethtool_ops = ops; 5555 } 5556 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 5557 5558 /** 5559 * alloc_netdev_mqs - allocate network device 5560 * @sizeof_priv: size of private data to allocate space for 5561 * @name: device name format string 5562 * @setup: callback to initialize device 5563 * @txqs: the number of TX subqueues to allocate 5564 * @rxqs: the number of RX subqueues to allocate 5565 * 5566 * Allocates a struct net_device with private data area for driver use 5567 * and performs basic initialization. Also allocates subquue structs 5568 * for each queue on the device. 
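 *
 * Illustrative sketch (hypothetical "my_setup" callback, private struct
 * and queue counts):
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *			       my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;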
5569 */ 5570 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 5571 void (*setup)(struct net_device *), 5572 unsigned int txqs, unsigned int rxqs) 5573 { 5574 struct net_device *dev; 5575 size_t alloc_size; 5576 struct net_device *p; 5577 5578 BUG_ON(strlen(name) >= sizeof(dev->name)); 5579 5580 if (txqs < 1) { 5581 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 5582 return NULL; 5583 } 5584 5585 #ifdef CONFIG_RPS 5586 if (rxqs < 1) { 5587 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 5588 return NULL; 5589 } 5590 #endif 5591 5592 alloc_size = sizeof(struct net_device); 5593 if (sizeof_priv) { 5594 /* ensure 32-byte alignment of private area */ 5595 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 5596 alloc_size += sizeof_priv; 5597 } 5598 /* ensure 32-byte alignment of whole construct */ 5599 alloc_size += NETDEV_ALIGN - 1; 5600 5601 p = kzalloc(alloc_size, GFP_KERNEL); 5602 if (!p) 5603 return NULL; 5604 5605 dev = PTR_ALIGN(p, NETDEV_ALIGN); 5606 dev->padded = (char *)dev - (char *)p; 5607 5608 dev->pcpu_refcnt = alloc_percpu(int); 5609 if (!dev->pcpu_refcnt) 5610 goto free_p; 5611 5612 if (dev_addr_init(dev)) 5613 goto free_pcpu; 5614 5615 dev_mc_init(dev); 5616 dev_uc_init(dev); 5617 5618 dev_net_set(dev, &init_net); 5619 5620 dev->gso_max_size = GSO_MAX_SIZE; 5621 dev->gso_max_segs = GSO_MAX_SEGS; 5622 5623 INIT_LIST_HEAD(&dev->napi_list); 5624 INIT_LIST_HEAD(&dev->unreg_list); 5625 INIT_LIST_HEAD(&dev->link_watch_list); 5626 INIT_LIST_HEAD(&dev->upper_dev_list); 5627 dev->priv_flags = IFF_XMIT_DST_RELEASE; 5628 setup(dev); 5629 5630 dev->num_tx_queues = txqs; 5631 dev->real_num_tx_queues = txqs; 5632 if (netif_alloc_netdev_queues(dev)) 5633 goto free_all; 5634 5635 #ifdef CONFIG_RPS 5636 dev->num_rx_queues = rxqs; 5637 dev->real_num_rx_queues = rxqs; 5638 if (netif_alloc_rx_queues(dev)) 5639 goto free_all; 5640 #endif 5641 5642 strcpy(dev->name, name); 5643 dev->group = INIT_NETDEV_GROUP; 5644 if (!dev->ethtool_ops) 5645 dev->ethtool_ops = &default_ethtool_ops; 5646 return dev; 5647 5648 free_all: 5649 free_netdev(dev); 5650 return NULL; 5651 5652 free_pcpu: 5653 free_percpu(dev->pcpu_refcnt); 5654 kfree(dev->_tx); 5655 #ifdef CONFIG_RPS 5656 kfree(dev->_rx); 5657 #endif 5658 5659 free_p: 5660 kfree(p); 5661 return NULL; 5662 } 5663 EXPORT_SYMBOL(alloc_netdev_mqs); 5664 5665 /** 5666 * free_netdev - free network device 5667 * @dev: device 5668 * 5669 * This function does the last stage of destroying an allocated device 5670 * interface. The reference to the device object is released. 5671 * If this is the last reference then it will be freed. 
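 *
 * Illustrative error-path sketch (hypothetical driver probe):
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}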
5672 */ 5673 void free_netdev(struct net_device *dev) 5674 { 5675 struct napi_struct *p, *n; 5676 5677 release_net(dev_net(dev)); 5678 5679 kfree(dev->_tx); 5680 #ifdef CONFIG_RPS 5681 kfree(dev->_rx); 5682 #endif 5683 5684 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 5685 5686 /* Flush device addresses */ 5687 dev_addr_flush(dev); 5688 5689 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 5690 netif_napi_del(p); 5691 5692 free_percpu(dev->pcpu_refcnt); 5693 dev->pcpu_refcnt = NULL; 5694 5695 /* Compatibility with error handling in drivers */ 5696 if (dev->reg_state == NETREG_UNINITIALIZED) { 5697 kfree((char *)dev - dev->padded); 5698 return; 5699 } 5700 5701 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 5702 dev->reg_state = NETREG_RELEASED; 5703 5704 /* will free via device release */ 5705 put_device(&dev->dev); 5706 } 5707 EXPORT_SYMBOL(free_netdev); 5708 5709 /** 5710 * synchronize_net - Synchronize with packet receive processing 5711 * 5712 * Wait for packets currently being received to be done. 5713 * Does not block later packets from starting. 5714 */ 5715 void synchronize_net(void) 5716 { 5717 might_sleep(); 5718 if (rtnl_is_locked()) 5719 synchronize_rcu_expedited(); 5720 else 5721 synchronize_rcu(); 5722 } 5723 EXPORT_SYMBOL(synchronize_net); 5724 5725 /** 5726 * unregister_netdevice_queue - remove device from the kernel 5727 * @dev: device 5728 * @head: list 5729 * 5730 * This function shuts down a device interface and removes it 5731 * from the kernel tables. 5732 * If head not NULL, device is queued to be unregistered later. 5733 * 5734 * Callers must hold the rtnl semaphore. You may want 5735 * unregister_netdev() instead of this. 5736 */ 5737 5738 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 5739 { 5740 ASSERT_RTNL(); 5741 5742 if (head) { 5743 list_move_tail(&dev->unreg_list, head); 5744 } else { 5745 rollback_registered(dev); 5746 /* Finish processing unregister after unlock */ 5747 net_set_todo(dev); 5748 } 5749 } 5750 EXPORT_SYMBOL(unregister_netdevice_queue); 5751 5752 /** 5753 * unregister_netdevice_many - unregister many devices 5754 * @head: list of devices 5755 */ 5756 void unregister_netdevice_many(struct list_head *head) 5757 { 5758 struct net_device *dev; 5759 5760 if (!list_empty(head)) { 5761 rollback_registered_many(head); 5762 list_for_each_entry(dev, head, unreg_list) 5763 net_set_todo(dev); 5764 } 5765 } 5766 EXPORT_SYMBOL(unregister_netdevice_many); 5767 5768 /** 5769 * unregister_netdev - remove device from the kernel 5770 * @dev: device 5771 * 5772 * This function shuts down a device interface and removes it 5773 * from the kernel tables. 5774 * 5775 * This is just a wrapper for unregister_netdevice that takes 5776 * the rtnl semaphore. In general you want to use this and not 5777 * unregister_netdevice. 5778 */ 5779 void unregister_netdev(struct net_device *dev) 5780 { 5781 rtnl_lock(); 5782 unregister_netdevice(dev); 5783 rtnl_unlock(); 5784 } 5785 EXPORT_SYMBOL(unregister_netdev); 5786 5787 /** 5788 * dev_change_net_namespace - move device to different nethost namespace 5789 * @dev: device 5790 * @net: network namespace 5791 * @pat: If not NULL name pattern to try if the current device name 5792 * is already taken in the destination network namespace. 5793 * 5794 * This function shuts down a device interface and moves it 5795 * to a new network namespace. On success 0 is returned, on 5796 * a failure a netagive errno code is returned. 5797 * 5798 * Callers must hold the rtnl semaphore. 
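 *
 * Illustrative sketch (hypothetical destination namespace "net"),
 * with RTNL held:
 *
 *	err = dev_change_net_namespace(dev, net, "eth%d");
 *	if (err)
 *		return err;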
5799 */ 5800 5801 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 5802 { 5803 int err; 5804 5805 ASSERT_RTNL(); 5806 5807 /* Don't allow namespace local devices to be moved. */ 5808 err = -EINVAL; 5809 if (dev->features & NETIF_F_NETNS_LOCAL) 5810 goto out; 5811 5812 /* Ensure the device has been registrered */ 5813 if (dev->reg_state != NETREG_REGISTERED) 5814 goto out; 5815 5816 /* Get out if there is nothing todo */ 5817 err = 0; 5818 if (net_eq(dev_net(dev), net)) 5819 goto out; 5820 5821 /* Pick the destination device name, and ensure 5822 * we can use it in the destination network namespace. 5823 */ 5824 err = -EEXIST; 5825 if (__dev_get_by_name(net, dev->name)) { 5826 /* We get here if we can't use the current device name */ 5827 if (!pat) 5828 goto out; 5829 if (dev_get_valid_name(net, dev, pat) < 0) 5830 goto out; 5831 } 5832 5833 /* 5834 * And now a mini version of register_netdevice unregister_netdevice. 5835 */ 5836 5837 /* If device is running close it first. */ 5838 dev_close(dev); 5839 5840 /* And unlink it from device chain */ 5841 err = -ENODEV; 5842 unlist_netdevice(dev); 5843 5844 synchronize_net(); 5845 5846 /* Shutdown queueing discipline. */ 5847 dev_shutdown(dev); 5848 5849 /* Notify protocols, that we are about to destroy 5850 this device. They should clean all the things. 5851 5852 Note that dev->reg_state stays at NETREG_REGISTERED. 5853 This is wanted because this way 8021q and macvlan know 5854 the device is just moving and can keep their slaves up. 5855 */ 5856 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5857 rcu_barrier(); 5858 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5859 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 5860 5861 /* 5862 * Flush the unicast and multicast chains 5863 */ 5864 dev_uc_flush(dev); 5865 dev_mc_flush(dev); 5866 5867 /* Send a netdev-removed uevent to the old namespace */ 5868 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 5869 5870 /* Actually switch the network namespace */ 5871 dev_net_set(dev, net); 5872 5873 /* If there is an ifindex conflict assign a new one */ 5874 if (__dev_get_by_index(net, dev->ifindex)) { 5875 int iflink = (dev->iflink == dev->ifindex); 5876 dev->ifindex = dev_new_index(net); 5877 if (iflink) 5878 dev->iflink = dev->ifindex; 5879 } 5880 5881 /* Send a netdev-add uevent to the new namespace */ 5882 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 5883 5884 /* Fixup kobjects */ 5885 err = device_rename(&dev->dev, dev->name); 5886 WARN_ON(err); 5887 5888 /* Add the device back in the hashes */ 5889 list_netdevice(dev); 5890 5891 /* Notify protocols, that a new device appeared. */ 5892 call_netdevice_notifiers(NETDEV_REGISTER, dev); 5893 5894 /* 5895 * Prevent userspace races by waiting until the network 5896 * device is fully setup before sending notifications. 5897 */ 5898 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); 5899 5900 synchronize_net(); 5901 err = 0; 5902 out: 5903 return err; 5904 } 5905 EXPORT_SYMBOL_GPL(dev_change_net_namespace); 5906 5907 static int dev_cpu_callback(struct notifier_block *nfb, 5908 unsigned long action, 5909 void *ocpu) 5910 { 5911 struct sk_buff **list_skb; 5912 struct sk_buff *skb; 5913 unsigned int cpu, oldcpu = (unsigned long)ocpu; 5914 struct softnet_data *sd, *oldsd; 5915 5916 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 5917 return NOTIFY_OK; 5918 5919 local_irq_disable(); 5920 cpu = smp_processor_id(); 5921 sd = &per_cpu(softnet_data, cpu); 5922 oldsd = &per_cpu(softnet_data, oldcpu); 5923 5924 /* Find end of our completion_queue. 
*/ 5925 list_skb = &sd->completion_queue; 5926 while (*list_skb) 5927 list_skb = &(*list_skb)->next; 5928 /* Append completion queue from offline CPU. */ 5929 *list_skb = oldsd->completion_queue; 5930 oldsd->completion_queue = NULL; 5931 5932 /* Append output queue from offline CPU. */ 5933 if (oldsd->output_queue) { 5934 *sd->output_queue_tailp = oldsd->output_queue; 5935 sd->output_queue_tailp = oldsd->output_queue_tailp; 5936 oldsd->output_queue = NULL; 5937 oldsd->output_queue_tailp = &oldsd->output_queue; 5938 } 5939 /* Append NAPI poll list from offline CPU. */ 5940 if (!list_empty(&oldsd->poll_list)) { 5941 list_splice_init(&oldsd->poll_list, &sd->poll_list); 5942 raise_softirq_irqoff(NET_RX_SOFTIRQ); 5943 } 5944 5945 raise_softirq_irqoff(NET_TX_SOFTIRQ); 5946 local_irq_enable(); 5947 5948 /* Process offline CPU's input_pkt_queue */ 5949 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 5950 netif_rx(skb); 5951 input_queue_head_incr(oldsd); 5952 } 5953 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 5954 netif_rx(skb); 5955 input_queue_head_incr(oldsd); 5956 } 5957 5958 return NOTIFY_OK; 5959 } 5960 5961 5962 /** 5963 * netdev_increment_features - increment feature set by one 5964 * @all: current feature set 5965 * @one: new feature set 5966 * @mask: mask feature set 5967 * 5968 * Computes a new feature set after adding a device with feature set 5969 * @one to the master device with current feature set @all. Will not 5970 * enable anything that is off in @mask. Returns the new feature set. 5971 */ 5972 netdev_features_t netdev_increment_features(netdev_features_t all, 5973 netdev_features_t one, netdev_features_t mask) 5974 { 5975 if (mask & NETIF_F_GEN_CSUM) 5976 mask |= NETIF_F_ALL_CSUM; 5977 mask |= NETIF_F_VLAN_CHALLENGED; 5978 5979 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask; 5980 all &= one | ~NETIF_F_ALL_FOR_ALL; 5981 5982 /* If one device supports hw checksumming, set for all. */ 5983 if (all & NETIF_F_GEN_CSUM) 5984 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); 5985 5986 return all; 5987 } 5988 EXPORT_SYMBOL(netdev_increment_features); 5989 5990 static struct hlist_head *netdev_create_hash(void) 5991 { 5992 int i; 5993 struct hlist_head *hash; 5994 5995 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); 5996 if (hash != NULL) 5997 for (i = 0; i < NETDEV_HASHENTRIES; i++) 5998 INIT_HLIST_HEAD(&hash[i]); 5999 6000 return hash; 6001 } 6002 6003 /* Initialize per network namespace state */ 6004 static int __net_init netdev_init(struct net *net) 6005 { 6006 if (net != &init_net) 6007 INIT_LIST_HEAD(&net->dev_base_head); 6008 6009 net->dev_name_head = netdev_create_hash(); 6010 if (net->dev_name_head == NULL) 6011 goto err_name; 6012 6013 net->dev_index_head = netdev_create_hash(); 6014 if (net->dev_index_head == NULL) 6015 goto err_idx; 6016 6017 return 0; 6018 6019 err_idx: 6020 kfree(net->dev_name_head); 6021 err_name: 6022 return -ENOMEM; 6023 } 6024 6025 /** 6026 * netdev_drivername - network driver for the device 6027 * @dev: network device 6028 * 6029 * Determine network driver for device. 
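 *
 * Illustrative sketch (e.g. when logging a transmit timeout):
 *
 *	pr_warn("%s (%s): transmit timed out\n",
 *		dev->name, netdev_drivername(dev));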
6030 */ 6031 const char *netdev_drivername(const struct net_device *dev) 6032 { 6033 const struct device_driver *driver; 6034 const struct device *parent; 6035 const char *empty = ""; 6036 6037 parent = dev->dev.parent; 6038 if (!parent) 6039 return empty; 6040 6041 driver = parent->driver; 6042 if (driver && driver->name) 6043 return driver->name; 6044 return empty; 6045 } 6046 6047 static int __netdev_printk(const char *level, const struct net_device *dev, 6048 struct va_format *vaf) 6049 { 6050 int r; 6051 6052 if (dev && dev->dev.parent) { 6053 r = dev_printk_emit(level[1] - '0', 6054 dev->dev.parent, 6055 "%s %s %s: %pV", 6056 dev_driver_string(dev->dev.parent), 6057 dev_name(dev->dev.parent), 6058 netdev_name(dev), vaf); 6059 } else if (dev) { 6060 r = printk("%s%s: %pV", level, netdev_name(dev), vaf); 6061 } else { 6062 r = printk("%s(NULL net_device): %pV", level, vaf); 6063 } 6064 6065 return r; 6066 } 6067 6068 int netdev_printk(const char *level, const struct net_device *dev, 6069 const char *format, ...) 6070 { 6071 struct va_format vaf; 6072 va_list args; 6073 int r; 6074 6075 va_start(args, format); 6076 6077 vaf.fmt = format; 6078 vaf.va = &args; 6079 6080 r = __netdev_printk(level, dev, &vaf); 6081 6082 va_end(args); 6083 6084 return r; 6085 } 6086 EXPORT_SYMBOL(netdev_printk); 6087 6088 #define define_netdev_printk_level(func, level) \ 6089 int func(const struct net_device *dev, const char *fmt, ...) \ 6090 { \ 6091 int r; \ 6092 struct va_format vaf; \ 6093 va_list args; \ 6094 \ 6095 va_start(args, fmt); \ 6096 \ 6097 vaf.fmt = fmt; \ 6098 vaf.va = &args; \ 6099 \ 6100 r = __netdev_printk(level, dev, &vaf); \ 6101 \ 6102 va_end(args); \ 6103 \ 6104 return r; \ 6105 } \ 6106 EXPORT_SYMBOL(func); 6107 6108 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 6109 define_netdev_printk_level(netdev_alert, KERN_ALERT); 6110 define_netdev_printk_level(netdev_crit, KERN_CRIT); 6111 define_netdev_printk_level(netdev_err, KERN_ERR); 6112 define_netdev_printk_level(netdev_warn, KERN_WARNING); 6113 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 6114 define_netdev_printk_level(netdev_info, KERN_INFO); 6115 6116 static void __net_exit netdev_exit(struct net *net) 6117 { 6118 kfree(net->dev_name_head); 6119 kfree(net->dev_index_head); 6120 } 6121 6122 static struct pernet_operations __net_initdata netdev_net_ops = { 6123 .init = netdev_init, 6124 .exit = netdev_exit, 6125 }; 6126 6127 static void __net_exit default_device_exit(struct net *net) 6128 { 6129 struct net_device *dev, *aux; 6130 /* 6131 * Push all migratable network devices back to the 6132 * initial network namespace 6133 */ 6134 rtnl_lock(); 6135 for_each_netdev_safe(net, dev, aux) { 6136 int err; 6137 char fb_name[IFNAMSIZ]; 6138 6139 /* Ignore unmoveable devices (i.e. loopback) */ 6140 if (dev->features & NETIF_F_NETNS_LOCAL) 6141 continue; 6142 6143 /* Leave virtual devices for the generic cleanup */ 6144 if (dev->rtnl_link_ops) 6145 continue; 6146 6147 /* Push remaining network devices to init_net */ 6148 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 6149 err = dev_change_net_namespace(dev, &init_net, fb_name); 6150 if (err) { 6151 pr_emerg("%s: failed to move %s to init_net: %d\n", 6152 __func__, dev->name, err); 6153 BUG(); 6154 } 6155 } 6156 rtnl_unlock(); 6157 } 6158 6159 static void __net_exit default_device_exit_batch(struct list_head *net_list) 6160 { 6161 /* At exit all network devices most be removed from a network 6162 * namespace. Do this in the reverse order of registration. 
6163 * Do this across as many network namespaces as possible to 6164 * improve batching efficiency. 6165 */ 6166 struct net_device *dev; 6167 struct net *net; 6168 LIST_HEAD(dev_kill_list); 6169 6170 rtnl_lock(); 6171 list_for_each_entry(net, net_list, exit_list) { 6172 for_each_netdev_reverse(net, dev) { 6173 if (dev->rtnl_link_ops) 6174 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 6175 else 6176 unregister_netdevice_queue(dev, &dev_kill_list); 6177 } 6178 } 6179 unregister_netdevice_many(&dev_kill_list); 6180 list_del(&dev_kill_list); 6181 rtnl_unlock(); 6182 } 6183 6184 static struct pernet_operations __net_initdata default_device_ops = { 6185 .exit = default_device_exit, 6186 .exit_batch = default_device_exit_batch, 6187 }; 6188 6189 /* 6190 * Initialize the DEV module. At boot time this walks the device list and 6191 * unhooks any devices that fail to initialise (normally hardware not 6192 * present) and leaves us with a valid list of present and active devices. 6193 * 6194 */ 6195 6196 /* 6197 * This is called single threaded during boot, so no need 6198 * to take the rtnl semaphore. 6199 */ 6200 static int __init net_dev_init(void) 6201 { 6202 int i, rc = -ENOMEM; 6203 6204 BUG_ON(!dev_boot_phase); 6205 6206 if (dev_proc_init()) 6207 goto out; 6208 6209 if (netdev_kobject_init()) 6210 goto out; 6211 6212 INIT_LIST_HEAD(&ptype_all); 6213 for (i = 0; i < PTYPE_HASH_SIZE; i++) 6214 INIT_LIST_HEAD(&ptype_base[i]); 6215 6216 INIT_LIST_HEAD(&offload_base); 6217 6218 if (register_pernet_subsys(&netdev_net_ops)) 6219 goto out; 6220 6221 /* 6222 * Initialise the packet receive queues. 6223 */ 6224 6225 for_each_possible_cpu(i) { 6226 struct softnet_data *sd = &per_cpu(softnet_data, i); 6227 6228 memset(sd, 0, sizeof(*sd)); 6229 skb_queue_head_init(&sd->input_pkt_queue); 6230 skb_queue_head_init(&sd->process_queue); 6231 sd->completion_queue = NULL; 6232 INIT_LIST_HEAD(&sd->poll_list); 6233 sd->output_queue = NULL; 6234 sd->output_queue_tailp = &sd->output_queue; 6235 #ifdef CONFIG_RPS 6236 sd->csd.func = rps_trigger_softirq; 6237 sd->csd.info = sd; 6238 sd->csd.flags = 0; 6239 sd->cpu = i; 6240 #endif 6241 6242 sd->backlog.poll = process_backlog; 6243 sd->backlog.weight = weight_p; 6244 sd->backlog.gro_list = NULL; 6245 sd->backlog.gro_count = 0; 6246 } 6247 6248 dev_boot_phase = 0; 6249 6250 /* The loopback device is special if any other network devices 6251 * is present in a network namespace the loopback device must 6252 * be present. Since we now dynamically allocate and free the 6253 * loopback device ensure this invariant is maintained by 6254 * keeping the loopback device as the first device on the 6255 * list of network devices. Ensuring the loopback devices 6256 * is the first device that appears and the last network device 6257 * that disappears. 6258 */ 6259 if (register_pernet_device(&loopback_net_ops)) 6260 goto out; 6261 6262 if (register_pernet_device(&default_device_ops)) 6263 goto out; 6264 6265 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 6266 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 6267 6268 hotcpu_notifier(dev_cpu_callback, 0); 6269 dst_init(); 6270 rc = 0; 6271 out: 6272 return rc; 6273 } 6274 6275 subsys_initcall(net_dev_init); 6276