/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *	Pekka Riikonen		:	Netdev boot-time settings code
 *	Andrew Morton		:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *	J Hadi Salim		:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

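/*
 * Illustrative sketch (not part of this file): a minimal out-of-tree module
 * might register a handler for a private ethertype roughly as below.  The
 * ethertype 0x88b5 and the names my_pkt_rcv/my_pkt_type are assumptions made
 * up for the example, not identifiers defined anywhere in the kernel.
 *
 *	static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb here; consume it when done
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pkt_type __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),	// IEEE 802.1 local experimental
 *		.func = my_pkt_rcv,
 *	};
 *
 *	// dev_add_pack(&my_pkt_type) on module init,
 *	// dev_remove_pack(&my_pkt_type) on module exit.
 */
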
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings were found, 1 if they were.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

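/*
 * Illustrative sketch (not part of this file): typical lookup patterns for
 * the name-based helpers above.  "eth0" and the label my_lookup are
 * assumptions made up for the example.
 *
 *	static void my_lookup(struct net *net)
 *	{
 *		struct net_device *dev;
 *
 *		// Refcounted variant: safe in any context, must dev_put().
 *		dev = dev_get_by_name(net, "eth0");
 *		if (dev) {
 *			// ... use dev ...
 *			dev_put(dev);
 *		}
 *
 *		// RCU variant: no refcount taken, pointer only valid
 *		// inside the read-side critical section.
 *		rcu_read_lock();
 *		dev = dev_get_by_name_rcu(net, "eth0");
 *		if (dev)
 *			;	// ... use dev, do not sleep, do not cache ...
 *		rcu_read_unlock();
 *	}
 */
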
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.
		 * There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

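/*
 * Illustrative sketch (not part of this file): a driver that does not care
 * about the exact unit number would typically ask for a wildcard name right
 * before registering the device.  The name my_setup_name is an assumption
 * made up for the example.
 *
 *	static int my_setup_name(struct net_device *dev)
 *	{
 *		int err;
 *
 *		// "eth%d" lets the core pick the first free unit, e.g. eth3
 *		err = dev_alloc_name(dev, "eth%d");
 *		return err < 0 ? err : 0;
 *	}
 */
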
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
			       name);
	}
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on a different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the caller a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

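/*
 * Illustrative sketch (not part of this file): a subsystem that wants to
 * track device registration might hook the notifier chain above roughly as
 * below.  The names my_netdev_event and my_netdev_nb are assumptions made up
 * for the example.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:
 *			// dev has just been registered
 *			break;
 *		case NETDEV_UNREGISTER:
 *			// drop any reference kept on dev
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	// register_netdevice_notifier(&my_netdev_nb) on init,
 *	// unregister_netdevice_notifier(&my_netdev_nb) on exit.
 */
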
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}							\

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					pr_crit("protocol %04x is buggy, dev %s\n",
						ntohs(skb2->protocol),
						dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

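/*
 * Illustrative sketch (not part of this file): a multiqueue driver that can
 * only service a subset of its allocated queues might shrink the active set
 * under RTNL roughly as below.  my_dev_reconfigure and active_queues are
 * assumptions made up for the example.
 *
 *	static int my_dev_reconfigure(struct net_device *dev,
 *				      unsigned int active_queues)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = netif_set_real_num_tx_queues(dev, active_queues);
 *		if (!err)
 *			err = netif_set_real_num_rx_queues(dev, active_queues);
 *		rtnl_unlock();
 *		return err;
 *	}
 */
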
#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
1946 */ 1947 struct sk_buff *skb_gso_segment(struct sk_buff *skb, 1948 netdev_features_t features) 1949 { 1950 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1951 struct packet_type *ptype; 1952 __be16 type = skb->protocol; 1953 int vlan_depth = ETH_HLEN; 1954 int err; 1955 1956 while (type == htons(ETH_P_8021Q)) { 1957 struct vlan_hdr *vh; 1958 1959 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 1960 return ERR_PTR(-EINVAL); 1961 1962 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 1963 type = vh->h_vlan_encapsulated_proto; 1964 vlan_depth += VLAN_HLEN; 1965 } 1966 1967 skb_reset_mac_header(skb); 1968 skb->mac_len = skb->network_header - skb->mac_header; 1969 __skb_pull(skb, skb->mac_len); 1970 1971 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 1972 skb_warn_bad_offload(skb); 1973 1974 if (skb_header_cloned(skb) && 1975 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1976 return ERR_PTR(err); 1977 } 1978 1979 rcu_read_lock(); 1980 list_for_each_entry_rcu(ptype, 1981 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 1982 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1983 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 1984 err = ptype->gso_send_check(skb); 1985 segs = ERR_PTR(err); 1986 if (err || skb_gso_ok(skb, features)) 1987 break; 1988 __skb_push(skb, (skb->data - 1989 skb_network_header(skb))); 1990 } 1991 segs = ptype->gso_segment(skb, features); 1992 break; 1993 } 1994 } 1995 rcu_read_unlock(); 1996 1997 __skb_push(skb, skb->data - skb_mac_header(skb)); 1998 1999 return segs; 2000 } 2001 EXPORT_SYMBOL(skb_gso_segment); 2002 2003 /* Take action when hardware reception checksum errors are detected. */ 2004 #ifdef CONFIG_BUG 2005 void netdev_rx_csum_fault(struct net_device *dev) 2006 { 2007 if (net_ratelimit()) { 2008 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 2009 dump_stack(); 2010 } 2011 } 2012 EXPORT_SYMBOL(netdev_rx_csum_fault); 2013 #endif 2014 2015 /* Actually, we should eliminate this check as soon as we know, that: 2016 * 1. IOMMU is present and allows to map all the memory. 2017 * 2. No high memory really exists on this machine. 2018 */ 2019 2020 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2021 { 2022 #ifdef CONFIG_HIGHMEM 2023 int i; 2024 if (!(dev->features & NETIF_F_HIGHDMA)) { 2025 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2026 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2027 if (PageHighMem(skb_frag_page(frag))) 2028 return 1; 2029 } 2030 } 2031 2032 if (PCI_DMA_BUS_IS_PHYS) { 2033 struct device *pdev = dev->dev.parent; 2034 2035 if (!pdev) 2036 return 0; 2037 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2038 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2039 dma_addr_t addr = page_to_phys(skb_frag_page(frag)); 2040 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2041 return 1; 2042 } 2043 } 2044 #endif 2045 return 0; 2046 } 2047 2048 struct dev_gso_cb { 2049 void (*destructor)(struct sk_buff *skb); 2050 }; 2051 2052 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 2053 2054 static void dev_gso_skb_destructor(struct sk_buff *skb) 2055 { 2056 struct dev_gso_cb *cb; 2057 2058 do { 2059 struct sk_buff *nskb = skb->next; 2060 2061 skb->next = nskb->next; 2062 nskb->next = NULL; 2063 kfree_skb(nskb); 2064 } while (skb->next); 2065 2066 cb = DEV_GSO_CB(skb); 2067 if (cb->destructor) 2068 cb->destructor(skb); 2069 } 2070 2071 /** 2072 * dev_gso_segment - Perform emulated hardware segmentation on skb. 
2073 * @skb: buffer to segment 2074 * @features: device features as applicable to this skb 2075 * 2076 * This function segments the given skb and stores the list of segments 2077 * in skb->next. 2078 */ 2079 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) 2080 { 2081 struct sk_buff *segs; 2082 2083 segs = skb_gso_segment(skb, features); 2084 2085 /* Verifying header integrity only. */ 2086 if (!segs) 2087 return 0; 2088 2089 if (IS_ERR(segs)) 2090 return PTR_ERR(segs); 2091 2092 skb->next = segs; 2093 DEV_GSO_CB(skb)->destructor = skb->destructor; 2094 skb->destructor = dev_gso_skb_destructor; 2095 2096 return 0; 2097 } 2098 2099 /* 2100 * Try to orphan skb early, right before transmission by the device. 2101 * We cannot orphan skb if tx timestamp is requested or the sk-reference 2102 * is needed on driver level for other reasons, e.g. see net/can/raw.c 2103 */ 2104 static inline void skb_orphan_try(struct sk_buff *skb) 2105 { 2106 struct sock *sk = skb->sk; 2107 2108 if (sk && !skb_shinfo(skb)->tx_flags) { 2109 /* skb_tx_hash() wont be able to get sk. 2110 * We copy sk_hash into skb->rxhash 2111 */ 2112 if (!skb->rxhash) 2113 skb->rxhash = sk->sk_hash; 2114 skb_orphan(skb); 2115 } 2116 } 2117 2118 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) 2119 { 2120 return ((features & NETIF_F_GEN_CSUM) || 2121 ((features & NETIF_F_V4_CSUM) && 2122 protocol == htons(ETH_P_IP)) || 2123 ((features & NETIF_F_V6_CSUM) && 2124 protocol == htons(ETH_P_IPV6)) || 2125 ((features & NETIF_F_FCOE_CRC) && 2126 protocol == htons(ETH_P_FCOE))); 2127 } 2128 2129 static netdev_features_t harmonize_features(struct sk_buff *skb, 2130 __be16 protocol, netdev_features_t features) 2131 { 2132 if (!can_checksum_protocol(features, protocol)) { 2133 features &= ~NETIF_F_ALL_CSUM; 2134 features &= ~NETIF_F_SG; 2135 } else if (illegal_highdma(skb->dev, skb)) { 2136 features &= ~NETIF_F_SG; 2137 } 2138 2139 return features; 2140 } 2141 2142 netdev_features_t netif_skb_features(struct sk_buff *skb) 2143 { 2144 __be16 protocol = skb->protocol; 2145 netdev_features_t features = skb->dev->features; 2146 2147 if (protocol == htons(ETH_P_8021Q)) { 2148 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2149 protocol = veh->h_vlan_encapsulated_proto; 2150 } else if (!vlan_tx_tag_present(skb)) { 2151 return harmonize_features(skb, protocol, features); 2152 } 2153 2154 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); 2155 2156 if (protocol != htons(ETH_P_8021Q)) { 2157 return harmonize_features(skb, protocol, features); 2158 } else { 2159 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 2160 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; 2161 return harmonize_features(skb, protocol, features); 2162 } 2163 } 2164 EXPORT_SYMBOL(netif_skb_features); 2165 2166 /* 2167 * Returns true if either: 2168 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2169 * 2. skb is fragmented and the device does not support SG, or if 2170 * at least one of fragments is in highmem and device does not 2171 * support DMA from it. 
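 *
 * When this returns true, dev_hard_start_xmit() falls back to
 * __skb_linearize() before handing the buffer to ndo_start_xmit().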
2172 */ 2173 static inline int skb_needs_linearize(struct sk_buff *skb, 2174 int features) 2175 { 2176 return skb_is_nonlinear(skb) && 2177 ((skb_has_frag_list(skb) && 2178 !(features & NETIF_F_FRAGLIST)) || 2179 (skb_shinfo(skb)->nr_frags && 2180 !(features & NETIF_F_SG))); 2181 } 2182 2183 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2184 struct netdev_queue *txq) 2185 { 2186 const struct net_device_ops *ops = dev->netdev_ops; 2187 int rc = NETDEV_TX_OK; 2188 unsigned int skb_len; 2189 2190 if (likely(!skb->next)) { 2191 netdev_features_t features; 2192 2193 /* 2194 * If device doesn't need skb->dst, release it right now while 2195 * its hot in this cpu cache 2196 */ 2197 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2198 skb_dst_drop(skb); 2199 2200 if (!list_empty(&ptype_all)) 2201 dev_queue_xmit_nit(skb, dev); 2202 2203 skb_orphan_try(skb); 2204 2205 features = netif_skb_features(skb); 2206 2207 if (vlan_tx_tag_present(skb) && 2208 !(features & NETIF_F_HW_VLAN_TX)) { 2209 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2210 if (unlikely(!skb)) 2211 goto out; 2212 2213 skb->vlan_tci = 0; 2214 } 2215 2216 if (netif_needs_gso(skb, features)) { 2217 if (unlikely(dev_gso_segment(skb, features))) 2218 goto out_kfree_skb; 2219 if (skb->next) 2220 goto gso; 2221 } else { 2222 if (skb_needs_linearize(skb, features) && 2223 __skb_linearize(skb)) 2224 goto out_kfree_skb; 2225 2226 /* If packet is not checksummed and device does not 2227 * support checksumming for this protocol, complete 2228 * checksumming here. 2229 */ 2230 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2231 skb_set_transport_header(skb, 2232 skb_checksum_start_offset(skb)); 2233 if (!(features & NETIF_F_ALL_CSUM) && 2234 skb_checksum_help(skb)) 2235 goto out_kfree_skb; 2236 } 2237 } 2238 2239 skb_len = skb->len; 2240 rc = ops->ndo_start_xmit(skb, dev); 2241 trace_net_dev_xmit(skb, rc, dev, skb_len); 2242 if (rc == NETDEV_TX_OK) 2243 txq_trans_update(txq); 2244 return rc; 2245 } 2246 2247 gso: 2248 do { 2249 struct sk_buff *nskb = skb->next; 2250 2251 skb->next = nskb->next; 2252 nskb->next = NULL; 2253 2254 /* 2255 * If device doesn't need nskb->dst, release it right now while 2256 * its hot in this cpu cache 2257 */ 2258 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2259 skb_dst_drop(nskb); 2260 2261 skb_len = nskb->len; 2262 rc = ops->ndo_start_xmit(nskb, dev); 2263 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2264 if (unlikely(rc != NETDEV_TX_OK)) { 2265 if (rc & ~NETDEV_TX_MASK) 2266 goto out_kfree_gso_skb; 2267 nskb->next = skb->next; 2268 skb->next = nskb; 2269 return rc; 2270 } 2271 txq_trans_update(txq); 2272 if (unlikely(netif_xmit_stopped(txq) && skb->next)) 2273 return NETDEV_TX_BUSY; 2274 } while (skb->next); 2275 2276 out_kfree_gso_skb: 2277 if (likely(skb->next == NULL)) 2278 skb->destructor = DEV_GSO_CB(skb)->destructor; 2279 out_kfree_skb: 2280 kfree_skb(skb); 2281 out: 2282 return rc; 2283 } 2284 2285 static u32 hashrnd __read_mostly; 2286 2287 /* 2288 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 2289 * to be used as a distribution range. 
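 * In other words, the flow hash of the skb is mapped onto the range
 * [0, num_tx_queues), honouring any traffic-class offset/count configured
 * on the device.  Most callers go through the skb_tx_hash() wrapper, which
 * supplies dev->real_num_tx_queues as the range (see dev_pick_tx() below).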
2290 */ 2291 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, 2292 unsigned int num_tx_queues) 2293 { 2294 u32 hash; 2295 u16 qoffset = 0; 2296 u16 qcount = num_tx_queues; 2297 2298 if (skb_rx_queue_recorded(skb)) { 2299 hash = skb_get_rx_queue(skb); 2300 while (unlikely(hash >= num_tx_queues)) 2301 hash -= num_tx_queues; 2302 return hash; 2303 } 2304 2305 if (dev->num_tc) { 2306 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 2307 qoffset = dev->tc_to_txq[tc].offset; 2308 qcount = dev->tc_to_txq[tc].count; 2309 } 2310 2311 if (skb->sk && skb->sk->sk_hash) 2312 hash = skb->sk->sk_hash; 2313 else 2314 hash = (__force u16) skb->protocol ^ skb->rxhash; 2315 hash = jhash_1word(hash, hashrnd); 2316 2317 return (u16) (((u64) hash * qcount) >> 32) + qoffset; 2318 } 2319 EXPORT_SYMBOL(__skb_tx_hash); 2320 2321 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 2322 { 2323 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2324 if (net_ratelimit()) { 2325 pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n", 2326 dev->name, queue_index, 2327 dev->real_num_tx_queues); 2328 } 2329 return 0; 2330 } 2331 return queue_index; 2332 } 2333 2334 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 2335 { 2336 #ifdef CONFIG_XPS 2337 struct xps_dev_maps *dev_maps; 2338 struct xps_map *map; 2339 int queue_index = -1; 2340 2341 rcu_read_lock(); 2342 dev_maps = rcu_dereference(dev->xps_maps); 2343 if (dev_maps) { 2344 map = rcu_dereference( 2345 dev_maps->cpu_map[raw_smp_processor_id()]); 2346 if (map) { 2347 if (map->len == 1) 2348 queue_index = map->queues[0]; 2349 else { 2350 u32 hash; 2351 if (skb->sk && skb->sk->sk_hash) 2352 hash = skb->sk->sk_hash; 2353 else 2354 hash = (__force u16) skb->protocol ^ 2355 skb->rxhash; 2356 hash = jhash_1word(hash, hashrnd); 2357 queue_index = map->queues[ 2358 ((u64)hash * map->len) >> 32]; 2359 } 2360 if (unlikely(queue_index >= dev->real_num_tx_queues)) 2361 queue_index = -1; 2362 } 2363 } 2364 rcu_read_unlock(); 2365 2366 return queue_index; 2367 #else 2368 return -1; 2369 #endif 2370 } 2371 2372 static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2373 struct sk_buff *skb) 2374 { 2375 int queue_index; 2376 const struct net_device_ops *ops = dev->netdev_ops; 2377 2378 if (dev->real_num_tx_queues == 1) 2379 queue_index = 0; 2380 else if (ops->ndo_select_queue) { 2381 queue_index = ops->ndo_select_queue(dev, skb); 2382 queue_index = dev_cap_txqueue(dev, queue_index); 2383 } else { 2384 struct sock *sk = skb->sk; 2385 queue_index = sk_tx_queue_get(sk); 2386 2387 if (queue_index < 0 || skb->ooo_okay || 2388 queue_index >= dev->real_num_tx_queues) { 2389 int old_index = queue_index; 2390 2391 queue_index = get_xps_queue(dev, skb); 2392 if (queue_index < 0) 2393 queue_index = skb_tx_hash(dev, skb); 2394 2395 if (queue_index != old_index && sk) { 2396 struct dst_entry *dst = 2397 rcu_dereference_check(sk->sk_dst_cache, 1); 2398 2399 if (dst && skb_dst(skb) == dst) 2400 sk_tx_queue_set(sk, queue_index); 2401 } 2402 } 2403 } 2404 2405 skb_set_queue_mapping(skb, queue_index); 2406 return netdev_get_tx_queue(dev, queue_index); 2407 } 2408 2409 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2410 struct net_device *dev, 2411 struct netdev_queue *txq) 2412 { 2413 spinlock_t *root_lock = qdisc_lock(q); 2414 bool contended; 2415 int rc; 2416 2417 qdisc_skb_cb(skb)->pkt_len = skb->len; 2418 qdisc_calculate_pkt_len(skb, q); 2419 /* 2420 * Heuristic to 
force contended enqueues to serialize on a 2421 * separate lock before trying to get qdisc main lock. 2422 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2423 * and dequeue packets faster. 2424 */ 2425 contended = qdisc_is_running(q); 2426 if (unlikely(contended)) 2427 spin_lock(&q->busylock); 2428 2429 spin_lock(root_lock); 2430 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2431 kfree_skb(skb); 2432 rc = NET_XMIT_DROP; 2433 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2434 qdisc_run_begin(q)) { 2435 /* 2436 * This is a work-conserving queue; there are no old skbs 2437 * waiting to be sent out; and the qdisc is not running - 2438 * xmit the skb directly. 2439 */ 2440 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2441 skb_dst_force(skb); 2442 2443 qdisc_bstats_update(q, skb); 2444 2445 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2446 if (unlikely(contended)) { 2447 spin_unlock(&q->busylock); 2448 contended = false; 2449 } 2450 __qdisc_run(q); 2451 } else 2452 qdisc_run_end(q); 2453 2454 rc = NET_XMIT_SUCCESS; 2455 } else { 2456 skb_dst_force(skb); 2457 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 2458 if (qdisc_run_begin(q)) { 2459 if (unlikely(contended)) { 2460 spin_unlock(&q->busylock); 2461 contended = false; 2462 } 2463 __qdisc_run(q); 2464 } 2465 } 2466 spin_unlock(root_lock); 2467 if (unlikely(contended)) 2468 spin_unlock(&q->busylock); 2469 return rc; 2470 } 2471 2472 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 2473 static void skb_update_prio(struct sk_buff *skb) 2474 { 2475 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2476 2477 if ((!skb->priority) && (skb->sk) && map) 2478 skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx]; 2479 } 2480 #else 2481 #define skb_update_prio(skb) 2482 #endif 2483 2484 static DEFINE_PER_CPU(int, xmit_recursion); 2485 #define RECURSION_LIMIT 10 2486 2487 /** 2488 * dev_queue_xmit - transmit a buffer 2489 * @skb: buffer to transmit 2490 * 2491 * Queue a buffer for transmission to a network device. The caller must 2492 * have set the device and priority and built the buffer before calling 2493 * this function. The function can be called from an interrupt. 2494 * 2495 * A negative errno code is returned on a failure. A success does not 2496 * guarantee the frame will be transmitted as it may be dropped due 2497 * to congestion or traffic shaping. 2498 * 2499 * ----------------------------------------------------------------------------------- 2500 * I notice this method can also return errors from the queue disciplines, 2501 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2502 * be positive. 2503 * 2504 * Regardless of the return value, the skb is consumed, so it is currently 2505 * difficult to retry a send to this method. (You can bump the ref count 2506 * before sending to hold a reference for retry if you are careful.) 2507 * 2508 * When calling this method, interrupts MUST be enabled. This is because 2509 * the BH enable code must have IRQs enabled so that it will not deadlock. 2510 * --BLG 2511 */ 2512 int dev_queue_xmit(struct sk_buff *skb) 2513 { 2514 struct net_device *dev = skb->dev; 2515 struct netdev_queue *txq; 2516 struct Qdisc *q; 2517 int rc = -ENOMEM; 2518 2519 /* Disable soft irqs for various locks below. Also 2520 * stops preemption for RCU. 
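	 * rcu_read_lock_bh() below provides both.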
2521 */ 2522 rcu_read_lock_bh(); 2523 2524 skb_update_prio(skb); 2525 2526 txq = dev_pick_tx(dev, skb); 2527 q = rcu_dereference_bh(txq->qdisc); 2528 2529 #ifdef CONFIG_NET_CLS_ACT 2530 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2531 #endif 2532 trace_net_dev_queue(skb); 2533 if (q->enqueue) { 2534 rc = __dev_xmit_skb(skb, q, dev, txq); 2535 goto out; 2536 } 2537 2538 /* The device has no queue. Common case for software devices: 2539 loopback, all the sorts of tunnels... 2540 2541 Really, it is unlikely that netif_tx_lock protection is necessary 2542 here. (f.e. loopback and IP tunnels are clean ignoring statistics 2543 counters.) 2544 However, it is possible, that they rely on protection 2545 made by us here. 2546 2547 Check this and shot the lock. It is not prone from deadlocks. 2548 Either shot noqueue qdisc, it is even simpler 8) 2549 */ 2550 if (dev->flags & IFF_UP) { 2551 int cpu = smp_processor_id(); /* ok because BHs are off */ 2552 2553 if (txq->xmit_lock_owner != cpu) { 2554 2555 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) 2556 goto recursion_alert; 2557 2558 HARD_TX_LOCK(dev, txq, cpu); 2559 2560 if (!netif_xmit_stopped(txq)) { 2561 __this_cpu_inc(xmit_recursion); 2562 rc = dev_hard_start_xmit(skb, dev, txq); 2563 __this_cpu_dec(xmit_recursion); 2564 if (dev_xmit_complete(rc)) { 2565 HARD_TX_UNLOCK(dev, txq); 2566 goto out; 2567 } 2568 } 2569 HARD_TX_UNLOCK(dev, txq); 2570 if (net_ratelimit()) 2571 pr_crit("Virtual device %s asks to queue packet!\n", 2572 dev->name); 2573 } else { 2574 /* Recursion is detected! It is possible, 2575 * unfortunately 2576 */ 2577 recursion_alert: 2578 if (net_ratelimit()) 2579 pr_crit("Dead loop on virtual device %s, fix it urgently!\n", 2580 dev->name); 2581 } 2582 } 2583 2584 rc = -ENETDOWN; 2585 rcu_read_unlock_bh(); 2586 2587 kfree_skb(skb); 2588 return rc; 2589 out: 2590 rcu_read_unlock_bh(); 2591 return rc; 2592 } 2593 EXPORT_SYMBOL(dev_queue_xmit); 2594 2595 2596 /*======================================================================= 2597 Receiver routines 2598 =======================================================================*/ 2599 2600 int netdev_max_backlog __read_mostly = 1000; 2601 int netdev_tstamp_prequeue __read_mostly = 1; 2602 int netdev_budget __read_mostly = 300; 2603 int weight_p __read_mostly = 64; /* old backlog weight */ 2604 2605 /* Called with irq disabled */ 2606 static inline void ____napi_schedule(struct softnet_data *sd, 2607 struct napi_struct *napi) 2608 { 2609 list_add_tail(&napi->poll_list, &sd->poll_list); 2610 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2611 } 2612 2613 /* 2614 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses 2615 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value 2616 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb 2617 * if hash is a canonical 4-tuple hash over transport ports. 
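 *
 * Callers normally use the skb_get_rxhash() wrapper, which (roughly)
 * computes the hash only once per skb:
 *
 *	if (!skb->rxhash)
 *		__skb_get_rxhash(skb);
 *	return skb->rxhash;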
2618 */ 2619 void __skb_get_rxhash(struct sk_buff *skb) 2620 { 2621 struct flow_keys keys; 2622 u32 hash; 2623 2624 if (!skb_flow_dissect(skb, &keys)) 2625 return; 2626 2627 if (keys.ports) { 2628 if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) 2629 swap(keys.port16[0], keys.port16[1]); 2630 skb->l4_rxhash = 1; 2631 } 2632 2633 /* get a consistent hash (same value on both flow directions) */ 2634 if ((__force u32)keys.dst < (__force u32)keys.src) 2635 swap(keys.dst, keys.src); 2636 2637 hash = jhash_3words((__force u32)keys.dst, 2638 (__force u32)keys.src, 2639 (__force u32)keys.ports, hashrnd); 2640 if (!hash) 2641 hash = 1; 2642 2643 skb->rxhash = hash; 2644 } 2645 EXPORT_SYMBOL(__skb_get_rxhash); 2646 2647 #ifdef CONFIG_RPS 2648 2649 /* One global table that all flow-based protocols share. */ 2650 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2651 EXPORT_SYMBOL(rps_sock_flow_table); 2652 2653 struct static_key rps_needed __read_mostly; 2654 2655 static struct rps_dev_flow * 2656 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2657 struct rps_dev_flow *rflow, u16 next_cpu) 2658 { 2659 if (next_cpu != RPS_NO_CPU) { 2660 #ifdef CONFIG_RFS_ACCEL 2661 struct netdev_rx_queue *rxqueue; 2662 struct rps_dev_flow_table *flow_table; 2663 struct rps_dev_flow *old_rflow; 2664 u32 flow_id; 2665 u16 rxq_index; 2666 int rc; 2667 2668 /* Should we steer this flow to a different hardware queue? */ 2669 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 2670 !(dev->features & NETIF_F_NTUPLE)) 2671 goto out; 2672 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 2673 if (rxq_index == skb_get_rx_queue(skb)) 2674 goto out; 2675 2676 rxqueue = dev->_rx + rxq_index; 2677 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2678 if (!flow_table) 2679 goto out; 2680 flow_id = skb->rxhash & flow_table->mask; 2681 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 2682 rxq_index, flow_id); 2683 if (rc < 0) 2684 goto out; 2685 old_rflow = rflow; 2686 rflow = &flow_table->flows[flow_id]; 2687 rflow->filter = rc; 2688 if (old_rflow->filter == rflow->filter) 2689 old_rflow->filter = RPS_NO_FILTER; 2690 out: 2691 #endif 2692 rflow->last_qtail = 2693 per_cpu(softnet_data, next_cpu).input_queue_head; 2694 } 2695 2696 rflow->cpu = next_cpu; 2697 return rflow; 2698 } 2699 2700 /* 2701 * get_rps_cpu is called from netif_receive_skb and returns the target 2702 * CPU from the RPS map of the receiving queue for a given skb. 2703 * rcu_read_lock must be held on entry. 
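 *
 * A negative return value means "no steering decision"; netif_rx() and
 * netif_receive_skb() then fall back to processing the skb on the
 * local CPU.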
2704 */ 2705 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2706 struct rps_dev_flow **rflowp) 2707 { 2708 struct netdev_rx_queue *rxqueue; 2709 struct rps_map *map; 2710 struct rps_dev_flow_table *flow_table; 2711 struct rps_sock_flow_table *sock_flow_table; 2712 int cpu = -1; 2713 u16 tcpu; 2714 2715 if (skb_rx_queue_recorded(skb)) { 2716 u16 index = skb_get_rx_queue(skb); 2717 if (unlikely(index >= dev->real_num_rx_queues)) { 2718 WARN_ONCE(dev->real_num_rx_queues > 1, 2719 "%s received packet on queue %u, but number " 2720 "of RX queues is %u\n", 2721 dev->name, index, dev->real_num_rx_queues); 2722 goto done; 2723 } 2724 rxqueue = dev->_rx + index; 2725 } else 2726 rxqueue = dev->_rx; 2727 2728 map = rcu_dereference(rxqueue->rps_map); 2729 if (map) { 2730 if (map->len == 1 && 2731 !rcu_access_pointer(rxqueue->rps_flow_table)) { 2732 tcpu = map->cpus[0]; 2733 if (cpu_online(tcpu)) 2734 cpu = tcpu; 2735 goto done; 2736 } 2737 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { 2738 goto done; 2739 } 2740 2741 skb_reset_network_header(skb); 2742 if (!skb_get_rxhash(skb)) 2743 goto done; 2744 2745 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2746 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2747 if (flow_table && sock_flow_table) { 2748 u16 next_cpu; 2749 struct rps_dev_flow *rflow; 2750 2751 rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; 2752 tcpu = rflow->cpu; 2753 2754 next_cpu = sock_flow_table->ents[skb->rxhash & 2755 sock_flow_table->mask]; 2756 2757 /* 2758 * If the desired CPU (where last recvmsg was done) is 2759 * different from current CPU (one in the rx-queue flow 2760 * table entry), switch if one of the following holds: 2761 * - Current CPU is unset (equal to RPS_NO_CPU). 2762 * - Current CPU is offline. 2763 * - The current CPU's queue tail has advanced beyond the 2764 * last packet that was enqueued using this table entry. 2765 * This guarantees that all previous packets for the flow 2766 * have been dequeued, thus preserving in order delivery. 2767 */ 2768 if (unlikely(tcpu != next_cpu) && 2769 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 2770 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 2771 rflow->last_qtail)) >= 0)) 2772 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 2773 2774 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 2775 *rflowp = rflow; 2776 cpu = tcpu; 2777 goto done; 2778 } 2779 } 2780 2781 if (map) { 2782 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2783 2784 if (cpu_online(tcpu)) { 2785 cpu = tcpu; 2786 goto done; 2787 } 2788 } 2789 2790 done: 2791 return cpu; 2792 } 2793 2794 #ifdef CONFIG_RFS_ACCEL 2795 2796 /** 2797 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 2798 * @dev: Device on which the filter was set 2799 * @rxq_index: RX queue index 2800 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 2801 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 2802 * 2803 * Drivers that implement ndo_rx_flow_steer() should periodically call 2804 * this function for each installed filter and remove the filters for 2805 * which it returns %true. 
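 *
 * Illustrative expiry scan on the driver side (my_remove_filter() is a
 * hypothetical driver helper, not part of this API):
 *
 *	for (i = 0; i < n_filters; i++)
 *		if (rps_may_expire_flow(netdev, filter[i].rxq_index,
 *					filter[i].flow_id, i))
 *			my_remove_filter(adapter, i);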
2806 */ 2807 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 2808 u32 flow_id, u16 filter_id) 2809 { 2810 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 2811 struct rps_dev_flow_table *flow_table; 2812 struct rps_dev_flow *rflow; 2813 bool expire = true; 2814 int cpu; 2815 2816 rcu_read_lock(); 2817 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2818 if (flow_table && flow_id <= flow_table->mask) { 2819 rflow = &flow_table->flows[flow_id]; 2820 cpu = ACCESS_ONCE(rflow->cpu); 2821 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 2822 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 2823 rflow->last_qtail) < 2824 (int)(10 * flow_table->mask))) 2825 expire = false; 2826 } 2827 rcu_read_unlock(); 2828 return expire; 2829 } 2830 EXPORT_SYMBOL(rps_may_expire_flow); 2831 2832 #endif /* CONFIG_RFS_ACCEL */ 2833 2834 /* Called from hardirq (IPI) context */ 2835 static void rps_trigger_softirq(void *data) 2836 { 2837 struct softnet_data *sd = data; 2838 2839 ____napi_schedule(sd, &sd->backlog); 2840 sd->received_rps++; 2841 } 2842 2843 #endif /* CONFIG_RPS */ 2844 2845 /* 2846 * Check if this softnet_data structure is another cpu one 2847 * If yes, queue it to our IPI list and return 1 2848 * If no, return 0 2849 */ 2850 static int rps_ipi_queued(struct softnet_data *sd) 2851 { 2852 #ifdef CONFIG_RPS 2853 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 2854 2855 if (sd != mysd) { 2856 sd->rps_ipi_next = mysd->rps_ipi_list; 2857 mysd->rps_ipi_list = sd; 2858 2859 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2860 return 1; 2861 } 2862 #endif /* CONFIG_RPS */ 2863 return 0; 2864 } 2865 2866 /* 2867 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 2868 * queue (may be a remote CPU queue). 2869 */ 2870 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 2871 unsigned int *qtail) 2872 { 2873 struct softnet_data *sd; 2874 unsigned long flags; 2875 2876 sd = &per_cpu(softnet_data, cpu); 2877 2878 local_irq_save(flags); 2879 2880 rps_lock(sd); 2881 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { 2882 if (skb_queue_len(&sd->input_pkt_queue)) { 2883 enqueue: 2884 __skb_queue_tail(&sd->input_pkt_queue, skb); 2885 input_queue_tail_incr_save(sd, qtail); 2886 rps_unlock(sd); 2887 local_irq_restore(flags); 2888 return NET_RX_SUCCESS; 2889 } 2890 2891 /* Schedule NAPI for backlog device 2892 * We can use non atomic operation since we own the queue lock 2893 */ 2894 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 2895 if (!rps_ipi_queued(sd)) 2896 ____napi_schedule(sd, &sd->backlog); 2897 } 2898 goto enqueue; 2899 } 2900 2901 sd->dropped++; 2902 rps_unlock(sd); 2903 2904 local_irq_restore(flags); 2905 2906 atomic_long_inc(&skb->dev->rx_dropped); 2907 kfree_skb(skb); 2908 return NET_RX_DROP; 2909 } 2910 2911 /** 2912 * netif_rx - post buffer to the network code 2913 * @skb: buffer to post 2914 * 2915 * This function receives a packet from a device driver and queues it for 2916 * the upper (protocol) levels to process. It always succeeds. The buffer 2917 * may be dropped during processing for congestion control or by the 2918 * protocol layers. 
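 *
 * Typical use in a non-NAPI driver's receive path (illustrative):
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * NAPI drivers should instead hand buffers to netif_receive_skb() or
 * napi_gro_receive() from their poll routine.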
2919 * 2920 * return values: 2921 * NET_RX_SUCCESS (no congestion) 2922 * NET_RX_DROP (packet was dropped) 2923 * 2924 */ 2925 2926 int netif_rx(struct sk_buff *skb) 2927 { 2928 int ret; 2929 2930 /* if netpoll wants it, pretend we never saw it */ 2931 if (netpoll_rx(skb)) 2932 return NET_RX_DROP; 2933 2934 net_timestamp_check(netdev_tstamp_prequeue, skb); 2935 2936 trace_netif_rx(skb); 2937 #ifdef CONFIG_RPS 2938 if (static_key_false(&rps_needed)) { 2939 struct rps_dev_flow voidflow, *rflow = &voidflow; 2940 int cpu; 2941 2942 preempt_disable(); 2943 rcu_read_lock(); 2944 2945 cpu = get_rps_cpu(skb->dev, skb, &rflow); 2946 if (cpu < 0) 2947 cpu = smp_processor_id(); 2948 2949 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 2950 2951 rcu_read_unlock(); 2952 preempt_enable(); 2953 } else 2954 #endif 2955 { 2956 unsigned int qtail; 2957 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 2958 put_cpu(); 2959 } 2960 return ret; 2961 } 2962 EXPORT_SYMBOL(netif_rx); 2963 2964 int netif_rx_ni(struct sk_buff *skb) 2965 { 2966 int err; 2967 2968 preempt_disable(); 2969 err = netif_rx(skb); 2970 if (local_softirq_pending()) 2971 do_softirq(); 2972 preempt_enable(); 2973 2974 return err; 2975 } 2976 EXPORT_SYMBOL(netif_rx_ni); 2977 2978 static void net_tx_action(struct softirq_action *h) 2979 { 2980 struct softnet_data *sd = &__get_cpu_var(softnet_data); 2981 2982 if (sd->completion_queue) { 2983 struct sk_buff *clist; 2984 2985 local_irq_disable(); 2986 clist = sd->completion_queue; 2987 sd->completion_queue = NULL; 2988 local_irq_enable(); 2989 2990 while (clist) { 2991 struct sk_buff *skb = clist; 2992 clist = clist->next; 2993 2994 WARN_ON(atomic_read(&skb->users)); 2995 trace_kfree_skb(skb, net_tx_action); 2996 __kfree_skb(skb); 2997 } 2998 } 2999 3000 if (sd->output_queue) { 3001 struct Qdisc *head; 3002 3003 local_irq_disable(); 3004 head = sd->output_queue; 3005 sd->output_queue = NULL; 3006 sd->output_queue_tailp = &sd->output_queue; 3007 local_irq_enable(); 3008 3009 while (head) { 3010 struct Qdisc *q = head; 3011 spinlock_t *root_lock; 3012 3013 head = head->next_sched; 3014 3015 root_lock = qdisc_lock(q); 3016 if (spin_trylock(root_lock)) { 3017 smp_mb__before_clear_bit(); 3018 clear_bit(__QDISC_STATE_SCHED, 3019 &q->state); 3020 qdisc_run(q); 3021 spin_unlock(root_lock); 3022 } else { 3023 if (!test_bit(__QDISC_STATE_DEACTIVATED, 3024 &q->state)) { 3025 __netif_reschedule(q); 3026 } else { 3027 smp_mb__before_clear_bit(); 3028 clear_bit(__QDISC_STATE_SCHED, 3029 &q->state); 3030 } 3031 } 3032 } 3033 } 3034 } 3035 3036 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ 3037 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) 3038 /* This hook is defined here for ATM LANE */ 3039 int (*br_fdb_test_addr_hook)(struct net_device *dev, 3040 unsigned char *addr) __read_mostly; 3041 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 3042 #endif 3043 3044 #ifdef CONFIG_NET_CLS_ACT 3045 /* TODO: Maybe we should just force sch_ingress to be compiled in 3046 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 3047 * a compare and 2 stores extra right now if we dont have it on 3048 * but have CONFIG_NET_CLS_ACT 3049 * NOTE: This doesn't stop any functionality; if you dont have 3050 * the ingress scheduler, you just can't add policies on ingress. 
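 * When an ingress qdisc is configured, handle_ing() below runs it via
 * ing_filter() and frees the packet when the verdict is TC_ACT_SHOT or
 * TC_ACT_STOLEN.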
3051 * 3052 */ 3053 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 3054 { 3055 struct net_device *dev = skb->dev; 3056 u32 ttl = G_TC_RTTL(skb->tc_verd); 3057 int result = TC_ACT_OK; 3058 struct Qdisc *q; 3059 3060 if (unlikely(MAX_RED_LOOP < ttl++)) { 3061 if (net_ratelimit()) 3062 pr_warn("Redir loop detected Dropping packet (%d->%d)\n", 3063 skb->skb_iif, dev->ifindex); 3064 return TC_ACT_SHOT; 3065 } 3066 3067 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 3068 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 3069 3070 q = rxq->qdisc; 3071 if (q != &noop_qdisc) { 3072 spin_lock(qdisc_lock(q)); 3073 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 3074 result = qdisc_enqueue_root(skb, q); 3075 spin_unlock(qdisc_lock(q)); 3076 } 3077 3078 return result; 3079 } 3080 3081 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 3082 struct packet_type **pt_prev, 3083 int *ret, struct net_device *orig_dev) 3084 { 3085 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3086 3087 if (!rxq || rxq->qdisc == &noop_qdisc) 3088 goto out; 3089 3090 if (*pt_prev) { 3091 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3092 *pt_prev = NULL; 3093 } 3094 3095 switch (ing_filter(skb, rxq)) { 3096 case TC_ACT_SHOT: 3097 case TC_ACT_STOLEN: 3098 kfree_skb(skb); 3099 return NULL; 3100 } 3101 3102 out: 3103 skb->tc_verd = 0; 3104 return skb; 3105 } 3106 #endif 3107 3108 /** 3109 * netdev_rx_handler_register - register receive handler 3110 * @dev: device to register a handler for 3111 * @rx_handler: receive handler to register 3112 * @rx_handler_data: data pointer that is used by rx handler 3113 * 3114 * Register a receive hander for a device. This handler will then be 3115 * called from __netif_receive_skb. A negative errno code is returned 3116 * on a failure. 3117 * 3118 * The caller must hold the rtnl_mutex. 3119 * 3120 * For a general description of rx_handler, see enum rx_handler_result. 3121 */ 3122 int netdev_rx_handler_register(struct net_device *dev, 3123 rx_handler_func_t *rx_handler, 3124 void *rx_handler_data) 3125 { 3126 ASSERT_RTNL(); 3127 3128 if (dev->rx_handler) 3129 return -EBUSY; 3130 3131 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3132 rcu_assign_pointer(dev->rx_handler, rx_handler); 3133 3134 return 0; 3135 } 3136 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 3137 3138 /** 3139 * netdev_rx_handler_unregister - unregister receive handler 3140 * @dev: device to unregister a handler from 3141 * 3142 * Unregister a receive hander from a device. 3143 * 3144 * The caller must hold the rtnl_mutex. 
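 *
 *	Illustrative pairing with netdev_rx_handler_register(); my_rx_handler
 *	and priv are hypothetical driver names, and error handling is omitted:
 *
 *		rtnl_lock();
 *		err = netdev_rx_handler_register(slave, my_rx_handler, priv);
 *		...
 *		netdev_rx_handler_unregister(slave);
 *		rtnl_unlock();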
3145 */ 3146 void netdev_rx_handler_unregister(struct net_device *dev) 3147 { 3148 3149 ASSERT_RTNL(); 3150 RCU_INIT_POINTER(dev->rx_handler, NULL); 3151 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3152 } 3153 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3154 3155 static int __netif_receive_skb(struct sk_buff *skb) 3156 { 3157 struct packet_type *ptype, *pt_prev; 3158 rx_handler_func_t *rx_handler; 3159 struct net_device *orig_dev; 3160 struct net_device *null_or_dev; 3161 bool deliver_exact = false; 3162 int ret = NET_RX_DROP; 3163 __be16 type; 3164 3165 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3166 3167 trace_netif_receive_skb(skb); 3168 3169 /* if we've gotten here through NAPI, check netpoll */ 3170 if (netpoll_receive_skb(skb)) 3171 return NET_RX_DROP; 3172 3173 if (!skb->skb_iif) 3174 skb->skb_iif = skb->dev->ifindex; 3175 orig_dev = skb->dev; 3176 3177 skb_reset_network_header(skb); 3178 skb_reset_transport_header(skb); 3179 skb_reset_mac_len(skb); 3180 3181 pt_prev = NULL; 3182 3183 rcu_read_lock(); 3184 3185 another_round: 3186 3187 __this_cpu_inc(softnet_data.processed); 3188 3189 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { 3190 skb = vlan_untag(skb); 3191 if (unlikely(!skb)) 3192 goto out; 3193 } 3194 3195 #ifdef CONFIG_NET_CLS_ACT 3196 if (skb->tc_verd & TC_NCLS) { 3197 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3198 goto ncls; 3199 } 3200 #endif 3201 3202 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3203 if (!ptype->dev || ptype->dev == skb->dev) { 3204 if (pt_prev) 3205 ret = deliver_skb(skb, pt_prev, orig_dev); 3206 pt_prev = ptype; 3207 } 3208 } 3209 3210 #ifdef CONFIG_NET_CLS_ACT 3211 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3212 if (!skb) 3213 goto out; 3214 ncls: 3215 #endif 3216 3217 rx_handler = rcu_dereference(skb->dev->rx_handler); 3218 if (vlan_tx_tag_present(skb)) { 3219 if (pt_prev) { 3220 ret = deliver_skb(skb, pt_prev, orig_dev); 3221 pt_prev = NULL; 3222 } 3223 if (vlan_do_receive(&skb, !rx_handler)) 3224 goto another_round; 3225 else if (unlikely(!skb)) 3226 goto out; 3227 } 3228 3229 if (rx_handler) { 3230 if (pt_prev) { 3231 ret = deliver_skb(skb, pt_prev, orig_dev); 3232 pt_prev = NULL; 3233 } 3234 switch (rx_handler(&skb)) { 3235 case RX_HANDLER_CONSUMED: 3236 goto out; 3237 case RX_HANDLER_ANOTHER: 3238 goto another_round; 3239 case RX_HANDLER_EXACT: 3240 deliver_exact = true; 3241 case RX_HANDLER_PASS: 3242 break; 3243 default: 3244 BUG(); 3245 } 3246 } 3247 3248 /* deliver only exact match when indicated */ 3249 null_or_dev = deliver_exact ? skb->dev : NULL; 3250 3251 type = skb->protocol; 3252 list_for_each_entry_rcu(ptype, 3253 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3254 if (ptype->type == type && 3255 (ptype->dev == null_or_dev || ptype->dev == skb->dev || 3256 ptype->dev == orig_dev)) { 3257 if (pt_prev) 3258 ret = deliver_skb(skb, pt_prev, orig_dev); 3259 pt_prev = ptype; 3260 } 3261 } 3262 3263 if (pt_prev) { 3264 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3265 } else { 3266 atomic_long_inc(&skb->dev->rx_dropped); 3267 kfree_skb(skb); 3268 /* Jamal, now you will not able to escape explaining 3269 * me how you were going to use this. :-) 3270 */ 3271 ret = NET_RX_DROP; 3272 } 3273 3274 out: 3275 rcu_read_unlock(); 3276 return ret; 3277 } 3278 3279 /** 3280 * netif_receive_skb - process receive buffer from network 3281 * @skb: buffer to process 3282 * 3283 * netif_receive_skb() is the main receive data processing function. 3284 * It always succeeds. 
The buffer may be dropped during processing 3285 * for congestion control or by the protocol layers. 3286 * 3287 * This function may only be called from softirq context and interrupts 3288 * should be enabled. 3289 * 3290 * Return values (usually ignored): 3291 * NET_RX_SUCCESS: no congestion 3292 * NET_RX_DROP: packet was dropped 3293 */ 3294 int netif_receive_skb(struct sk_buff *skb) 3295 { 3296 net_timestamp_check(netdev_tstamp_prequeue, skb); 3297 3298 if (skb_defer_rx_timestamp(skb)) 3299 return NET_RX_SUCCESS; 3300 3301 #ifdef CONFIG_RPS 3302 if (static_key_false(&rps_needed)) { 3303 struct rps_dev_flow voidflow, *rflow = &voidflow; 3304 int cpu, ret; 3305 3306 rcu_read_lock(); 3307 3308 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3309 3310 if (cpu >= 0) { 3311 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3312 rcu_read_unlock(); 3313 return ret; 3314 } 3315 rcu_read_unlock(); 3316 } 3317 #endif 3318 return __netif_receive_skb(skb); 3319 } 3320 EXPORT_SYMBOL(netif_receive_skb); 3321 3322 /* Network device is going away, flush any packets still pending 3323 * Called with irqs disabled. 3324 */ 3325 static void flush_backlog(void *arg) 3326 { 3327 struct net_device *dev = arg; 3328 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3329 struct sk_buff *skb, *tmp; 3330 3331 rps_lock(sd); 3332 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3333 if (skb->dev == dev) { 3334 __skb_unlink(skb, &sd->input_pkt_queue); 3335 kfree_skb(skb); 3336 input_queue_head_incr(sd); 3337 } 3338 } 3339 rps_unlock(sd); 3340 3341 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3342 if (skb->dev == dev) { 3343 __skb_unlink(skb, &sd->process_queue); 3344 kfree_skb(skb); 3345 input_queue_head_incr(sd); 3346 } 3347 } 3348 } 3349 3350 static int napi_gro_complete(struct sk_buff *skb) 3351 { 3352 struct packet_type *ptype; 3353 __be16 type = skb->protocol; 3354 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3355 int err = -ENOENT; 3356 3357 if (NAPI_GRO_CB(skb)->count == 1) { 3358 skb_shinfo(skb)->gso_size = 0; 3359 goto out; 3360 } 3361 3362 rcu_read_lock(); 3363 list_for_each_entry_rcu(ptype, head, list) { 3364 if (ptype->type != type || ptype->dev || !ptype->gro_complete) 3365 continue; 3366 3367 err = ptype->gro_complete(skb); 3368 break; 3369 } 3370 rcu_read_unlock(); 3371 3372 if (err) { 3373 WARN_ON(&ptype->list == head); 3374 kfree_skb(skb); 3375 return NET_RX_SUCCESS; 3376 } 3377 3378 out: 3379 return netif_receive_skb(skb); 3380 } 3381 3382 inline void napi_gro_flush(struct napi_struct *napi) 3383 { 3384 struct sk_buff *skb, *next; 3385 3386 for (skb = napi->gro_list; skb; skb = next) { 3387 next = skb->next; 3388 skb->next = NULL; 3389 napi_gro_complete(skb); 3390 } 3391 3392 napi->gro_count = 0; 3393 napi->gro_list = NULL; 3394 } 3395 EXPORT_SYMBOL(napi_gro_flush); 3396 3397 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3398 { 3399 struct sk_buff **pp = NULL; 3400 struct packet_type *ptype; 3401 __be16 type = skb->protocol; 3402 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3403 int same_flow; 3404 int mac_len; 3405 enum gro_result ret; 3406 3407 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3408 goto normal; 3409 3410 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3411 goto normal; 3412 3413 rcu_read_lock(); 3414 list_for_each_entry_rcu(ptype, head, list) { 3415 if (ptype->type != type || ptype->dev || !ptype->gro_receive) 3416 continue; 3417 3418 skb_set_network_header(skb, 
skb_gro_offset(skb)); 3419 mac_len = skb->network_header - skb->mac_header; 3420 skb->mac_len = mac_len; 3421 NAPI_GRO_CB(skb)->same_flow = 0; 3422 NAPI_GRO_CB(skb)->flush = 0; 3423 NAPI_GRO_CB(skb)->free = 0; 3424 3425 pp = ptype->gro_receive(&napi->gro_list, skb); 3426 break; 3427 } 3428 rcu_read_unlock(); 3429 3430 if (&ptype->list == head) 3431 goto normal; 3432 3433 same_flow = NAPI_GRO_CB(skb)->same_flow; 3434 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; 3435 3436 if (pp) { 3437 struct sk_buff *nskb = *pp; 3438 3439 *pp = nskb->next; 3440 nskb->next = NULL; 3441 napi_gro_complete(nskb); 3442 napi->gro_count--; 3443 } 3444 3445 if (same_flow) 3446 goto ok; 3447 3448 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) 3449 goto normal; 3450 3451 napi->gro_count++; 3452 NAPI_GRO_CB(skb)->count = 1; 3453 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3454 skb->next = napi->gro_list; 3455 napi->gro_list = skb; 3456 ret = GRO_HELD; 3457 3458 pull: 3459 if (skb_headlen(skb) < skb_gro_offset(skb)) { 3460 int grow = skb_gro_offset(skb) - skb_headlen(skb); 3461 3462 BUG_ON(skb->end - skb->tail < grow); 3463 3464 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3465 3466 skb->tail += grow; 3467 skb->data_len -= grow; 3468 3469 skb_shinfo(skb)->frags[0].page_offset += grow; 3470 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); 3471 3472 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { 3473 skb_frag_unref(skb, 0); 3474 memmove(skb_shinfo(skb)->frags, 3475 skb_shinfo(skb)->frags + 1, 3476 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3477 } 3478 } 3479 3480 ok: 3481 return ret; 3482 3483 normal: 3484 ret = GRO_NORMAL; 3485 goto pull; 3486 } 3487 EXPORT_SYMBOL(dev_gro_receive); 3488 3489 static inline gro_result_t 3490 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3491 { 3492 struct sk_buff *p; 3493 unsigned int maclen = skb->dev->hard_header_len; 3494 3495 for (p = napi->gro_list; p; p = p->next) { 3496 unsigned long diffs; 3497 3498 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3499 diffs |= p->vlan_tci ^ skb->vlan_tci; 3500 if (maclen == ETH_HLEN) 3501 diffs |= compare_ether_header(skb_mac_header(p), 3502 skb_gro_mac_header(skb)); 3503 else if (!diffs) 3504 diffs = memcmp(skb_mac_header(p), 3505 skb_gro_mac_header(skb), 3506 maclen); 3507 NAPI_GRO_CB(p)->same_flow = !diffs; 3508 NAPI_GRO_CB(p)->flush = 0; 3509 } 3510 3511 return dev_gro_receive(napi, skb); 3512 } 3513 3514 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 3515 { 3516 switch (ret) { 3517 case GRO_NORMAL: 3518 if (netif_receive_skb(skb)) 3519 ret = GRO_DROP; 3520 break; 3521 3522 case GRO_DROP: 3523 case GRO_MERGED_FREE: 3524 kfree_skb(skb); 3525 break; 3526 3527 case GRO_HELD: 3528 case GRO_MERGED: 3529 break; 3530 } 3531 3532 return ret; 3533 } 3534 EXPORT_SYMBOL(napi_skb_finish); 3535 3536 void skb_gro_reset_offset(struct sk_buff *skb) 3537 { 3538 NAPI_GRO_CB(skb)->data_offset = 0; 3539 NAPI_GRO_CB(skb)->frag0 = NULL; 3540 NAPI_GRO_CB(skb)->frag0_len = 0; 3541 3542 if (skb->mac_header == skb->tail && 3543 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) { 3544 NAPI_GRO_CB(skb)->frag0 = 3545 skb_frag_address(&skb_shinfo(skb)->frags[0]); 3546 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]); 3547 } 3548 } 3549 EXPORT_SYMBOL(skb_gro_reset_offset); 3550 3551 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3552 { 3553 skb_gro_reset_offset(skb); 3554 3555 return 
napi_skb_finish(__napi_gro_receive(napi, skb), skb); 3556 } 3557 EXPORT_SYMBOL(napi_gro_receive); 3558 3559 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3560 { 3561 __skb_pull(skb, skb_headlen(skb)); 3562 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); 3563 skb->vlan_tci = 0; 3564 skb->dev = napi->dev; 3565 skb->skb_iif = 0; 3566 3567 napi->skb = skb; 3568 } 3569 3570 struct sk_buff *napi_get_frags(struct napi_struct *napi) 3571 { 3572 struct sk_buff *skb = napi->skb; 3573 3574 if (!skb) { 3575 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 3576 if (skb) 3577 napi->skb = skb; 3578 } 3579 return skb; 3580 } 3581 EXPORT_SYMBOL(napi_get_frags); 3582 3583 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 3584 gro_result_t ret) 3585 { 3586 switch (ret) { 3587 case GRO_NORMAL: 3588 case GRO_HELD: 3589 skb->protocol = eth_type_trans(skb, skb->dev); 3590 3591 if (ret == GRO_HELD) 3592 skb_gro_pull(skb, -ETH_HLEN); 3593 else if (netif_receive_skb(skb)) 3594 ret = GRO_DROP; 3595 break; 3596 3597 case GRO_DROP: 3598 case GRO_MERGED_FREE: 3599 napi_reuse_skb(napi, skb); 3600 break; 3601 3602 case GRO_MERGED: 3603 break; 3604 } 3605 3606 return ret; 3607 } 3608 EXPORT_SYMBOL(napi_frags_finish); 3609 3610 struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3611 { 3612 struct sk_buff *skb = napi->skb; 3613 struct ethhdr *eth; 3614 unsigned int hlen; 3615 unsigned int off; 3616 3617 napi->skb = NULL; 3618 3619 skb_reset_mac_header(skb); 3620 skb_gro_reset_offset(skb); 3621 3622 off = skb_gro_offset(skb); 3623 hlen = off + sizeof(*eth); 3624 eth = skb_gro_header_fast(skb, off); 3625 if (skb_gro_header_hard(skb, hlen)) { 3626 eth = skb_gro_header_slow(skb, hlen, off); 3627 if (unlikely(!eth)) { 3628 napi_reuse_skb(napi, skb); 3629 skb = NULL; 3630 goto out; 3631 } 3632 } 3633 3634 skb_gro_pull(skb, sizeof(*eth)); 3635 3636 /* 3637 * This works because the only protocols we care about don't require 3638 * special handling. We'll fix it up properly at the end. 3639 */ 3640 skb->protocol = eth->h_proto; 3641 3642 out: 3643 return skb; 3644 } 3645 EXPORT_SYMBOL(napi_frags_skb); 3646 3647 gro_result_t napi_gro_frags(struct napi_struct *napi) 3648 { 3649 struct sk_buff *skb = napi_frags_skb(napi); 3650 3651 if (!skb) 3652 return GRO_DROP; 3653 3654 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); 3655 } 3656 EXPORT_SYMBOL(napi_gro_frags); 3657 3658 /* 3659 * net_rps_action sends any pending IPI's for rps. 3660 * Note: called with local irq disabled, but exits with local irq enabled. 3661 */ 3662 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 3663 { 3664 #ifdef CONFIG_RPS 3665 struct softnet_data *remsd = sd->rps_ipi_list; 3666 3667 if (remsd) { 3668 sd->rps_ipi_list = NULL; 3669 3670 local_irq_enable(); 3671 3672 /* Send pending IPI's to kick RPS processing on remote cpus. */ 3673 while (remsd) { 3674 struct softnet_data *next = remsd->rps_ipi_next; 3675 3676 if (cpu_online(remsd->cpu)) 3677 __smp_call_function_single(remsd->cpu, 3678 &remsd->csd, 0); 3679 remsd = next; 3680 } 3681 } else 3682 #endif 3683 local_irq_enable(); 3684 } 3685 3686 static int process_backlog(struct napi_struct *napi, int quota) 3687 { 3688 int work = 0; 3689 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 3690 3691 #ifdef CONFIG_RPS 3692 /* Check if we have pending ipi, its better to send them now, 3693 * not waiting net_rx_action() end. 
3694 */ 3695 if (sd->rps_ipi_list) { 3696 local_irq_disable(); 3697 net_rps_action_and_irq_enable(sd); 3698 } 3699 #endif 3700 napi->weight = weight_p; 3701 local_irq_disable(); 3702 while (work < quota) { 3703 struct sk_buff *skb; 3704 unsigned int qlen; 3705 3706 while ((skb = __skb_dequeue(&sd->process_queue))) { 3707 local_irq_enable(); 3708 __netif_receive_skb(skb); 3709 local_irq_disable(); 3710 input_queue_head_incr(sd); 3711 if (++work >= quota) { 3712 local_irq_enable(); 3713 return work; 3714 } 3715 } 3716 3717 rps_lock(sd); 3718 qlen = skb_queue_len(&sd->input_pkt_queue); 3719 if (qlen) 3720 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3721 &sd->process_queue); 3722 3723 if (qlen < quota - work) { 3724 /* 3725 * Inline a custom version of __napi_complete(). 3726 * only current cpu owns and manipulates this napi, 3727 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 3728 * we can use a plain write instead of clear_bit(), 3729 * and we dont need an smp_mb() memory barrier. 3730 */ 3731 list_del(&napi->poll_list); 3732 napi->state = 0; 3733 3734 quota = work + qlen; 3735 } 3736 rps_unlock(sd); 3737 } 3738 local_irq_enable(); 3739 3740 return work; 3741 } 3742 3743 /** 3744 * __napi_schedule - schedule for receive 3745 * @n: entry to schedule 3746 * 3747 * The entry's receive function will be scheduled to run 3748 */ 3749 void __napi_schedule(struct napi_struct *n) 3750 { 3751 unsigned long flags; 3752 3753 local_irq_save(flags); 3754 ____napi_schedule(&__get_cpu_var(softnet_data), n); 3755 local_irq_restore(flags); 3756 } 3757 EXPORT_SYMBOL(__napi_schedule); 3758 3759 void __napi_complete(struct napi_struct *n) 3760 { 3761 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 3762 BUG_ON(n->gro_list); 3763 3764 list_del(&n->poll_list); 3765 smp_mb__before_clear_bit(); 3766 clear_bit(NAPI_STATE_SCHED, &n->state); 3767 } 3768 EXPORT_SYMBOL(__napi_complete); 3769 3770 void napi_complete(struct napi_struct *n) 3771 { 3772 unsigned long flags; 3773 3774 /* 3775 * don't let napi dequeue from the cpu poll list 3776 * just in case its running on a different cpu 3777 */ 3778 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 3779 return; 3780 3781 napi_gro_flush(n); 3782 local_irq_save(flags); 3783 __napi_complete(n); 3784 local_irq_restore(flags); 3785 } 3786 EXPORT_SYMBOL(napi_complete); 3787 3788 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 3789 int (*poll)(struct napi_struct *, int), int weight) 3790 { 3791 INIT_LIST_HEAD(&napi->poll_list); 3792 napi->gro_count = 0; 3793 napi->gro_list = NULL; 3794 napi->skb = NULL; 3795 napi->poll = poll; 3796 napi->weight = weight; 3797 list_add(&napi->dev_list, &dev->napi_list); 3798 napi->dev = dev; 3799 #ifdef CONFIG_NETPOLL 3800 spin_lock_init(&napi->poll_lock); 3801 napi->poll_owner = -1; 3802 #endif 3803 set_bit(NAPI_STATE_SCHED, &napi->state); 3804 } 3805 EXPORT_SYMBOL(netif_napi_add); 3806 3807 void netif_napi_del(struct napi_struct *napi) 3808 { 3809 struct sk_buff *skb, *next; 3810 3811 list_del_init(&napi->dev_list); 3812 napi_free_frags(napi); 3813 3814 for (skb = napi->gro_list; skb; skb = next) { 3815 next = skb->next; 3816 skb->next = NULL; 3817 kfree_skb(skb); 3818 } 3819 3820 napi->gro_list = NULL; 3821 napi->gro_count = 0; 3822 } 3823 EXPORT_SYMBOL(netif_napi_del); 3824 3825 static void net_rx_action(struct softirq_action *h) 3826 { 3827 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3828 unsigned long time_limit = jiffies + 2; 3829 int budget = netdev_budget; 3830 void *have; 3831 
3832 local_irq_disable(); 3833 3834 while (!list_empty(&sd->poll_list)) { 3835 struct napi_struct *n; 3836 int work, weight; 3837 3838 /* If softirq window is exhuasted then punt. 3839 * Allow this to run for 2 jiffies since which will allow 3840 * an average latency of 1.5/HZ. 3841 */ 3842 if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) 3843 goto softnet_break; 3844 3845 local_irq_enable(); 3846 3847 /* Even though interrupts have been re-enabled, this 3848 * access is safe because interrupts can only add new 3849 * entries to the tail of this list, and only ->poll() 3850 * calls can remove this head entry from the list. 3851 */ 3852 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); 3853 3854 have = netpoll_poll_lock(n); 3855 3856 weight = n->weight; 3857 3858 /* This NAPI_STATE_SCHED test is for avoiding a race 3859 * with netpoll's poll_napi(). Only the entity which 3860 * obtains the lock and sees NAPI_STATE_SCHED set will 3861 * actually make the ->poll() call. Therefore we avoid 3862 * accidentally calling ->poll() when NAPI is not scheduled. 3863 */ 3864 work = 0; 3865 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 3866 work = n->poll(n, weight); 3867 trace_napi_poll(n); 3868 } 3869 3870 WARN_ON_ONCE(work > weight); 3871 3872 budget -= work; 3873 3874 local_irq_disable(); 3875 3876 /* Drivers must not modify the NAPI state if they 3877 * consume the entire weight. In such cases this code 3878 * still "owns" the NAPI instance and therefore can 3879 * move the instance around on the list at-will. 3880 */ 3881 if (unlikely(work == weight)) { 3882 if (unlikely(napi_disable_pending(n))) { 3883 local_irq_enable(); 3884 napi_complete(n); 3885 local_irq_disable(); 3886 } else 3887 list_move_tail(&n->poll_list, &sd->poll_list); 3888 } 3889 3890 netpoll_poll_unlock(have); 3891 } 3892 out: 3893 net_rps_action_and_irq_enable(sd); 3894 3895 #ifdef CONFIG_NET_DMA 3896 /* 3897 * There may not be any more sk_buffs coming right now, so push 3898 * any pending DMA copies to hardware 3899 */ 3900 dma_issue_pending_all(); 3901 #endif 3902 3903 return; 3904 3905 softnet_break: 3906 sd->time_squeeze++; 3907 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3908 goto out; 3909 } 3910 3911 static gifconf_func_t *gifconf_list[NPROTO]; 3912 3913 /** 3914 * register_gifconf - register a SIOCGIF handler 3915 * @family: Address family 3916 * @gifconf: Function handler 3917 * 3918 * Register protocol dependent address dumping routines. The handler 3919 * that is passed must not be freed or reused until it has been replaced 3920 * by another handler. 3921 */ 3922 int register_gifconf(unsigned int family, gifconf_func_t *gifconf) 3923 { 3924 if (family >= NPROTO) 3925 return -EINVAL; 3926 gifconf_list[family] = gifconf; 3927 return 0; 3928 } 3929 EXPORT_SYMBOL(register_gifconf); 3930 3931 3932 /* 3933 * Map an interface index to its name (SIOCGIFNAME) 3934 */ 3935 3936 /* 3937 * We need this ioctl for efficient implementation of the 3938 * if_indextoname() function required by the IPv6 API. Without 3939 * it, we would have to search all the interfaces to find a 3940 * match. --pb 3941 */ 3942 3943 static int dev_ifname(struct net *net, struct ifreq __user *arg) 3944 { 3945 struct net_device *dev; 3946 struct ifreq ifr; 3947 3948 /* 3949 * Fetch the caller's info block. 
3950 */ 3951 3952 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 3953 return -EFAULT; 3954 3955 rcu_read_lock(); 3956 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); 3957 if (!dev) { 3958 rcu_read_unlock(); 3959 return -ENODEV; 3960 } 3961 3962 strcpy(ifr.ifr_name, dev->name); 3963 rcu_read_unlock(); 3964 3965 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 3966 return -EFAULT; 3967 return 0; 3968 } 3969 3970 /* 3971 * Perform a SIOCGIFCONF call. This structure will change 3972 * size eventually, and there is nothing I can do about it. 3973 * Thus we will need a 'compatibility mode'. 3974 */ 3975 3976 static int dev_ifconf(struct net *net, char __user *arg) 3977 { 3978 struct ifconf ifc; 3979 struct net_device *dev; 3980 char __user *pos; 3981 int len; 3982 int total; 3983 int i; 3984 3985 /* 3986 * Fetch the caller's info block. 3987 */ 3988 3989 if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) 3990 return -EFAULT; 3991 3992 pos = ifc.ifc_buf; 3993 len = ifc.ifc_len; 3994 3995 /* 3996 * Loop over the interfaces, and write an info block for each. 3997 */ 3998 3999 total = 0; 4000 for_each_netdev(net, dev) { 4001 for (i = 0; i < NPROTO; i++) { 4002 if (gifconf_list[i]) { 4003 int done; 4004 if (!pos) 4005 done = gifconf_list[i](dev, NULL, 0); 4006 else 4007 done = gifconf_list[i](dev, pos + total, 4008 len - total); 4009 if (done < 0) 4010 return -EFAULT; 4011 total += done; 4012 } 4013 } 4014 } 4015 4016 /* 4017 * All done. Write the updated control block back to the caller. 4018 */ 4019 ifc.ifc_len = total; 4020 4021 /* 4022 * Both BSD and Solaris return 0 here, so we do too. 4023 */ 4024 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; 4025 } 4026 4027 #ifdef CONFIG_PROC_FS 4028 4029 #define BUCKET_SPACE (32 - NETDEV_HASHBITS) 4030 4031 struct dev_iter_state { 4032 struct seq_net_private p; 4033 unsigned int pos; /* bucket << BUCKET_SPACE + offset */ 4034 }; 4035 4036 #define get_bucket(x) ((x) >> BUCKET_SPACE) 4037 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) 4038 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 4039 4040 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq) 4041 { 4042 struct dev_iter_state *state = seq->private; 4043 struct net *net = seq_file_net(seq); 4044 struct net_device *dev; 4045 struct hlist_node *p; 4046 struct hlist_head *h; 4047 unsigned int count, bucket, offset; 4048 4049 bucket = get_bucket(state->pos); 4050 offset = get_offset(state->pos); 4051 h = &net->dev_name_head[bucket]; 4052 count = 0; 4053 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 4054 if (count++ == offset) { 4055 state->pos = set_bucket_offset(bucket, count); 4056 return dev; 4057 } 4058 } 4059 4060 return NULL; 4061 } 4062 4063 static inline struct net_device *dev_from_new_bucket(struct seq_file *seq) 4064 { 4065 struct dev_iter_state *state = seq->private; 4066 struct net_device *dev; 4067 unsigned int bucket; 4068 4069 bucket = get_bucket(state->pos); 4070 do { 4071 dev = dev_from_same_bucket(seq); 4072 if (dev) 4073 return dev; 4074 4075 bucket++; 4076 state->pos = set_bucket_offset(bucket, 0); 4077 } while (bucket < NETDEV_HASHENTRIES); 4078 4079 return NULL; 4080 } 4081 4082 /* 4083 * This is invoked by the /proc filesystem handler to display a device 4084 * in detail. 
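 * It is the ->start() hook of the seq_file start/next/stop/show cycle
 * wired up in dev_seq_ops below.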
4085 */ 4086 void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4087 __acquires(RCU) 4088 { 4089 struct dev_iter_state *state = seq->private; 4090 4091 rcu_read_lock(); 4092 if (!*pos) 4093 return SEQ_START_TOKEN; 4094 4095 /* check for end of the hash */ 4096 if (state->pos == 0 && *pos > 1) 4097 return NULL; 4098 4099 return dev_from_new_bucket(seq); 4100 } 4101 4102 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4103 { 4104 struct net_device *dev; 4105 4106 ++*pos; 4107 4108 if (v == SEQ_START_TOKEN) 4109 return dev_from_new_bucket(seq); 4110 4111 dev = dev_from_same_bucket(seq); 4112 if (dev) 4113 return dev; 4114 4115 return dev_from_new_bucket(seq); 4116 } 4117 4118 void dev_seq_stop(struct seq_file *seq, void *v) 4119 __releases(RCU) 4120 { 4121 rcu_read_unlock(); 4122 } 4123 4124 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) 4125 { 4126 struct rtnl_link_stats64 temp; 4127 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); 4128 4129 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " 4130 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", 4131 dev->name, stats->rx_bytes, stats->rx_packets, 4132 stats->rx_errors, 4133 stats->rx_dropped + stats->rx_missed_errors, 4134 stats->rx_fifo_errors, 4135 stats->rx_length_errors + stats->rx_over_errors + 4136 stats->rx_crc_errors + stats->rx_frame_errors, 4137 stats->rx_compressed, stats->multicast, 4138 stats->tx_bytes, stats->tx_packets, 4139 stats->tx_errors, stats->tx_dropped, 4140 stats->tx_fifo_errors, stats->collisions, 4141 stats->tx_carrier_errors + 4142 stats->tx_aborted_errors + 4143 stats->tx_window_errors + 4144 stats->tx_heartbeat_errors, 4145 stats->tx_compressed); 4146 } 4147 4148 /* 4149 * Called from the PROCfs module. 
This now uses the new arbitrary sized 4150 * /proc/net interface to create /proc/net/dev 4151 */ 4152 static int dev_seq_show(struct seq_file *seq, void *v) 4153 { 4154 if (v == SEQ_START_TOKEN) 4155 seq_puts(seq, "Inter-| Receive " 4156 " | Transmit\n" 4157 " face |bytes packets errs drop fifo frame " 4158 "compressed multicast|bytes packets errs " 4159 "drop fifo colls carrier compressed\n"); 4160 else 4161 dev_seq_printf_stats(seq, v); 4162 return 0; 4163 } 4164 4165 static struct softnet_data *softnet_get_online(loff_t *pos) 4166 { 4167 struct softnet_data *sd = NULL; 4168 4169 while (*pos < nr_cpu_ids) 4170 if (cpu_online(*pos)) { 4171 sd = &per_cpu(softnet_data, *pos); 4172 break; 4173 } else 4174 ++*pos; 4175 return sd; 4176 } 4177 4178 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) 4179 { 4180 return softnet_get_online(pos); 4181 } 4182 4183 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4184 { 4185 ++*pos; 4186 return softnet_get_online(pos); 4187 } 4188 4189 static void softnet_seq_stop(struct seq_file *seq, void *v) 4190 { 4191 } 4192 4193 static int softnet_seq_show(struct seq_file *seq, void *v) 4194 { 4195 struct softnet_data *sd = v; 4196 4197 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 4198 sd->processed, sd->dropped, sd->time_squeeze, 0, 4199 0, 0, 0, 0, /* was fastroute */ 4200 sd->cpu_collision, sd->received_rps); 4201 return 0; 4202 } 4203 4204 static const struct seq_operations dev_seq_ops = { 4205 .start = dev_seq_start, 4206 .next = dev_seq_next, 4207 .stop = dev_seq_stop, 4208 .show = dev_seq_show, 4209 }; 4210 4211 static int dev_seq_open(struct inode *inode, struct file *file) 4212 { 4213 return seq_open_net(inode, file, &dev_seq_ops, 4214 sizeof(struct dev_iter_state)); 4215 } 4216 4217 int dev_seq_open_ops(struct inode *inode, struct file *file, 4218 const struct seq_operations *ops) 4219 { 4220 return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state)); 4221 } 4222 4223 static const struct file_operations dev_seq_fops = { 4224 .owner = THIS_MODULE, 4225 .open = dev_seq_open, 4226 .read = seq_read, 4227 .llseek = seq_lseek, 4228 .release = seq_release_net, 4229 }; 4230 4231 static const struct seq_operations softnet_seq_ops = { 4232 .start = softnet_seq_start, 4233 .next = softnet_seq_next, 4234 .stop = softnet_seq_stop, 4235 .show = softnet_seq_show, 4236 }; 4237 4238 static int softnet_seq_open(struct inode *inode, struct file *file) 4239 { 4240 return seq_open(file, &softnet_seq_ops); 4241 } 4242 4243 static const struct file_operations softnet_seq_fops = { 4244 .owner = THIS_MODULE, 4245 .open = softnet_seq_open, 4246 .read = seq_read, 4247 .llseek = seq_lseek, 4248 .release = seq_release, 4249 }; 4250 4251 static void *ptype_get_idx(loff_t pos) 4252 { 4253 struct packet_type *pt = NULL; 4254 loff_t i = 0; 4255 int t; 4256 4257 list_for_each_entry_rcu(pt, &ptype_all, list) { 4258 if (i == pos) 4259 return pt; 4260 ++i; 4261 } 4262 4263 for (t = 0; t < PTYPE_HASH_SIZE; t++) { 4264 list_for_each_entry_rcu(pt, &ptype_base[t], list) { 4265 if (i == pos) 4266 return pt; 4267 ++i; 4268 } 4269 } 4270 return NULL; 4271 } 4272 4273 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) 4274 __acquires(RCU) 4275 { 4276 rcu_read_lock(); 4277 return *pos ? 
ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; 4278 } 4279 4280 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4281 { 4282 struct packet_type *pt; 4283 struct list_head *nxt; 4284 int hash; 4285 4286 ++*pos; 4287 if (v == SEQ_START_TOKEN) 4288 return ptype_get_idx(0); 4289 4290 pt = v; 4291 nxt = pt->list.next; 4292 if (pt->type == htons(ETH_P_ALL)) { 4293 if (nxt != &ptype_all) 4294 goto found; 4295 hash = 0; 4296 nxt = ptype_base[0].next; 4297 } else 4298 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 4299 4300 while (nxt == &ptype_base[hash]) { 4301 if (++hash >= PTYPE_HASH_SIZE) 4302 return NULL; 4303 nxt = ptype_base[hash].next; 4304 } 4305 found: 4306 return list_entry(nxt, struct packet_type, list); 4307 } 4308 4309 static void ptype_seq_stop(struct seq_file *seq, void *v) 4310 __releases(RCU) 4311 { 4312 rcu_read_unlock(); 4313 } 4314 4315 static int ptype_seq_show(struct seq_file *seq, void *v) 4316 { 4317 struct packet_type *pt = v; 4318 4319 if (v == SEQ_START_TOKEN) 4320 seq_puts(seq, "Type Device Function\n"); 4321 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 4322 if (pt->type == htons(ETH_P_ALL)) 4323 seq_puts(seq, "ALL "); 4324 else 4325 seq_printf(seq, "%04x", ntohs(pt->type)); 4326 4327 seq_printf(seq, " %-8s %pF\n", 4328 pt->dev ? pt->dev->name : "", pt->func); 4329 } 4330 4331 return 0; 4332 } 4333 4334 static const struct seq_operations ptype_seq_ops = { 4335 .start = ptype_seq_start, 4336 .next = ptype_seq_next, 4337 .stop = ptype_seq_stop, 4338 .show = ptype_seq_show, 4339 }; 4340 4341 static int ptype_seq_open(struct inode *inode, struct file *file) 4342 { 4343 return seq_open_net(inode, file, &ptype_seq_ops, 4344 sizeof(struct seq_net_private)); 4345 } 4346 4347 static const struct file_operations ptype_seq_fops = { 4348 .owner = THIS_MODULE, 4349 .open = ptype_seq_open, 4350 .read = seq_read, 4351 .llseek = seq_lseek, 4352 .release = seq_release_net, 4353 }; 4354 4355 4356 static int __net_init dev_proc_net_init(struct net *net) 4357 { 4358 int rc = -ENOMEM; 4359 4360 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) 4361 goto out; 4362 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) 4363 goto out_dev; 4364 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) 4365 goto out_softnet; 4366 4367 if (wext_proc_init(net)) 4368 goto out_ptype; 4369 rc = 0; 4370 out: 4371 return rc; 4372 out_ptype: 4373 proc_net_remove(net, "ptype"); 4374 out_softnet: 4375 proc_net_remove(net, "softnet_stat"); 4376 out_dev: 4377 proc_net_remove(net, "dev"); 4378 goto out; 4379 } 4380 4381 static void __net_exit dev_proc_net_exit(struct net *net) 4382 { 4383 wext_proc_exit(net); 4384 4385 proc_net_remove(net, "ptype"); 4386 proc_net_remove(net, "softnet_stat"); 4387 proc_net_remove(net, "dev"); 4388 } 4389 4390 static struct pernet_operations __net_initdata dev_proc_ops = { 4391 .init = dev_proc_net_init, 4392 .exit = dev_proc_net_exit, 4393 }; 4394 4395 static int __init dev_proc_init(void) 4396 { 4397 return register_pernet_subsys(&dev_proc_ops); 4398 } 4399 #else 4400 #define dev_proc_init() 0 4401 #endif /* CONFIG_PROC_FS */ 4402 4403 4404 /** 4405 * netdev_set_master - set up master pointer 4406 * @slave: slave device 4407 * @master: new master device 4408 * 4409 * Changes the master device of the slave. Pass %NULL to break the 4410 * bonding. The caller must hold the RTNL semaphore. On a failure 4411 * a negative errno code is returned. 
On success the reference counts 4412 * are adjusted and the function returns zero. 4413 */ 4414 int netdev_set_master(struct net_device *slave, struct net_device *master) 4415 { 4416 struct net_device *old = slave->master; 4417 4418 ASSERT_RTNL(); 4419 4420 if (master) { 4421 if (old) 4422 return -EBUSY; 4423 dev_hold(master); 4424 } 4425 4426 slave->master = master; 4427 4428 if (old) 4429 dev_put(old); 4430 return 0; 4431 } 4432 EXPORT_SYMBOL(netdev_set_master); 4433 4434 /** 4435 * netdev_set_bond_master - set up bonding master/slave pair 4436 * @slave: slave device 4437 * @master: new master device 4438 * 4439 * Changes the master device of the slave. Pass %NULL to break the 4440 * bonding. The caller must hold the RTNL semaphore. On a failure 4441 * a negative errno code is returned. On success %RTM_NEWLINK is sent 4442 * to the routing socket and the function returns zero. 4443 */ 4444 int netdev_set_bond_master(struct net_device *slave, struct net_device *master) 4445 { 4446 int err; 4447 4448 ASSERT_RTNL(); 4449 4450 err = netdev_set_master(slave, master); 4451 if (err) 4452 return err; 4453 if (master) 4454 slave->flags |= IFF_SLAVE; 4455 else 4456 slave->flags &= ~IFF_SLAVE; 4457 4458 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); 4459 return 0; 4460 } 4461 EXPORT_SYMBOL(netdev_set_bond_master); 4462 4463 static void dev_change_rx_flags(struct net_device *dev, int flags) 4464 { 4465 const struct net_device_ops *ops = dev->netdev_ops; 4466 4467 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) 4468 ops->ndo_change_rx_flags(dev, flags); 4469 } 4470 4471 static int __dev_set_promiscuity(struct net_device *dev, int inc) 4472 { 4473 unsigned int old_flags = dev->flags; 4474 uid_t uid; 4475 gid_t gid; 4476 4477 ASSERT_RTNL(); 4478 4479 dev->flags |= IFF_PROMISC; 4480 dev->promiscuity += inc; 4481 if (dev->promiscuity == 0) { 4482 /* 4483 * Avoid overflow. 4484 * If inc causes overflow, leave promisc untouched and return an error. 4485 */ 4486 if (inc < 0) 4487 dev->flags &= ~IFF_PROMISC; 4488 else { 4489 dev->promiscuity -= inc; 4490 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 4491 dev->name); 4492 return -EOVERFLOW; 4493 } 4494 } 4495 if (dev->flags != old_flags) { 4496 pr_info("device %s %s promiscuous mode\n", 4497 dev->name, 4498 dev->flags & IFF_PROMISC ? "entered" : "left"); 4499 if (audit_enabled) { 4500 current_uid_gid(&uid, &gid); 4501 audit_log(current->audit_context, GFP_ATOMIC, 4502 AUDIT_ANOM_PROMISCUOUS, 4503 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 4504 dev->name, (dev->flags & IFF_PROMISC), 4505 (old_flags & IFF_PROMISC), 4506 audit_get_loginuid(current), 4507 uid, gid, 4508 audit_get_sessionid(current)); 4509 } 4510 4511 dev_change_rx_flags(dev, IFF_PROMISC); 4512 } 4513 return 0; 4514 } 4515 4516 /** 4517 * dev_set_promiscuity - update promiscuity count on a device 4518 * @dev: device 4519 * @inc: modifier 4520 * 4521 * Add or remove promiscuity from a device. While the count in the device 4522 * remains above zero the interface remains promiscuous. Once it hits zero 4523 * the device reverts to normal filtering operation. A negative @inc 4524 * value is used to drop promiscuity on the device. 4525 * Return 0 if successful or a negative errno code on error.
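 *
 * A minimal usage sketch (the calls must run under RTNL, which
 * __dev_set_promiscuity() asserts):
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	...
 *	if (!err)
 *		dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();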
4526 */ 4527 int dev_set_promiscuity(struct net_device *dev, int inc) 4528 { 4529 unsigned int old_flags = dev->flags; 4530 int err; 4531 4532 err = __dev_set_promiscuity(dev, inc); 4533 if (err < 0) 4534 return err; 4535 if (dev->flags != old_flags) 4536 dev_set_rx_mode(dev); 4537 return err; 4538 } 4539 EXPORT_SYMBOL(dev_set_promiscuity); 4540 4541 /** 4542 * dev_set_allmulti - update allmulti count on a device 4543 * @dev: device 4544 * @inc: modifier 4545 * 4546 * Add or remove reception of all multicast frames on a device. While the 4547 * count in the device remains above zero the interface remains listening 4548 * to all multicast frames. Once it hits zero the device reverts to normal 4549 * filtering operation. A negative @inc value is used to drop the counter 4550 * when releasing a resource needing all multicasts. 4551 * Return 0 if successful or a negative errno code on error. 4552 */ 4553 4554 int dev_set_allmulti(struct net_device *dev, int inc) 4555 { 4556 unsigned int old_flags = dev->flags; 4557 4558 ASSERT_RTNL(); 4559 4560 dev->flags |= IFF_ALLMULTI; 4561 dev->allmulti += inc; 4562 if (dev->allmulti == 0) { 4563 /* 4564 * Avoid overflow. 4565 * If inc causes overflow, leave allmulti untouched and return an error. 4566 */ 4567 if (inc < 0) 4568 dev->flags &= ~IFF_ALLMULTI; 4569 else { 4570 dev->allmulti -= inc; 4571 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 4572 dev->name); 4573 return -EOVERFLOW; 4574 } 4575 } 4576 if (dev->flags ^ old_flags) { 4577 dev_change_rx_flags(dev, IFF_ALLMULTI); 4578 dev_set_rx_mode(dev); 4579 } 4580 return 0; 4581 } 4582 EXPORT_SYMBOL(dev_set_allmulti); 4583 4584 /* 4585 * Upload unicast and multicast address lists to device and 4586 * configure RX filtering. When the device doesn't support unicast 4587 * filtering it is put in promiscuous mode while unicast addresses 4588 * are present. 4589 */ 4590 void __dev_set_rx_mode(struct net_device *dev) 4591 { 4592 const struct net_device_ops *ops = dev->netdev_ops; 4593 4594 /* dev_open will call this function so the list will stay sane. */ 4595 if (!(dev->flags&IFF_UP)) 4596 return; 4597 4598 if (!netif_device_present(dev)) 4599 return; 4600 4601 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 4602 /* Unicast address changes may only happen under the rtnl, 4603 * therefore calling __dev_set_promiscuity here is safe. 4604 */ 4605 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 4606 __dev_set_promiscuity(dev, 1); 4607 dev->uc_promisc = true; 4608 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 4609 __dev_set_promiscuity(dev, -1); 4610 dev->uc_promisc = false; 4611 } 4612 } 4613 4614 if (ops->ndo_set_rx_mode) 4615 ops->ndo_set_rx_mode(dev); 4616 } 4617 4618 void dev_set_rx_mode(struct net_device *dev) 4619 { 4620 netif_addr_lock_bh(dev); 4621 __dev_set_rx_mode(dev); 4622 netif_addr_unlock_bh(dev); 4623 } 4624 4625 /** 4626 * dev_get_flags - get flags reported to userspace 4627 * @dev: device 4628 * 4629 * Get the combination of flag bits exported through APIs to userspace.
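 *
 * For example, the SIOCGIFFLAGS handler below reports flags to user
 * space simply as:
 *
 *	ifr->ifr_flags = (short) dev_get_flags(dev);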
4630 */ 4631 unsigned dev_get_flags(const struct net_device *dev) 4632 { 4633 unsigned flags; 4634 4635 flags = (dev->flags & ~(IFF_PROMISC | 4636 IFF_ALLMULTI | 4637 IFF_RUNNING | 4638 IFF_LOWER_UP | 4639 IFF_DORMANT)) | 4640 (dev->gflags & (IFF_PROMISC | 4641 IFF_ALLMULTI)); 4642 4643 if (netif_running(dev)) { 4644 if (netif_oper_up(dev)) 4645 flags |= IFF_RUNNING; 4646 if (netif_carrier_ok(dev)) 4647 flags |= IFF_LOWER_UP; 4648 if (netif_dormant(dev)) 4649 flags |= IFF_DORMANT; 4650 } 4651 4652 return flags; 4653 } 4654 EXPORT_SYMBOL(dev_get_flags); 4655 4656 int __dev_change_flags(struct net_device *dev, unsigned int flags) 4657 { 4658 unsigned int old_flags = dev->flags; 4659 int ret; 4660 4661 ASSERT_RTNL(); 4662 4663 /* 4664 * Set the flags on our device. 4665 */ 4666 4667 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 4668 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 4669 IFF_AUTOMEDIA)) | 4670 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 4671 IFF_ALLMULTI)); 4672 4673 /* 4674 * Load in the correct multicast list now that the flags have changed. 4675 */ 4676 4677 if ((old_flags ^ flags) & IFF_MULTICAST) 4678 dev_change_rx_flags(dev, IFF_MULTICAST); 4679 4680 dev_set_rx_mode(dev); 4681 4682 /* 4683 * Have we downed the interface? We handle IFF_UP ourselves 4684 * according to user attempts to set it, rather than blindly 4685 * setting it. 4686 */ 4687 4688 ret = 0; 4689 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different? */ 4690 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); 4691 4692 if (!ret) 4693 dev_set_rx_mode(dev); 4694 } 4695 4696 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4697 int inc = (flags & IFF_PROMISC) ? 1 : -1; 4698 4699 dev->gflags ^= IFF_PROMISC; 4700 dev_set_promiscuity(dev, inc); 4701 } 4702 4703 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 4704 is important. Some (broken) drivers set IFF_PROMISC when 4705 IFF_ALLMULTI is requested, without asking us and without reporting it. 4706 */ 4707 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 4708 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 4709 4710 dev->gflags ^= IFF_ALLMULTI; 4711 dev_set_allmulti(dev, inc); 4712 } 4713 4714 return ret; 4715 } 4716 4717 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) 4718 { 4719 unsigned int changes = dev->flags ^ old_flags; 4720 4721 if (changes & IFF_UP) { 4722 if (dev->flags & IFF_UP) 4723 call_netdevice_notifiers(NETDEV_UP, dev); 4724 else 4725 call_netdevice_notifiers(NETDEV_DOWN, dev); 4726 } 4727 4728 if (dev->flags & IFF_UP && 4729 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) 4730 call_netdevice_notifiers(NETDEV_CHANGE, dev); 4731 } 4732 4733 /** 4734 * dev_change_flags - change device settings 4735 * @dev: device 4736 * @flags: device state flags 4737 * 4738 * Change device settings based on state flags. The flags are 4739 * in the userspace-exported format. 4740 */ 4741 int dev_change_flags(struct net_device *dev, unsigned int flags) 4742 { 4743 int ret; 4744 unsigned int changes, old_flags = dev->flags; 4745 4746 ret = __dev_change_flags(dev, flags); 4747 if (ret < 0) 4748 return ret; 4749 4750 changes = old_flags ^ dev->flags; 4751 if (changes) 4752 rtmsg_ifinfo(RTM_NEWLINK, dev, changes); 4753 4754 __dev_notify_flags(dev, old_flags); 4755 return ret; 4756 } 4757 EXPORT_SYMBOL(dev_change_flags); 4758 4759 /** 4760 * dev_set_mtu - Change maximum transfer unit 4761 * @dev: device 4762 * @new_mtu: new transfer unit 4763 * 4764 * Change the maximum transfer size of the network device.
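 *
 * Callers normally hold RTNL, as the ioctl path does; the SIOCSIFMTU
 * handler below reduces to:
 *
 *	err = dev_set_mtu(dev, ifr->ifr_mtu);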
4765 */ 4766 int dev_set_mtu(struct net_device *dev, int new_mtu) 4767 { 4768 const struct net_device_ops *ops = dev->netdev_ops; 4769 int err; 4770 4771 if (new_mtu == dev->mtu) 4772 return 0; 4773 4774 /* MTU must be positive. */ 4775 if (new_mtu < 0) 4776 return -EINVAL; 4777 4778 if (!netif_device_present(dev)) 4779 return -ENODEV; 4780 4781 err = 0; 4782 if (ops->ndo_change_mtu) 4783 err = ops->ndo_change_mtu(dev, new_mtu); 4784 else 4785 dev->mtu = new_mtu; 4786 4787 if (!err && dev->flags & IFF_UP) 4788 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4789 return err; 4790 } 4791 EXPORT_SYMBOL(dev_set_mtu); 4792 4793 /** 4794 * dev_set_group - Change group this device belongs to 4795 * @dev: device 4796 * @new_group: group this device should belong to 4797 */ 4798 void dev_set_group(struct net_device *dev, int new_group) 4799 { 4800 dev->group = new_group; 4801 } 4802 EXPORT_SYMBOL(dev_set_group); 4803 4804 /** 4805 * dev_set_mac_address - Change Media Access Control Address 4806 * @dev: device 4807 * @sa: new address 4808 * 4809 * Change the hardware (MAC) address of the device 4810 */ 4811 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 4812 { 4813 const struct net_device_ops *ops = dev->netdev_ops; 4814 int err; 4815 4816 if (!ops->ndo_set_mac_address) 4817 return -EOPNOTSUPP; 4818 if (sa->sa_family != dev->type) 4819 return -EINVAL; 4820 if (!netif_device_present(dev)) 4821 return -ENODEV; 4822 err = ops->ndo_set_mac_address(dev, sa); 4823 if (!err) 4824 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4825 return err; 4826 } 4827 EXPORT_SYMBOL(dev_set_mac_address); 4828 4829 /* 4830 * Perform the SIOCxIFxxx calls, inside rcu_read_lock() 4831 */ 4832 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) 4833 { 4834 int err; 4835 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); 4836 4837 if (!dev) 4838 return -ENODEV; 4839 4840 switch (cmd) { 4841 case SIOCGIFFLAGS: /* Get interface flags */ 4842 ifr->ifr_flags = (short) dev_get_flags(dev); 4843 return 0; 4844 4845 case SIOCGIFMETRIC: /* Get the metric on the interface 4846 (currently unused) */ 4847 ifr->ifr_metric = 0; 4848 return 0; 4849 4850 case SIOCGIFMTU: /* Get the MTU of a device */ 4851 ifr->ifr_mtu = dev->mtu; 4852 return 0; 4853 4854 case SIOCGIFHWADDR: 4855 if (!dev->addr_len) 4856 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); 4857 else 4858 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, 4859 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4860 ifr->ifr_hwaddr.sa_family = dev->type; 4861 return 0; 4862 4863 case SIOCGIFSLAVE: 4864 err = -EINVAL; 4865 break; 4866 4867 case SIOCGIFMAP: 4868 ifr->ifr_map.mem_start = dev->mem_start; 4869 ifr->ifr_map.mem_end = dev->mem_end; 4870 ifr->ifr_map.base_addr = dev->base_addr; 4871 ifr->ifr_map.irq = dev->irq; 4872 ifr->ifr_map.dma = dev->dma; 4873 ifr->ifr_map.port = dev->if_port; 4874 return 0; 4875 4876 case SIOCGIFINDEX: 4877 ifr->ifr_ifindex = dev->ifindex; 4878 return 0; 4879 4880 case SIOCGIFTXQLEN: 4881 ifr->ifr_qlen = dev->tx_queue_len; 4882 return 0; 4883 4884 default: 4885 /* dev_ioctl() should ensure this case 4886 * is never reached 4887 */ 4888 WARN_ON(1); 4889 err = -ENOTTY; 4890 break; 4891 4892 } 4893 return err; 4894 } 4895 4896 /* 4897 * Perform the SIOCxIFxxx calls, inside rtnl_lock() 4898 */ 4899 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) 4900 { 4901 int err; 4902 struct net_device *dev = __dev_get_by_name(net, 
ifr->ifr_name); 4903 const struct net_device_ops *ops; 4904 4905 if (!dev) 4906 return -ENODEV; 4907 4908 ops = dev->netdev_ops; 4909 4910 switch (cmd) { 4911 case SIOCSIFFLAGS: /* Set interface flags */ 4912 return dev_change_flags(dev, ifr->ifr_flags); 4913 4914 case SIOCSIFMETRIC: /* Set the metric on the interface 4915 (currently unused) */ 4916 return -EOPNOTSUPP; 4917 4918 case SIOCSIFMTU: /* Set the MTU of a device */ 4919 return dev_set_mtu(dev, ifr->ifr_mtu); 4920 4921 case SIOCSIFHWADDR: 4922 return dev_set_mac_address(dev, &ifr->ifr_hwaddr); 4923 4924 case SIOCSIFHWBROADCAST: 4925 if (ifr->ifr_hwaddr.sa_family != dev->type) 4926 return -EINVAL; 4927 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, 4928 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4929 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4930 return 0; 4931 4932 case SIOCSIFMAP: 4933 if (ops->ndo_set_config) { 4934 if (!netif_device_present(dev)) 4935 return -ENODEV; 4936 return ops->ndo_set_config(dev, &ifr->ifr_map); 4937 } 4938 return -EOPNOTSUPP; 4939 4940 case SIOCADDMULTI: 4941 if (!ops->ndo_set_rx_mode || 4942 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 4943 return -EINVAL; 4944 if (!netif_device_present(dev)) 4945 return -ENODEV; 4946 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); 4947 4948 case SIOCDELMULTI: 4949 if (!ops->ndo_set_rx_mode || 4950 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 4951 return -EINVAL; 4952 if (!netif_device_present(dev)) 4953 return -ENODEV; 4954 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); 4955 4956 case SIOCSIFTXQLEN: 4957 if (ifr->ifr_qlen < 0) 4958 return -EINVAL; 4959 dev->tx_queue_len = ifr->ifr_qlen; 4960 return 0; 4961 4962 case SIOCSIFNAME: 4963 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 4964 return dev_change_name(dev, ifr->ifr_newname); 4965 4966 case SIOCSHWTSTAMP: 4967 err = net_hwtstamp_validate(ifr); 4968 if (err) 4969 return err; 4970 /* fall through */ 4971 4972 /* 4973 * Unknown or private ioctl 4974 */ 4975 default: 4976 if ((cmd >= SIOCDEVPRIVATE && 4977 cmd <= SIOCDEVPRIVATE + 15) || 4978 cmd == SIOCBONDENSLAVE || 4979 cmd == SIOCBONDRELEASE || 4980 cmd == SIOCBONDSETHWADDR || 4981 cmd == SIOCBONDSLAVEINFOQUERY || 4982 cmd == SIOCBONDINFOQUERY || 4983 cmd == SIOCBONDCHANGEACTIVE || 4984 cmd == SIOCGMIIPHY || 4985 cmd == SIOCGMIIREG || 4986 cmd == SIOCSMIIREG || 4987 cmd == SIOCBRADDIF || 4988 cmd == SIOCBRDELIF || 4989 cmd == SIOCSHWTSTAMP || 4990 cmd == SIOCWANDEV) { 4991 err = -EOPNOTSUPP; 4992 if (ops->ndo_do_ioctl) { 4993 if (netif_device_present(dev)) 4994 err = ops->ndo_do_ioctl(dev, ifr, cmd); 4995 else 4996 err = -ENODEV; 4997 } 4998 } else 4999 err = -EINVAL; 5000 5001 } 5002 return err; 5003 } 5004 5005 /* 5006 * This function handles all "interface"-type I/O control requests. The actual 5007 * 'doing' part of this is dev_ifsioc above. 5008 */ 5009 5010 /** 5011 * dev_ioctl - network device ioctl 5012 * @net: the applicable net namespace 5013 * @cmd: command to issue 5014 * @arg: pointer to a struct ifreq in user space 5015 * 5016 * Issue ioctl functions to devices. This is normally called by the 5017 * user space syscall interfaces but can sometimes be useful for 5018 * other purposes. The return value is the return from the syscall if 5019 * positive or a negative errno code on error. 
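 *
 * For instance, a user space query such as the following (the socket
 * fd and device name are illustrative) ends up here and, for this
 * particular command, is served by dev_ifsioc_locked() under RCU:
 *
 *	struct ifreq ifr;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	err = ioctl(fd, SIOCGIFMTU, &ifr);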
5020 */ 5021 5022 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) 5023 { 5024 struct ifreq ifr; 5025 int ret; 5026 char *colon; 5027 5028 /* One special case: SIOCGIFCONF takes ifconf argument 5029 and requires shared lock, because it sleeps writing 5030 to user space. 5031 */ 5032 5033 if (cmd == SIOCGIFCONF) { 5034 rtnl_lock(); 5035 ret = dev_ifconf(net, (char __user *) arg); 5036 rtnl_unlock(); 5037 return ret; 5038 } 5039 if (cmd == SIOCGIFNAME) 5040 return dev_ifname(net, (struct ifreq __user *)arg); 5041 5042 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 5043 return -EFAULT; 5044 5045 ifr.ifr_name[IFNAMSIZ-1] = 0; 5046 5047 colon = strchr(ifr.ifr_name, ':'); 5048 if (colon) 5049 *colon = 0; 5050 5051 /* 5052 * See which interface the caller is talking about. 5053 */ 5054 5055 switch (cmd) { 5056 /* 5057 * These ioctl calls: 5058 * - can be done by all. 5059 * - atomic and do not require locking. 5060 * - return a value 5061 */ 5062 case SIOCGIFFLAGS: 5063 case SIOCGIFMETRIC: 5064 case SIOCGIFMTU: 5065 case SIOCGIFHWADDR: 5066 case SIOCGIFSLAVE: 5067 case SIOCGIFMAP: 5068 case SIOCGIFINDEX: 5069 case SIOCGIFTXQLEN: 5070 dev_load(net, ifr.ifr_name); 5071 rcu_read_lock(); 5072 ret = dev_ifsioc_locked(net, &ifr, cmd); 5073 rcu_read_unlock(); 5074 if (!ret) { 5075 if (colon) 5076 *colon = ':'; 5077 if (copy_to_user(arg, &ifr, 5078 sizeof(struct ifreq))) 5079 ret = -EFAULT; 5080 } 5081 return ret; 5082 5083 case SIOCETHTOOL: 5084 dev_load(net, ifr.ifr_name); 5085 rtnl_lock(); 5086 ret = dev_ethtool(net, &ifr); 5087 rtnl_unlock(); 5088 if (!ret) { 5089 if (colon) 5090 *colon = ':'; 5091 if (copy_to_user(arg, &ifr, 5092 sizeof(struct ifreq))) 5093 ret = -EFAULT; 5094 } 5095 return ret; 5096 5097 /* 5098 * These ioctl calls: 5099 * - require superuser power. 5100 * - require strict serialization. 5101 * - return a value 5102 */ 5103 case SIOCGMIIPHY: 5104 case SIOCGMIIREG: 5105 case SIOCSIFNAME: 5106 if (!capable(CAP_NET_ADMIN)) 5107 return -EPERM; 5108 dev_load(net, ifr.ifr_name); 5109 rtnl_lock(); 5110 ret = dev_ifsioc(net, &ifr, cmd); 5111 rtnl_unlock(); 5112 if (!ret) { 5113 if (colon) 5114 *colon = ':'; 5115 if (copy_to_user(arg, &ifr, 5116 sizeof(struct ifreq))) 5117 ret = -EFAULT; 5118 } 5119 return ret; 5120 5121 /* 5122 * These ioctl calls: 5123 * - require superuser power. 5124 * - require strict serialization. 5125 * - do not return a value 5126 */ 5127 case SIOCSIFFLAGS: 5128 case SIOCSIFMETRIC: 5129 case SIOCSIFMTU: 5130 case SIOCSIFMAP: 5131 case SIOCSIFHWADDR: 5132 case SIOCSIFSLAVE: 5133 case SIOCADDMULTI: 5134 case SIOCDELMULTI: 5135 case SIOCSIFHWBROADCAST: 5136 case SIOCSIFTXQLEN: 5137 case SIOCSMIIREG: 5138 case SIOCBONDENSLAVE: 5139 case SIOCBONDRELEASE: 5140 case SIOCBONDSETHWADDR: 5141 case SIOCBONDCHANGEACTIVE: 5142 case SIOCBRADDIF: 5143 case SIOCBRDELIF: 5144 case SIOCSHWTSTAMP: 5145 if (!capable(CAP_NET_ADMIN)) 5146 return -EPERM; 5147 /* fall through */ 5148 case SIOCBONDSLAVEINFOQUERY: 5149 case SIOCBONDINFOQUERY: 5150 dev_load(net, ifr.ifr_name); 5151 rtnl_lock(); 5152 ret = dev_ifsioc(net, &ifr, cmd); 5153 rtnl_unlock(); 5154 return ret; 5155 5156 case SIOCGIFMEM: 5157 /* Get the per device memory space. We can add this but 5158 * currently do not support it */ 5159 case SIOCSIFMEM: 5160 /* Set the per device memory buffer space. 5161 * Not applicable in our case */ 5162 case SIOCSIFLINK: 5163 return -ENOTTY; 5164 5165 /* 5166 * Unknown or private ioctl. 
5167 */ 5168 default: 5169 if (cmd == SIOCWANDEV || 5170 (cmd >= SIOCDEVPRIVATE && 5171 cmd <= SIOCDEVPRIVATE + 15)) { 5172 dev_load(net, ifr.ifr_name); 5173 rtnl_lock(); 5174 ret = dev_ifsioc(net, &ifr, cmd); 5175 rtnl_unlock(); 5176 if (!ret && copy_to_user(arg, &ifr, 5177 sizeof(struct ifreq))) 5178 ret = -EFAULT; 5179 return ret; 5180 } 5181 /* Take care of Wireless Extensions */ 5182 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) 5183 return wext_handle_ioctl(net, &ifr, cmd, arg); 5184 return -ENOTTY; 5185 } 5186 } 5187 5188 5189 /** 5190 * dev_new_index - allocate an ifindex 5191 * @net: the applicable net namespace 5192 * 5193 * Returns a suitable unique value for a new device interface 5194 * number. The caller must hold the rtnl semaphore or the 5195 * dev_base_lock to be sure it remains unique. 5196 */ 5197 static int dev_new_index(struct net *net) 5198 { 5199 static int ifindex; 5200 for (;;) { 5201 if (++ifindex <= 0) 5202 ifindex = 1; 5203 if (!__dev_get_by_index(net, ifindex)) 5204 return ifindex; 5205 } 5206 } 5207 5208 /* Delayed registration/unregistration */ 5209 static LIST_HEAD(net_todo_list); 5210 5211 static void net_set_todo(struct net_device *dev) 5212 { 5213 list_add_tail(&dev->todo_list, &net_todo_list); 5214 } 5215 5216 static void rollback_registered_many(struct list_head *head) 5217 { 5218 struct net_device *dev, *tmp; 5219 5220 BUG_ON(dev_boot_phase); 5221 ASSERT_RTNL(); 5222 5223 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 5224 /* Some devices call this without having registered, to 5225 * unwind initialization. Remove those 5226 * devices and proceed with the remaining. 5227 */ 5228 if (dev->reg_state == NETREG_UNINITIALIZED) { 5229 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 5230 dev->name, dev); 5231 5232 WARN_ON(1); 5233 list_del(&dev->unreg_list); 5234 continue; 5235 } 5236 dev->dismantle = true; 5237 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5238 } 5239 5240 /* If device is running, close it first. */ 5241 dev_close_many(head); 5242 5243 list_for_each_entry(dev, head, unreg_list) { 5244 /* And unlink it from device chain. */ 5245 unlist_netdevice(dev); 5246 5247 dev->reg_state = NETREG_UNREGISTERING; 5248 } 5249 5250 synchronize_net(); 5251 5252 list_for_each_entry(dev, head, unreg_list) { 5253 /* Shutdown queueing discipline. */ 5254 dev_shutdown(dev); 5255 5256 5257 /* Notify protocols that we are about to destroy 5258 this device. They should clean up all of their state. 5259 */ 5260 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5261 5262 if (!dev->rtnl_link_ops || 5263 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5264 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 5265 5266 /* 5267 * Flush the unicast and multicast chains 5268 */ 5269 dev_uc_flush(dev); 5270 dev_mc_flush(dev); 5271 5272 if (dev->netdev_ops->ndo_uninit) 5273 dev->netdev_ops->ndo_uninit(dev); 5274 5275 /* Notifier chain MUST detach us from master device.
*/ 5276 WARN_ON(dev->master); 5277 5278 /* Remove entries from kobject tree */ 5279 netdev_unregister_kobject(dev); 5280 } 5281 5282 /* Process any work delayed until the end of the batch */ 5283 dev = list_first_entry(head, struct net_device, unreg_list); 5284 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 5285 5286 synchronize_net(); 5287 5288 list_for_each_entry(dev, head, unreg_list) 5289 dev_put(dev); 5290 } 5291 5292 static void rollback_registered(struct net_device *dev) 5293 { 5294 LIST_HEAD(single); 5295 5296 list_add(&dev->unreg_list, &single); 5297 rollback_registered_many(&single); 5298 list_del(&single); 5299 } 5300 5301 static netdev_features_t netdev_fix_features(struct net_device *dev, 5302 netdev_features_t features) 5303 { 5304 /* Fix illegal checksum combinations */ 5305 if ((features & NETIF_F_HW_CSUM) && 5306 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5307 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 5308 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5309 } 5310 5311 /* Fix illegal SG+CSUM combinations. */ 5312 if ((features & NETIF_F_SG) && 5313 !(features & NETIF_F_ALL_CSUM)) { 5314 netdev_dbg(dev, 5315 "Dropping NETIF_F_SG since no checksum feature.\n"); 5316 features &= ~NETIF_F_SG; 5317 } 5318 5319 /* TSO requires that SG is present as well. */ 5320 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5321 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 5322 features &= ~NETIF_F_ALL_TSO; 5323 } 5324 5325 /* TSO ECN requires that TSO is present as well. */ 5326 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 5327 features &= ~NETIF_F_TSO_ECN; 5328 5329 /* Software GSO depends on SG. */ 5330 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5331 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5332 features &= ~NETIF_F_GSO; 5333 } 5334 5335 /* UFO needs SG and checksumming */ 5336 if (features & NETIF_F_UFO) { 5337 /* maybe split UFO into V4 and V6? 
*/ 5338 if (!((features & NETIF_F_GEN_CSUM) || 5339 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5340 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5341 netdev_dbg(dev, 5342 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5343 features &= ~NETIF_F_UFO; 5344 } 5345 5346 if (!(features & NETIF_F_SG)) { 5347 netdev_dbg(dev, 5348 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5349 features &= ~NETIF_F_UFO; 5350 } 5351 } 5352 5353 return features; 5354 } 5355 5356 int __netdev_update_features(struct net_device *dev) 5357 { 5358 netdev_features_t features; 5359 int err = 0; 5360 5361 ASSERT_RTNL(); 5362 5363 features = netdev_get_wanted_features(dev); 5364 5365 if (dev->netdev_ops->ndo_fix_features) 5366 features = dev->netdev_ops->ndo_fix_features(dev, features); 5367 5368 /* driver might be less strict about feature dependencies */ 5369 features = netdev_fix_features(dev, features); 5370 5371 if (dev->features == features) 5372 return 0; 5373 5374 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 5375 &dev->features, &features); 5376 5377 if (dev->netdev_ops->ndo_set_features) 5378 err = dev->netdev_ops->ndo_set_features(dev, features); 5379 5380 if (unlikely(err < 0)) { 5381 netdev_err(dev, 5382 "set_features() failed (%d); wanted %pNF, left %pNF\n", 5383 err, &features, &dev->features); 5384 return -1; 5385 } 5386 5387 if (!err) 5388 dev->features = features; 5389 5390 return 1; 5391 } 5392 5393 /** 5394 * netdev_update_features - recalculate device features 5395 * @dev: the device to check 5396 * 5397 * Recalculate dev->features set and send notifications if it 5398 * has changed. Should be called whenever driver- or hardware-dependent 5399 * conditions that influence the feature set may have changed. 5400 */ 5401 void netdev_update_features(struct net_device *dev) 5402 { 5403 if (__netdev_update_features(dev)) 5404 netdev_features_change(dev); 5405 } 5406 EXPORT_SYMBOL(netdev_update_features); 5407 5408 /** 5409 * netdev_change_features - recalculate device features 5410 * @dev: the device to check 5411 * 5412 * Recalculate dev->features set and send notifications even 5413 * if they have not changed. Should be called instead of 5414 * netdev_update_features() when dev->vlan_features might also 5415 * have changed, to allow the changes to be propagated to stacked 5416 * VLAN devices. 5417 */ 5418 void netdev_change_features(struct net_device *dev) 5419 { 5420 __netdev_update_features(dev); 5421 netdev_features_change(dev); 5422 } 5423 EXPORT_SYMBOL(netdev_change_features); 5424 5425 /** 5426 * netif_stacked_transfer_operstate - transfer operstate 5427 * @rootdev: the root or lower level device to transfer state from 5428 * @dev: the device to transfer operstate to 5429 * 5430 * Transfer operational state from root to device. This is normally 5431 * called when a stacking relationship exists between the root 5432 * device and the device (a leaf device).
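 *
 * A stacking driver typically calls this from its NETDEV_CHANGE
 * notifier to propagate carrier and dormant state downwards, roughly
 * (device names illustrative):
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);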
5433 */ 5434 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5435 struct net_device *dev) 5436 { 5437 if (rootdev->operstate == IF_OPER_DORMANT) 5438 netif_dormant_on(dev); 5439 else 5440 netif_dormant_off(dev); 5441 5442 if (netif_carrier_ok(rootdev)) { 5443 if (!netif_carrier_ok(dev)) 5444 netif_carrier_on(dev); 5445 } else { 5446 if (netif_carrier_ok(dev)) 5447 netif_carrier_off(dev); 5448 } 5449 } 5450 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5451 5452 #ifdef CONFIG_RPS 5453 static int netif_alloc_rx_queues(struct net_device *dev) 5454 { 5455 unsigned int i, count = dev->num_rx_queues; 5456 struct netdev_rx_queue *rx; 5457 5458 BUG_ON(count < 1); 5459 5460 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5461 if (!rx) { 5462 pr_err("netdev: Unable to allocate %u rx queues\n", count); 5463 return -ENOMEM; 5464 } 5465 dev->_rx = rx; 5466 5467 for (i = 0; i < count; i++) 5468 rx[i].dev = dev; 5469 return 0; 5470 } 5471 #endif 5472 5473 static void netdev_init_one_queue(struct net_device *dev, 5474 struct netdev_queue *queue, void *_unused) 5475 { 5476 /* Initialize queue lock */ 5477 spin_lock_init(&queue->_xmit_lock); 5478 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 5479 queue->xmit_lock_owner = -1; 5480 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 5481 queue->dev = dev; 5482 #ifdef CONFIG_BQL 5483 dql_init(&queue->dql, HZ); 5484 #endif 5485 } 5486 5487 static int netif_alloc_netdev_queues(struct net_device *dev) 5488 { 5489 unsigned int count = dev->num_tx_queues; 5490 struct netdev_queue *tx; 5491 5492 BUG_ON(count < 1); 5493 5494 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5495 if (!tx) { 5496 pr_err("netdev: Unable to allocate %u tx queues\n", count); 5497 return -ENOMEM; 5498 } 5499 dev->_tx = tx; 5500 5501 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5502 spin_lock_init(&dev->tx_global_lock); 5503 5504 return 0; 5505 } 5506 5507 /** 5508 * register_netdevice - register a network device 5509 * @dev: device to register 5510 * 5511 * Take a completed network device structure and add it to the kernel 5512 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5513 * chain. 0 is returned on success. A negative errno code is returned 5514 * on a failure to set up the device, or if the name is a duplicate. 5515 * 5516 * Callers must hold the rtnl semaphore. You may want 5517 * register_netdev() instead of this. 5518 * 5519 * BUGS: 5520 * The locking appears insufficient to guarantee two parallel registers 5521 * will not get the same name. 5522 */ 5523 5524 int register_netdevice(struct net_device *dev) 5525 { 5526 int ret; 5527 struct net *net = dev_net(dev); 5528 5529 BUG_ON(dev_boot_phase); 5530 ASSERT_RTNL(); 5531 5532 might_sleep(); 5533 5534 /* When net_device's are persistent, this will be fatal. 
*/ 5535 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 5536 BUG_ON(!net); 5537 5538 spin_lock_init(&dev->addr_list_lock); 5539 netdev_set_addr_lockdep_class(dev); 5540 5541 dev->iflink = -1; 5542 5543 ret = dev_get_valid_name(dev, dev->name); 5544 if (ret < 0) 5545 goto out; 5546 5547 /* Init, if this function is available */ 5548 if (dev->netdev_ops->ndo_init) { 5549 ret = dev->netdev_ops->ndo_init(dev); 5550 if (ret) { 5551 if (ret > 0) 5552 ret = -EIO; 5553 goto out; 5554 } 5555 } 5556 5557 dev->ifindex = dev_new_index(net); 5558 if (dev->iflink == -1) 5559 dev->iflink = dev->ifindex; 5560 5561 /* Transfer changeable features to wanted_features and enable 5562 * software offloads (GSO and GRO). 5563 */ 5564 dev->hw_features |= NETIF_F_SOFT_FEATURES; 5565 dev->features |= NETIF_F_SOFT_FEATURES; 5566 dev->wanted_features = dev->features & dev->hw_features; 5567 5568 /* Turn on no cache copy if HW is doing checksum */ 5569 if (!(dev->flags & IFF_LOOPBACK)) { 5570 dev->hw_features |= NETIF_F_NOCACHE_COPY; 5571 if (dev->features & NETIF_F_ALL_CSUM) { 5572 dev->wanted_features |= NETIF_F_NOCACHE_COPY; 5573 dev->features |= NETIF_F_NOCACHE_COPY; 5574 } 5575 } 5576 5577 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 5578 */ 5579 dev->vlan_features |= NETIF_F_HIGHDMA; 5580 5581 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5582 ret = notifier_to_errno(ret); 5583 if (ret) 5584 goto err_uninit; 5585 5586 ret = netdev_register_kobject(dev); 5587 if (ret) 5588 goto err_uninit; 5589 dev->reg_state = NETREG_REGISTERED; 5590 5591 __netdev_update_features(dev); 5592 5593 /* 5594 * Default initial state at registration is that the 5595 * device is present. 5596 */ 5597 5598 set_bit(__LINK_STATE_PRESENT, &dev->state); 5599 5600 dev_init_scheduler(dev); 5601 dev_hold(dev); 5602 list_netdevice(dev); 5603 5604 /* Notify protocols that a new device appeared. */ 5605 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 5606 ret = notifier_to_errno(ret); 5607 if (ret) { 5608 rollback_registered(dev); 5609 dev->reg_state = NETREG_UNREGISTERED; 5610 } 5611 /* 5612 * Prevent userspace races by waiting until the network 5613 * device is fully set up before sending notifications. 5614 */ 5615 if (!dev->rtnl_link_ops || 5616 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5617 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); 5618 5619 out: 5620 return ret; 5621 5622 err_uninit: 5623 if (dev->netdev_ops->ndo_uninit) 5624 dev->netdev_ops->ndo_uninit(dev); 5625 goto out; 5626 } 5627 EXPORT_SYMBOL(register_netdevice); 5628 5629 /** 5630 * init_dummy_netdev - init a dummy network device for NAPI 5631 * @dev: device to init 5632 * 5633 * This takes a network device structure and initializes the minimum 5634 * number of fields so it can be used to schedule NAPI polls without 5635 * registering a full-blown interface. This is to be used by drivers 5636 * that need to tie several hardware interfaces to a single NAPI 5637 * poll scheduler due to HW limitations. 5638 */ 5639 int init_dummy_netdev(struct net_device *dev) 5640 { 5641 /* Clear everything.
Note we don't initialize spinlocks, 5642 * as they aren't supposed to be taken by any of the 5643 * NAPI code and this dummy netdev is supposed to be 5644 * only ever used for NAPI polls 5645 */ 5646 memset(dev, 0, sizeof(struct net_device)); 5647 5648 /* make sure we BUG if trying to hit standard 5649 * register/unregister code path 5650 */ 5651 dev->reg_state = NETREG_DUMMY; 5652 5653 /* NAPI wants this */ 5654 INIT_LIST_HEAD(&dev->napi_list); 5655 5656 /* a dummy interface is started by default */ 5657 set_bit(__LINK_STATE_PRESENT, &dev->state); 5658 set_bit(__LINK_STATE_START, &dev->state); 5659 5660 /* Note: We don't allocate pcpu_refcnt for dummy devices, 5661 * because users of this 'device' don't need to change 5662 * its refcount. 5663 */ 5664 5665 return 0; 5666 } 5667 EXPORT_SYMBOL_GPL(init_dummy_netdev); 5668 5669 5670 /** 5671 * register_netdev - register a network device 5672 * @dev: device to register 5673 * 5674 * Take a completed network device structure and add it to the kernel 5675 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5676 * chain. 0 is returned on success. A negative errno code is returned 5677 * on a failure to set up the device, or if the name is a duplicate. 5678 * 5679 * This is a wrapper around register_netdevice that takes the rtnl semaphore 5680 * and expands the device name if you passed a format string to 5681 * alloc_netdev. 5682 */ 5683 int register_netdev(struct net_device *dev) 5684 { 5685 int err; 5686 5687 rtnl_lock(); 5688 err = register_netdevice(dev); 5689 rtnl_unlock(); 5690 return err; 5691 } 5692 EXPORT_SYMBOL(register_netdev); 5693 5694 int netdev_refcnt_read(const struct net_device *dev) 5695 { 5696 int i, refcnt = 0; 5697 5698 for_each_possible_cpu(i) 5699 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 5700 return refcnt; 5701 } 5702 EXPORT_SYMBOL(netdev_refcnt_read); 5703 5704 /* 5705 * netdev_wait_allrefs - wait until all references are gone. 5706 * 5707 * This is called when unregistering network devices. 5708 * 5709 * Any protocol or device that holds a reference should register 5710 * for netdevice notification, and clean up and put back the 5711 * reference if they receive an UNREGISTER event. 5712 * We can get stuck here if buggy protocols don't correctly 5713 * call dev_put. 5714 */ 5715 static void netdev_wait_allrefs(struct net_device *dev) 5716 { 5717 unsigned long rebroadcast_time, warning_time; 5718 int refcnt; 5719 5720 linkwatch_forget_dev(dev); 5721 5722 rebroadcast_time = warning_time = jiffies; 5723 refcnt = netdev_refcnt_read(dev); 5724 5725 while (refcnt != 0) { 5726 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 5727 rtnl_lock(); 5728 5729 /* Rebroadcast unregister notification */ 5730 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5731 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users 5732 * should have already handled it the first time */ 5733 5734 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5735 &dev->state)) { 5736 /* We must not have linkwatch events 5737 * pending on unregister. If this 5738 * happens, we simply run the queue 5739 * unscheduled, resulting in a noop 5740 * for this device. 5741 */ 5742 linkwatch_run_queue(); 5743 } 5744 5745 __rtnl_unlock(); 5746 5747 rebroadcast_time = jiffies; 5748 } 5749 5750 msleep(250); 5751 5752 refcnt = netdev_refcnt_read(dev); 5753 5754 if (time_after(jiffies, warning_time + 10 * HZ)) { 5755 pr_emerg("unregister_netdevice: waiting for %s to become free.
Usage count = %d\n", 5756 dev->name, refcnt); 5757 warning_time = jiffies; 5758 } 5759 } 5760 } 5761 5762 /* The sequence is: 5763 * 5764 * rtnl_lock(); 5765 * ... 5766 * register_netdevice(x1); 5767 * register_netdevice(x2); 5768 * ... 5769 * unregister_netdevice(y1); 5770 * unregister_netdevice(y2); 5771 * ... 5772 * rtnl_unlock(); 5773 * free_netdev(y1); 5774 * free_netdev(y2); 5775 * 5776 * We are invoked by rtnl_unlock(). 5777 * This allows us to deal with problems: 5778 * 1) We can delete sysfs objects which invoke hotplug 5779 * without deadlocking with linkwatch via keventd. 5780 * 2) Since we run with the RTNL semaphore not held, we can sleep 5781 * safely in order to wait for the netdev refcnt to drop to zero. 5782 * 5783 * We must not return until all unregister events added during 5784 * the interval the lock was held have been completed. 5785 */ 5786 void netdev_run_todo(void) 5787 { 5788 struct list_head list; 5789 5790 /* Snapshot list, allow later requests */ 5791 list_replace_init(&net_todo_list, &list); 5792 5793 __rtnl_unlock(); 5794 5795 /* Wait for rcu callbacks to finish before attempting to drain 5796 * the device list. This usually avoids a 250ms wait. 5797 */ 5798 if (!list_empty(&list)) 5799 rcu_barrier(); 5800 5801 while (!list_empty(&list)) { 5802 struct net_device *dev 5803 = list_first_entry(&list, struct net_device, todo_list); 5804 list_del(&dev->todo_list); 5805 5806 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5807 pr_err("network todo '%s' but state %d\n", 5808 dev->name, dev->reg_state); 5809 dump_stack(); 5810 continue; 5811 } 5812 5813 dev->reg_state = NETREG_UNREGISTERED; 5814 5815 on_each_cpu(flush_backlog, dev, 1); 5816 5817 netdev_wait_allrefs(dev); 5818 5819 /* paranoia */ 5820 BUG_ON(netdev_refcnt_read(dev)); 5821 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 5822 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 5823 WARN_ON(dev->dn_ptr); 5824 5825 if (dev->destructor) 5826 dev->destructor(dev); 5827 5828 /* Free network device */ 5829 kobject_put(&dev->dev.kobj); 5830 } 5831 } 5832 5833 /* Convert net_device_stats to rtnl_link_stats64. They have the same 5834 * fields in the same order, with only the type differing. 5835 */ 5836 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 5837 const struct net_device_stats *netdev_stats) 5838 { 5839 #if BITS_PER_LONG == 64 5840 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 5841 memcpy(stats64, netdev_stats, sizeof(*stats64)); 5842 #else 5843 size_t i, n = sizeof(*stats64) / sizeof(u64); 5844 const unsigned long *src = (const unsigned long *)netdev_stats; 5845 u64 *dst = (u64 *)stats64; 5846 5847 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != 5848 sizeof(*stats64) / sizeof(u64)); 5849 for (i = 0; i < n; i++) 5850 dst[i] = src[i]; 5851 #endif 5852 } 5853 EXPORT_SYMBOL(netdev_stats_to_stats64); 5854 5855 /** 5856 * dev_get_stats - get network device statistics 5857 * @dev: device to get statistics from 5858 * @storage: place to store stats 5859 * 5860 * Get network statistics from device. Return @storage. 5861 * The device driver may provide its own method by setting 5862 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 5863 * otherwise the internal statistics structure is used. 
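 *
 * Callers supply the scratch storage; dev_seq_printf_stats() above,
 * for example, does:
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);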
5864 */ 5865 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 5866 struct rtnl_link_stats64 *storage) 5867 { 5868 const struct net_device_ops *ops = dev->netdev_ops; 5869 5870 if (ops->ndo_get_stats64) { 5871 memset(storage, 0, sizeof(*storage)); 5872 ops->ndo_get_stats64(dev, storage); 5873 } else if (ops->ndo_get_stats) { 5874 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 5875 } else { 5876 netdev_stats_to_stats64(storage, &dev->stats); 5877 } 5878 storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 5879 return storage; 5880 } 5881 EXPORT_SYMBOL(dev_get_stats); 5882 5883 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 5884 { 5885 struct netdev_queue *queue = dev_ingress_queue(dev); 5886 5887 #ifdef CONFIG_NET_CLS_ACT 5888 if (queue) 5889 return queue; 5890 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 5891 if (!queue) 5892 return NULL; 5893 netdev_init_one_queue(dev, queue, NULL); 5894 queue->qdisc = &noop_qdisc; 5895 queue->qdisc_sleeping = &noop_qdisc; 5896 rcu_assign_pointer(dev->ingress_queue, queue); 5897 #endif 5898 return queue; 5899 } 5900 5901 /** 5902 * alloc_netdev_mqs - allocate network device 5903 * @sizeof_priv: size of private data to allocate space for 5904 * @name: device name format string 5905 * @setup: callback to initialize device 5906 * @txqs: the number of TX subqueues to allocate 5907 * @rxqs: the number of RX subqueues to allocate 5908 * 5909 * Allocates a struct net_device with private data area for driver use 5910 * and performs basic initialization. Also allocates subqueue structs 5911 * for each queue on the device. 5912 */ 5913 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 5914 void (*setup)(struct net_device *), 5915 unsigned int txqs, unsigned int rxqs) 5916 { 5917 struct net_device *dev; 5918 size_t alloc_size; 5919 struct net_device *p; 5920 5921 BUG_ON(strlen(name) >= sizeof(dev->name)); 5922 5923 if (txqs < 1) { 5924 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 5925 return NULL; 5926 } 5927 5928 #ifdef CONFIG_RPS 5929 if (rxqs < 1) { 5930 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 5931 return NULL; 5932 } 5933 #endif 5934 5935 alloc_size = sizeof(struct net_device); 5936 if (sizeof_priv) { 5937 /* ensure 32-byte alignment of private area */ 5938 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 5939 alloc_size += sizeof_priv; 5940 } 5941 /* ensure 32-byte alignment of whole construct */ 5942 alloc_size += NETDEV_ALIGN - 1; 5943 5944 p = kzalloc(alloc_size, GFP_KERNEL); 5945 if (!p) { 5946 pr_err("alloc_netdev: Unable to allocate device\n"); 5947 return NULL; 5948 } 5949 5950 dev = PTR_ALIGN(p, NETDEV_ALIGN); 5951 dev->padded = (char *)dev - (char *)p; 5952 5953 dev->pcpu_refcnt = alloc_percpu(int); 5954 if (!dev->pcpu_refcnt) 5955 goto free_p; 5956 5957 if (dev_addr_init(dev)) 5958 goto free_pcpu; 5959 5960 dev_mc_init(dev); 5961 dev_uc_init(dev); 5962 5963 dev_net_set(dev, &init_net); 5964 5965 dev->gso_max_size = GSO_MAX_SIZE; 5966 5967 INIT_LIST_HEAD(&dev->napi_list); 5968 INIT_LIST_HEAD(&dev->unreg_list); 5969 INIT_LIST_HEAD(&dev->link_watch_list); 5970 dev->priv_flags = IFF_XMIT_DST_RELEASE; 5971 setup(dev); 5972 5973 dev->num_tx_queues = txqs; 5974 dev->real_num_tx_queues = txqs; 5975 if (netif_alloc_netdev_queues(dev)) 5976 goto free_all; 5977 5978 #ifdef CONFIG_RPS 5979 dev->num_rx_queues = rxqs; 5980 dev->real_num_rx_queues = rxqs; 5981 if (netif_alloc_rx_queues(dev)) 5982 goto free_all; 5983 #endif 5984 5985
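	/* The name may still be a format string such as "eth%d" at this
	 * point; it is resolved by dev_get_valid_name() at registration
	 * time.
	 */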
strcpy(dev->name, name); 5986 dev->group = INIT_NETDEV_GROUP; 5987 return dev; 5988 5989 free_all: 5990 free_netdev(dev); 5991 return NULL; 5992 5993 free_pcpu: 5994 free_percpu(dev->pcpu_refcnt); 5995 kfree(dev->_tx); 5996 #ifdef CONFIG_RPS 5997 kfree(dev->_rx); 5998 #endif 5999 6000 free_p: 6001 kfree(p); 6002 return NULL; 6003 } 6004 EXPORT_SYMBOL(alloc_netdev_mqs); 6005 6006 /** 6007 * free_netdev - free network device 6008 * @dev: device 6009 * 6010 * This function does the last stage of destroying an allocated device 6011 * interface. The reference to the device object is released. 6012 * If this is the last reference then it will be freed. 6013 */ 6014 void free_netdev(struct net_device *dev) 6015 { 6016 struct napi_struct *p, *n; 6017 6018 release_net(dev_net(dev)); 6019 6020 kfree(dev->_tx); 6021 #ifdef CONFIG_RPS 6022 kfree(dev->_rx); 6023 #endif 6024 6025 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 6026 6027 /* Flush device addresses */ 6028 dev_addr_flush(dev); 6029 6030 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 6031 netif_napi_del(p); 6032 6033 free_percpu(dev->pcpu_refcnt); 6034 dev->pcpu_refcnt = NULL; 6035 6036 /* Compatibility with error handling in drivers */ 6037 if (dev->reg_state == NETREG_UNINITIALIZED) { 6038 kfree((char *)dev - dev->padded); 6039 return; 6040 } 6041 6042 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 6043 dev->reg_state = NETREG_RELEASED; 6044 6045 /* will free via device release */ 6046 put_device(&dev->dev); 6047 } 6048 EXPORT_SYMBOL(free_netdev); 6049 6050 /** 6051 * synchronize_net - Synchronize with packet receive processing 6052 * 6053 * Wait for packets currently being received to be done. 6054 * Does not block later packets from starting. 6055 */ 6056 void synchronize_net(void) 6057 { 6058 might_sleep(); 6059 if (rtnl_is_locked()) 6060 synchronize_rcu_expedited(); 6061 else 6062 synchronize_rcu(); 6063 } 6064 EXPORT_SYMBOL(synchronize_net); 6065 6066 /** 6067 * unregister_netdevice_queue - remove device from the kernel 6068 * @dev: device 6069 * @head: list 6070 * 6071 * This function shuts down a device interface and removes it 6072 * from the kernel tables. 6073 * If head not NULL, device is queued to be unregistered later. 6074 * 6075 * Callers must hold the rtnl semaphore. You may want 6076 * unregister_netdev() instead of this. 6077 */ 6078 6079 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 6080 { 6081 ASSERT_RTNL(); 6082 6083 if (head) { 6084 list_move_tail(&dev->unreg_list, head); 6085 } else { 6086 rollback_registered(dev); 6087 /* Finish processing unregister after unlock */ 6088 net_set_todo(dev); 6089 } 6090 } 6091 EXPORT_SYMBOL(unregister_netdevice_queue); 6092 6093 /** 6094 * unregister_netdevice_many - unregister many devices 6095 * @head: list of devices 6096 */ 6097 void unregister_netdevice_many(struct list_head *head) 6098 { 6099 struct net_device *dev; 6100 6101 if (!list_empty(head)) { 6102 rollback_registered_many(head); 6103 list_for_each_entry(dev, head, unreg_list) 6104 net_set_todo(dev); 6105 } 6106 } 6107 EXPORT_SYMBOL(unregister_netdevice_many); 6108 6109 /** 6110 * unregister_netdev - remove device from the kernel 6111 * @dev: device 6112 * 6113 * This function shuts down a device interface and removes it 6114 * from the kernel tables. 6115 * 6116 * This is just a wrapper for unregister_netdevice that takes 6117 * the rtnl semaphore. In general you want to use this and not 6118 * unregister_netdevice. 
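 *
 * Typical driver teardown is therefore:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);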
6119 */ 6120 void unregister_netdev(struct net_device *dev) 6121 { 6122 rtnl_lock(); 6123 unregister_netdevice(dev); 6124 rtnl_unlock(); 6125 } 6126 EXPORT_SYMBOL(unregister_netdev); 6127 6128 /** 6129 * dev_change_net_namespace - move device to a different network namespace 6130 * @dev: device 6131 * @net: network namespace 6132 * @pat: If not NULL, name pattern to try if the current device name 6133 * is already taken in the destination network namespace. 6134 * 6135 * This function shuts down a device interface and moves it 6136 * to a new network namespace. On success 0 is returned, on 6137 * a failure a negative errno code is returned. 6138 * 6139 * Callers must hold the rtnl semaphore. 6140 */ 6141 6142 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 6143 { 6144 int err; 6145 6146 ASSERT_RTNL(); 6147 6148 /* Don't allow namespace local devices to be moved. */ 6149 err = -EINVAL; 6150 if (dev->features & NETIF_F_NETNS_LOCAL) 6151 goto out; 6152 6153 /* Ensure the device has been registered */ 6154 err = -EINVAL; 6155 if (dev->reg_state != NETREG_REGISTERED) 6156 goto out; 6157 6158 /* Get out if there is nothing to do */ 6159 err = 0; 6160 if (net_eq(dev_net(dev), net)) 6161 goto out; 6162 6163 /* Pick the destination device name, and ensure 6164 * we can use it in the destination network namespace. 6165 */ 6166 err = -EEXIST; 6167 if (__dev_get_by_name(net, dev->name)) { 6168 /* We get here if we can't use the current device name */ 6169 if (!pat) 6170 goto out; 6171 if (dev_get_valid_name(dev, pat) < 0) 6172 goto out; 6173 } 6174 6175 /* 6176 * And now a mini version of register_netdevice and unregister_netdevice. 6177 */ 6178 6179 /* If device is running, close it first. */ 6180 dev_close(dev); 6181 6182 /* And unlink it from device chain */ 6183 err = -ENODEV; 6184 unlist_netdevice(dev); 6185 6186 synchronize_net(); 6187 6188 /* Shutdown queueing discipline. */ 6189 dev_shutdown(dev); 6190 6191 /* Notify protocols that we are about to destroy 6192 this device. They should clean up all of their state. 6193 6194 Note that dev->reg_state stays at NETREG_REGISTERED. 6195 This is wanted because this way 8021q and macvlan know 6196 the device is just moving and can keep their slaves up. 6197 */ 6198 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6199 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 6200 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 6201 6202 /* 6203 * Flush the unicast and multicast chains 6204 */ 6205 dev_uc_flush(dev); 6206 dev_mc_flush(dev); 6207 6208 /* Actually switch the network namespace */ 6209 dev_net_set(dev, net); 6210 6211 /* If there is an ifindex conflict assign a new one */ 6212 if (__dev_get_by_index(net, dev->ifindex)) { 6213 int iflink = (dev->iflink == dev->ifindex); 6214 dev->ifindex = dev_new_index(net); 6215 if (iflink) 6216 dev->iflink = dev->ifindex; 6217 } 6218 6219 /* Fixup kobjects */ 6220 err = device_rename(&dev->dev, dev->name); 6221 WARN_ON(err); 6222 6223 /* Add the device back in the hashes */ 6224 list_netdevice(dev); 6225 6226 /* Notify protocols that a new device appeared. */ 6227 call_netdevice_notifiers(NETDEV_REGISTER, dev); 6228 6229 /* 6230 * Prevent userspace races by waiting until the network 6231 * device is fully set up before sending notifications.
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
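
/*
 * Illustrative sketch only (compiled out): how an aggregating driver, in
 * the spirit of bonding or bridging, might fold the feature sets of two
 * slave devices into a master feature set with netdev_increment_features().
 * The example_* name and the choice of the master's own features as the
 * mask are assumptions of this sketch.
 */
#if 0
static netdev_features_t example_master_features(struct net_device *master,
						 struct net_device *slave_a,
						 struct net_device *slave_b)
{
	netdev_features_t mask = master->features;
	netdev_features_t all = master->features & ~NETIF_F_ONE_FOR_ALL;

	all = netdev_increment_features(all, slave_a->features, mask);
	all = netdev_increment_features(all, slave_b->features, mask);

	return all;
}
#endif
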
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

int __netdev_printk(const char *level, const struct net_device *dev,
		    struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
EXPORT_SYMBOL(__netdev_printk);

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
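
/*
 * Illustrative sketch only (compiled out): the per-level helpers defined
 * above are what drivers normally call for device-prefixed log messages.
 * example_open() is an assumed ndo_open-style handler, not part of this
 * file.
 */
#if 0
static int example_open(struct net_device *dev)
{
	if (dev->mtu < 68)
		netdev_warn(dev, "unusually small MTU %u\n", dev->mtu);

	netdev_info(dev, "device opened\n");
	return 0;
}
#endif
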
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
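
/*
 * Illustrative sketch only (compiled out): the same pernet_operations
 * pattern used by netdev_net_ops and default_device_ops above, as an
 * out-of-file subsystem would register it.  All example_* names are
 * assumptions of this sketch.
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* allocate per-namespace state here, return -ENOMEM on failure */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* free whatever example_net_init() allocated */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_subsys_init(void)
{
	/* typically called from module or subsystem init */
	return register_pernet_subsys(&example_net_ops);
}
#endif
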
/*
 * Initialize the DEV module.  At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  That is, ensure the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);