/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *						--BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

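/*
 * Illustrative sketch (not part of this file): a minimal protocol handler
 * registered with dev_add_pack() and torn down with dev_remove_pack().
 * The names "my_rcv" and "my_ptype" are hypothetical.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// we own this reference; inspect and release it
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),  // htons(ETH_P_ALL) for a tap
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// e.g. from module init
 *	...
 *	dev_remove_pack(&my_ptype);	// from module exit; may sleep
 */
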
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

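/*
 * Illustrative sketch (assumption based on the parsing above, not taken
 * verbatim from this file): the "netdev=" boot parameter takes up to four
 * integers followed by the device name, in the order irq, base_addr,
 * mem_start, mem_end, e.g. on the kernel command line:
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * netdev_boot_setup_check() then applies these values to the device named
 * "eth0" when its driver probes it.
 */
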
/*******************************************************************************

		    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

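/*
 * Illustrative sketch (not part of this file): the two lookup flavours
 * above differ only in reference handling.  Under rcu_read_lock() the
 * returned pointer is only valid inside the read-side critical section;
 * dev_get_by_index() instead takes a reference the caller must drop.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		use(dev);			// hypothetical; no dev_put()
 *	rcu_read_unlock();
 *
 *	dev = dev_get_by_index(net, ifindex);	// refcounted variant
 *	if (dev) {
 *		use(dev);
 *		dev_put(dev);
 *	}
 */
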
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

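/*
 * Illustrative sketch (not part of this file): how drivers typically use
 * the naming helper above.  The error label is hypothetical.
 *
 *	err = dev_alloc_name(dev, "eth%d");	// picks the first free ethN
 *	if (err < 0)
 *		goto fail;
 *	// dev->name now holds e.g. "eth3"; the caller must hold rtnl_lock()
 *	// between choosing the name and registering the device.
 */
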
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
				name);
	}
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

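/*
 * Illustrative sketch (not part of this file): kernel code that brings a
 * device up must do so under the RTNL lock, mirroring the SIOCSIFFLAGS
 * ioctl path.  The name "eth0" is just an example.
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, "eth0");
 *	if (dev)
 *		err = dev_open(dev);
 *	rtnl_unlock();
 */
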
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

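/*
 * Illustrative sketch (not part of this file): a minimal netdevice notifier.
 * The names "my_netdev_event" and "my_nb" are hypothetical.  In this kernel
 * the notifier cookie is the struct net_device pointer itself, as passed by
 * call_netdevice_notifiers() above.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			break;		// react to the event here
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);	// existing devices are replayed
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */
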
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

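/*
 * Illustrative sketch (assumption, not part of this file): the request
 * validated above is built in userspace roughly like this and submitted
 * with the SIOCSHWTSTAMP ioctl; error handling omitted, "eth0" is just
 * an example name.
 *
 *	struct hwtstamp_config cfg = {
 *		.flags		= 0,			// reserved, must be 0
 *		.tx_type	= HWTSTAMP_TX_ON,
 *		.rx_filter	= HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */
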
static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

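/*
 * Illustrative sketch (not part of this file): a multiqueue driver that
 * allocated the maximum number of queues up front and later learned how
 * many the hardware can actually use would trim them like this, under
 * RTNL once the device is registered.  "hw_tx_queues" and "hw_rx_queues"
 * are hypothetical driver values and must not exceed dev->num_tx_queues
 * and dev->num_rx_queues respectively.
 *
 *	err = netif_set_real_num_tx_queues(dev, hw_tx_queues);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, hw_rx_queues);
 */
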
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

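/*
 * Illustrative sketch (not part of this file): drivers typically pair
 * netif_device_detach()/netif_device_attach() around suspend/resume or
 * hot-unplug, so the stack stops handing them packets while the hardware
 * is away.  "my_suspend", "my_resume" and "my_netdev" are hypothetical.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		netif_device_detach(my_netdev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		netif_device_attach(my_netdev);
 *		return 0;
 *	}
 */
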
1947 */ 1948 int skb_checksum_help(struct sk_buff *skb) 1949 { 1950 __wsum csum; 1951 int ret = 0, offset; 1952 1953 if (skb->ip_summed == CHECKSUM_COMPLETE) 1954 goto out_set_summed; 1955 1956 if (unlikely(skb_shinfo(skb)->gso_size)) { 1957 skb_warn_bad_offload(skb); 1958 return -EINVAL; 1959 } 1960 1961 offset = skb_checksum_start_offset(skb); 1962 BUG_ON(offset >= skb_headlen(skb)); 1963 csum = skb_checksum(skb, offset, skb->len - offset, 0); 1964 1965 offset += skb->csum_offset; 1966 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 1967 1968 if (skb_cloned(skb) && 1969 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 1970 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1971 if (ret) 1972 goto out; 1973 } 1974 1975 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 1976 out_set_summed: 1977 skb->ip_summed = CHECKSUM_NONE; 1978 out: 1979 return ret; 1980 } 1981 EXPORT_SYMBOL(skb_checksum_help); 1982 1983 /** 1984 * skb_gso_segment - Perform segmentation on skb. 1985 * @skb: buffer to segment 1986 * @features: features for the output path (see dev->features) 1987 * 1988 * This function segments the given skb and returns a list of segments. 1989 * 1990 * It may return NULL if the skb requires no segmentation. This is 1991 * only possible when GSO is used for verifying header integrity. 1992 */ 1993 struct sk_buff *skb_gso_segment(struct sk_buff *skb, 1994 netdev_features_t features) 1995 { 1996 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1997 struct packet_type *ptype; 1998 __be16 type = skb->protocol; 1999 int vlan_depth = ETH_HLEN; 2000 int err; 2001 2002 while (type == htons(ETH_P_8021Q)) { 2003 struct vlan_hdr *vh; 2004 2005 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2006 return ERR_PTR(-EINVAL); 2007 2008 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2009 type = vh->h_vlan_encapsulated_proto; 2010 vlan_depth += VLAN_HLEN; 2011 } 2012 2013 skb_reset_mac_header(skb); 2014 skb->mac_len = skb->network_header - skb->mac_header; 2015 __skb_pull(skb, skb->mac_len); 2016 2017 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2018 skb_warn_bad_offload(skb); 2019 2020 if (skb_header_cloned(skb) && 2021 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 2022 return ERR_PTR(err); 2023 } 2024 2025 rcu_read_lock(); 2026 list_for_each_entry_rcu(ptype, 2027 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2028 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 2029 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2030 err = ptype->gso_send_check(skb); 2031 segs = ERR_PTR(err); 2032 if (err || skb_gso_ok(skb, features)) 2033 break; 2034 __skb_push(skb, (skb->data - 2035 skb_network_header(skb))); 2036 } 2037 segs = ptype->gso_segment(skb, features); 2038 break; 2039 } 2040 } 2041 rcu_read_unlock(); 2042 2043 __skb_push(skb, skb->data - skb_mac_header(skb)); 2044 2045 return segs; 2046 } 2047 EXPORT_SYMBOL(skb_gso_segment); 2048 2049 /* Take action when hardware reception checksum errors are detected. */ 2050 #ifdef CONFIG_BUG 2051 void netdev_rx_csum_fault(struct net_device *dev) 2052 { 2053 if (net_ratelimit()) { 2054 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 2055 dump_stack(); 2056 } 2057 } 2058 EXPORT_SYMBOL(netdev_rx_csum_fault); 2059 #endif 2060 2061 /* Actually, we should eliminate this check as soon as we know, that: 2062 * 1. IOMMU is present and allows to map all the memory. 2063 * 2. No high memory really exists on this machine. 
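 *
 * Its caller in this file, harmonize_features(), reacts to a non-zero
 * return by clearing NETIF_F_SG so that the skb ends up linearized into
 * DMA-able memory:
 *
 *	else if (illegal_highdma(skb->dev, skb))
 *		features &= ~NETIF_F_SG;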
2064 */ 2065 2066 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2067 { 2068 #ifdef CONFIG_HIGHMEM 2069 int i; 2070 if (!(dev->features & NETIF_F_HIGHDMA)) { 2071 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2072 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2073 if (PageHighMem(skb_frag_page(frag))) 2074 return 1; 2075 } 2076 } 2077 2078 if (PCI_DMA_BUS_IS_PHYS) { 2079 struct device *pdev = dev->dev.parent; 2080 2081 if (!pdev) 2082 return 0; 2083 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2084 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2085 dma_addr_t addr = page_to_phys(skb_frag_page(frag)); 2086 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2087 return 1; 2088 } 2089 } 2090 #endif 2091 return 0; 2092 } 2093 2094 struct dev_gso_cb { 2095 void (*destructor)(struct sk_buff *skb); 2096 }; 2097 2098 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 2099 2100 static void dev_gso_skb_destructor(struct sk_buff *skb) 2101 { 2102 struct dev_gso_cb *cb; 2103 2104 do { 2105 struct sk_buff *nskb = skb->next; 2106 2107 skb->next = nskb->next; 2108 nskb->next = NULL; 2109 kfree_skb(nskb); 2110 } while (skb->next); 2111 2112 cb = DEV_GSO_CB(skb); 2113 if (cb->destructor) 2114 cb->destructor(skb); 2115 } 2116 2117 /** 2118 * dev_gso_segment - Perform emulated hardware segmentation on skb. 2119 * @skb: buffer to segment 2120 * @features: device features as applicable to this skb 2121 * 2122 * This function segments the given skb and stores the list of segments 2123 * in skb->next. 2124 */ 2125 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) 2126 { 2127 struct sk_buff *segs; 2128 2129 segs = skb_gso_segment(skb, features); 2130 2131 /* Verifying header integrity only. */ 2132 if (!segs) 2133 return 0; 2134 2135 if (IS_ERR(segs)) 2136 return PTR_ERR(segs); 2137 2138 skb->next = segs; 2139 DEV_GSO_CB(skb)->destructor = skb->destructor; 2140 skb->destructor = dev_gso_skb_destructor; 2141 2142 return 0; 2143 } 2144 2145 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) 2146 { 2147 return ((features & NETIF_F_GEN_CSUM) || 2148 ((features & NETIF_F_V4_CSUM) && 2149 protocol == htons(ETH_P_IP)) || 2150 ((features & NETIF_F_V6_CSUM) && 2151 protocol == htons(ETH_P_IPV6)) || 2152 ((features & NETIF_F_FCOE_CRC) && 2153 protocol == htons(ETH_P_FCOE))); 2154 } 2155 2156 static netdev_features_t harmonize_features(struct sk_buff *skb, 2157 __be16 protocol, netdev_features_t features) 2158 { 2159 if (skb->ip_summed != CHECKSUM_NONE && 2160 !can_checksum_protocol(features, protocol)) { 2161 features &= ~NETIF_F_ALL_CSUM; 2162 features &= ~NETIF_F_SG; 2163 } else if (illegal_highdma(skb->dev, skb)) { 2164 features &= ~NETIF_F_SG; 2165 } 2166 2167 return features; 2168 } 2169 2170 netdev_features_t netif_skb_features(struct sk_buff *skb) 2171 { 2172 __be16 protocol = skb->protocol; 2173 netdev_features_t features = skb->dev->features; 2174 2175 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2176 features &= ~NETIF_F_GSO_MASK; 2177 2178 if (protocol == htons(ETH_P_8021Q)) { 2179 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2180 protocol = veh->h_vlan_encapsulated_proto; 2181 } else if (!vlan_tx_tag_present(skb)) { 2182 return harmonize_features(skb, protocol, features); 2183 } 2184 2185 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); 2186 2187 if (protocol != htons(ETH_P_8021Q)) { 2188 return harmonize_features(skb, protocol, features); 2189 } else { 2190 features &= 
NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 2191 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; 2192 return harmonize_features(skb, protocol, features); 2193 } 2194 } 2195 EXPORT_SYMBOL(netif_skb_features); 2196 2197 /* 2198 * Returns true if either: 2199 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2200 * 2. skb is fragmented and the device does not support SG. 2201 */ 2202 static inline int skb_needs_linearize(struct sk_buff *skb, 2203 int features) 2204 { 2205 return skb_is_nonlinear(skb) && 2206 ((skb_has_frag_list(skb) && 2207 !(features & NETIF_F_FRAGLIST)) || 2208 (skb_shinfo(skb)->nr_frags && 2209 !(features & NETIF_F_SG))); 2210 } 2211 2212 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2213 struct netdev_queue *txq) 2214 { 2215 const struct net_device_ops *ops = dev->netdev_ops; 2216 int rc = NETDEV_TX_OK; 2217 unsigned int skb_len; 2218 2219 if (likely(!skb->next)) { 2220 netdev_features_t features; 2221 2222 /* 2223 * If device doesn't need skb->dst, release it right now while 2224 * its hot in this cpu cache 2225 */ 2226 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2227 skb_dst_drop(skb); 2228 2229 features = netif_skb_features(skb); 2230 2231 if (vlan_tx_tag_present(skb) && 2232 !(features & NETIF_F_HW_VLAN_TX)) { 2233 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2234 if (unlikely(!skb)) 2235 goto out; 2236 2237 skb->vlan_tci = 0; 2238 } 2239 2240 if (netif_needs_gso(skb, features)) { 2241 if (unlikely(dev_gso_segment(skb, features))) 2242 goto out_kfree_skb; 2243 if (skb->next) 2244 goto gso; 2245 } else { 2246 if (skb_needs_linearize(skb, features) && 2247 __skb_linearize(skb)) 2248 goto out_kfree_skb; 2249 2250 /* If packet is not checksummed and device does not 2251 * support checksumming for this protocol, complete 2252 * checksumming here. 2253 */ 2254 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2255 skb_set_transport_header(skb, 2256 skb_checksum_start_offset(skb)); 2257 if (!(features & NETIF_F_ALL_CSUM) && 2258 skb_checksum_help(skb)) 2259 goto out_kfree_skb; 2260 } 2261 } 2262 2263 if (!list_empty(&ptype_all)) 2264 dev_queue_xmit_nit(skb, dev); 2265 2266 skb_len = skb->len; 2267 rc = ops->ndo_start_xmit(skb, dev); 2268 trace_net_dev_xmit(skb, rc, dev, skb_len); 2269 if (rc == NETDEV_TX_OK) 2270 txq_trans_update(txq); 2271 return rc; 2272 } 2273 2274 gso: 2275 do { 2276 struct sk_buff *nskb = skb->next; 2277 2278 skb->next = nskb->next; 2279 nskb->next = NULL; 2280 2281 /* 2282 * If device doesn't need nskb->dst, release it right now while 2283 * its hot in this cpu cache 2284 */ 2285 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2286 skb_dst_drop(nskb); 2287 2288 if (!list_empty(&ptype_all)) 2289 dev_queue_xmit_nit(nskb, dev); 2290 2291 skb_len = nskb->len; 2292 rc = ops->ndo_start_xmit(nskb, dev); 2293 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2294 if (unlikely(rc != NETDEV_TX_OK)) { 2295 if (rc & ~NETDEV_TX_MASK) 2296 goto out_kfree_gso_skb; 2297 nskb->next = skb->next; 2298 skb->next = nskb; 2299 return rc; 2300 } 2301 txq_trans_update(txq); 2302 if (unlikely(netif_xmit_stopped(txq) && skb->next)) 2303 return NETDEV_TX_BUSY; 2304 } while (skb->next); 2305 2306 out_kfree_gso_skb: 2307 if (likely(skb->next == NULL)) 2308 skb->destructor = DEV_GSO_CB(skb)->destructor; 2309 out_kfree_skb: 2310 kfree_skb(skb); 2311 out: 2312 return rc; 2313 } 2314 2315 static u32 hashrnd __read_mostly; 2316 2317 /* 2318 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 2319 * to be used as a distribution range. 
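 *
 * A hedged driver-side sketch: an ndo_select_queue() implementation may
 * simply defer to this hash through the skb_tx_hash() wrapper, which
 * supplies dev->real_num_tx_queues as the range ("foo_select_queue" is an
 * illustrative name, not a real driver):
 *
 *	static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}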
2320 */ 2321 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, 2322 unsigned int num_tx_queues) 2323 { 2324 u32 hash; 2325 u16 qoffset = 0; 2326 u16 qcount = num_tx_queues; 2327 2328 if (skb_rx_queue_recorded(skb)) { 2329 hash = skb_get_rx_queue(skb); 2330 while (unlikely(hash >= num_tx_queues)) 2331 hash -= num_tx_queues; 2332 return hash; 2333 } 2334 2335 if (dev->num_tc) { 2336 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 2337 qoffset = dev->tc_to_txq[tc].offset; 2338 qcount = dev->tc_to_txq[tc].count; 2339 } 2340 2341 if (skb->sk && skb->sk->sk_hash) 2342 hash = skb->sk->sk_hash; 2343 else 2344 hash = (__force u16) skb->protocol; 2345 hash = jhash_1word(hash, hashrnd); 2346 2347 return (u16) (((u64) hash * qcount) >> 32) + qoffset; 2348 } 2349 EXPORT_SYMBOL(__skb_tx_hash); 2350 2351 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 2352 { 2353 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2354 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 2355 dev->name, queue_index, 2356 dev->real_num_tx_queues); 2357 return 0; 2358 } 2359 return queue_index; 2360 } 2361 2362 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 2363 { 2364 #ifdef CONFIG_XPS 2365 struct xps_dev_maps *dev_maps; 2366 struct xps_map *map; 2367 int queue_index = -1; 2368 2369 rcu_read_lock(); 2370 dev_maps = rcu_dereference(dev->xps_maps); 2371 if (dev_maps) { 2372 map = rcu_dereference( 2373 dev_maps->cpu_map[raw_smp_processor_id()]); 2374 if (map) { 2375 if (map->len == 1) 2376 queue_index = map->queues[0]; 2377 else { 2378 u32 hash; 2379 if (skb->sk && skb->sk->sk_hash) 2380 hash = skb->sk->sk_hash; 2381 else 2382 hash = (__force u16) skb->protocol ^ 2383 skb->rxhash; 2384 hash = jhash_1word(hash, hashrnd); 2385 queue_index = map->queues[ 2386 ((u64)hash * map->len) >> 32]; 2387 } 2388 if (unlikely(queue_index >= dev->real_num_tx_queues)) 2389 queue_index = -1; 2390 } 2391 } 2392 rcu_read_unlock(); 2393 2394 return queue_index; 2395 #else 2396 return -1; 2397 #endif 2398 } 2399 2400 struct netdev_queue *netdev_pick_tx(struct net_device *dev, 2401 struct sk_buff *skb) 2402 { 2403 int queue_index; 2404 const struct net_device_ops *ops = dev->netdev_ops; 2405 2406 if (dev->real_num_tx_queues == 1) 2407 queue_index = 0; 2408 else if (ops->ndo_select_queue) { 2409 queue_index = ops->ndo_select_queue(dev, skb); 2410 queue_index = dev_cap_txqueue(dev, queue_index); 2411 } else { 2412 struct sock *sk = skb->sk; 2413 queue_index = sk_tx_queue_get(sk); 2414 2415 if (queue_index < 0 || skb->ooo_okay || 2416 queue_index >= dev->real_num_tx_queues) { 2417 int old_index = queue_index; 2418 2419 queue_index = get_xps_queue(dev, skb); 2420 if (queue_index < 0) 2421 queue_index = skb_tx_hash(dev, skb); 2422 2423 if (queue_index != old_index && sk) { 2424 struct dst_entry *dst = 2425 rcu_dereference_check(sk->sk_dst_cache, 1); 2426 2427 if (dst && skb_dst(skb) == dst) 2428 sk_tx_queue_set(sk, queue_index); 2429 } 2430 } 2431 } 2432 2433 skb_set_queue_mapping(skb, queue_index); 2434 return netdev_get_tx_queue(dev, queue_index); 2435 } 2436 2437 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2438 struct net_device *dev, 2439 struct netdev_queue *txq) 2440 { 2441 spinlock_t *root_lock = qdisc_lock(q); 2442 bool contended; 2443 int rc; 2444 2445 qdisc_skb_cb(skb)->pkt_len = skb->len; 2446 qdisc_calculate_pkt_len(skb, q); 2447 /* 2448 * Heuristic to force contended enqueues to serialize on a 
2449 * separate lock before trying to get qdisc main lock. 2450 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2451 * and dequeue packets faster. 2452 */ 2453 contended = qdisc_is_running(q); 2454 if (unlikely(contended)) 2455 spin_lock(&q->busylock); 2456 2457 spin_lock(root_lock); 2458 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2459 kfree_skb(skb); 2460 rc = NET_XMIT_DROP; 2461 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2462 qdisc_run_begin(q)) { 2463 /* 2464 * This is a work-conserving queue; there are no old skbs 2465 * waiting to be sent out; and the qdisc is not running - 2466 * xmit the skb directly. 2467 */ 2468 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2469 skb_dst_force(skb); 2470 2471 qdisc_bstats_update(q, skb); 2472 2473 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2474 if (unlikely(contended)) { 2475 spin_unlock(&q->busylock); 2476 contended = false; 2477 } 2478 __qdisc_run(q); 2479 } else 2480 qdisc_run_end(q); 2481 2482 rc = NET_XMIT_SUCCESS; 2483 } else { 2484 skb_dst_force(skb); 2485 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 2486 if (qdisc_run_begin(q)) { 2487 if (unlikely(contended)) { 2488 spin_unlock(&q->busylock); 2489 contended = false; 2490 } 2491 __qdisc_run(q); 2492 } 2493 } 2494 spin_unlock(root_lock); 2495 if (unlikely(contended)) 2496 spin_unlock(&q->busylock); 2497 return rc; 2498 } 2499 2500 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 2501 static void skb_update_prio(struct sk_buff *skb) 2502 { 2503 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2504 2505 if (!skb->priority && skb->sk && map) { 2506 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; 2507 2508 if (prioidx < map->priomap_len) 2509 skb->priority = map->priomap[prioidx]; 2510 } 2511 } 2512 #else 2513 #define skb_update_prio(skb) 2514 #endif 2515 2516 static DEFINE_PER_CPU(int, xmit_recursion); 2517 #define RECURSION_LIMIT 10 2518 2519 /** 2520 * dev_loopback_xmit - loop back @skb 2521 * @skb: buffer to transmit 2522 */ 2523 int dev_loopback_xmit(struct sk_buff *skb) 2524 { 2525 skb_reset_mac_header(skb); 2526 __skb_pull(skb, skb_network_offset(skb)); 2527 skb->pkt_type = PACKET_LOOPBACK; 2528 skb->ip_summed = CHECKSUM_UNNECESSARY; 2529 WARN_ON(!skb_dst(skb)); 2530 skb_dst_force(skb); 2531 netif_rx_ni(skb); 2532 return 0; 2533 } 2534 EXPORT_SYMBOL(dev_loopback_xmit); 2535 2536 /** 2537 * dev_queue_xmit - transmit a buffer 2538 * @skb: buffer to transmit 2539 * 2540 * Queue a buffer for transmission to a network device. The caller must 2541 * have set the device and priority and built the buffer before calling 2542 * this function. The function can be called from an interrupt. 2543 * 2544 * A negative errno code is returned on a failure. A success does not 2545 * guarantee the frame will be transmitted as it may be dropped due 2546 * to congestion or traffic shaping. 2547 * 2548 * ----------------------------------------------------------------------------------- 2549 * I notice this method can also return errors from the queue disciplines, 2550 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2551 * be positive. 2552 * 2553 * Regardless of the return value, the skb is consumed, so it is currently 2554 * difficult to retry a send to this method. (You can bump the ref count 2555 * before sending to hold a reference for retry if you are careful.) 2556 * 2557 * When calling this method, interrupts MUST be enabled. 
This is because 2558 * the BH enable code must have IRQs enabled so that it will not deadlock. 2559 * --BLG 2560 */ 2561 int dev_queue_xmit(struct sk_buff *skb) 2562 { 2563 struct net_device *dev = skb->dev; 2564 struct netdev_queue *txq; 2565 struct Qdisc *q; 2566 int rc = -ENOMEM; 2567 2568 /* Disable soft irqs for various locks below. Also 2569 * stops preemption for RCU. 2570 */ 2571 rcu_read_lock_bh(); 2572 2573 skb_update_prio(skb); 2574 2575 txq = netdev_pick_tx(dev, skb); 2576 q = rcu_dereference_bh(txq->qdisc); 2577 2578 #ifdef CONFIG_NET_CLS_ACT 2579 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2580 #endif 2581 trace_net_dev_queue(skb); 2582 if (q->enqueue) { 2583 rc = __dev_xmit_skb(skb, q, dev, txq); 2584 goto out; 2585 } 2586 2587 /* The device has no queue. Common case for software devices: 2588 loopback, all the sorts of tunnels... 2589 2590 Really, it is unlikely that netif_tx_lock protection is necessary 2591 here. (f.e. loopback and IP tunnels are clean ignoring statistics 2592 counters.) 2593 However, it is possible, that they rely on protection 2594 made by us here. 2595 2596 Check this and shot the lock. It is not prone from deadlocks. 2597 Either shot noqueue qdisc, it is even simpler 8) 2598 */ 2599 if (dev->flags & IFF_UP) { 2600 int cpu = smp_processor_id(); /* ok because BHs are off */ 2601 2602 if (txq->xmit_lock_owner != cpu) { 2603 2604 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) 2605 goto recursion_alert; 2606 2607 HARD_TX_LOCK(dev, txq, cpu); 2608 2609 if (!netif_xmit_stopped(txq)) { 2610 __this_cpu_inc(xmit_recursion); 2611 rc = dev_hard_start_xmit(skb, dev, txq); 2612 __this_cpu_dec(xmit_recursion); 2613 if (dev_xmit_complete(rc)) { 2614 HARD_TX_UNLOCK(dev, txq); 2615 goto out; 2616 } 2617 } 2618 HARD_TX_UNLOCK(dev, txq); 2619 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 2620 dev->name); 2621 } else { 2622 /* Recursion is detected! It is possible, 2623 * unfortunately 2624 */ 2625 recursion_alert: 2626 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 2627 dev->name); 2628 } 2629 } 2630 2631 rc = -ENETDOWN; 2632 rcu_read_unlock_bh(); 2633 2634 kfree_skb(skb); 2635 return rc; 2636 out: 2637 rcu_read_unlock_bh(); 2638 return rc; 2639 } 2640 EXPORT_SYMBOL(dev_queue_xmit); 2641 2642 2643 /*======================================================================= 2644 Receiver routines 2645 =======================================================================*/ 2646 2647 int netdev_max_backlog __read_mostly = 1000; 2648 EXPORT_SYMBOL(netdev_max_backlog); 2649 2650 int netdev_tstamp_prequeue __read_mostly = 1; 2651 int netdev_budget __read_mostly = 300; 2652 int weight_p __read_mostly = 64; /* old backlog weight */ 2653 2654 /* Called with irq disabled */ 2655 static inline void ____napi_schedule(struct softnet_data *sd, 2656 struct napi_struct *napi) 2657 { 2658 list_add_tail(&napi->poll_list, &sd->poll_list); 2659 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2660 } 2661 2662 /* 2663 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses 2664 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value 2665 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb 2666 * if hash is a canonical 4-tuple hash over transport ports. 
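 *
 * Callers normally go through the skb_get_rxhash() wrapper, which computes
 * the hash lazily; a hedged paraphrase of that inline helper:
 *
 *	if (!skb->rxhash)
 *		__skb_get_rxhash(skb);
 *	return skb->rxhash;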
2667 */ 2668 void __skb_get_rxhash(struct sk_buff *skb) 2669 { 2670 struct flow_keys keys; 2671 u32 hash; 2672 2673 if (!skb_flow_dissect(skb, &keys)) 2674 return; 2675 2676 if (keys.ports) 2677 skb->l4_rxhash = 1; 2678 2679 /* get a consistent hash (same value on both flow directions) */ 2680 if (((__force u32)keys.dst < (__force u32)keys.src) || 2681 (((__force u32)keys.dst == (__force u32)keys.src) && 2682 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) { 2683 swap(keys.dst, keys.src); 2684 swap(keys.port16[0], keys.port16[1]); 2685 } 2686 2687 hash = jhash_3words((__force u32)keys.dst, 2688 (__force u32)keys.src, 2689 (__force u32)keys.ports, hashrnd); 2690 if (!hash) 2691 hash = 1; 2692 2693 skb->rxhash = hash; 2694 } 2695 EXPORT_SYMBOL(__skb_get_rxhash); 2696 2697 #ifdef CONFIG_RPS 2698 2699 /* One global table that all flow-based protocols share. */ 2700 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2701 EXPORT_SYMBOL(rps_sock_flow_table); 2702 2703 struct static_key rps_needed __read_mostly; 2704 2705 static struct rps_dev_flow * 2706 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2707 struct rps_dev_flow *rflow, u16 next_cpu) 2708 { 2709 if (next_cpu != RPS_NO_CPU) { 2710 #ifdef CONFIG_RFS_ACCEL 2711 struct netdev_rx_queue *rxqueue; 2712 struct rps_dev_flow_table *flow_table; 2713 struct rps_dev_flow *old_rflow; 2714 u32 flow_id; 2715 u16 rxq_index; 2716 int rc; 2717 2718 /* Should we steer this flow to a different hardware queue? */ 2719 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 2720 !(dev->features & NETIF_F_NTUPLE)) 2721 goto out; 2722 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 2723 if (rxq_index == skb_get_rx_queue(skb)) 2724 goto out; 2725 2726 rxqueue = dev->_rx + rxq_index; 2727 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2728 if (!flow_table) 2729 goto out; 2730 flow_id = skb->rxhash & flow_table->mask; 2731 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 2732 rxq_index, flow_id); 2733 if (rc < 0) 2734 goto out; 2735 old_rflow = rflow; 2736 rflow = &flow_table->flows[flow_id]; 2737 rflow->filter = rc; 2738 if (old_rflow->filter == rflow->filter) 2739 old_rflow->filter = RPS_NO_FILTER; 2740 out: 2741 #endif 2742 rflow->last_qtail = 2743 per_cpu(softnet_data, next_cpu).input_queue_head; 2744 } 2745 2746 rflow->cpu = next_cpu; 2747 return rflow; 2748 } 2749 2750 /* 2751 * get_rps_cpu is called from netif_receive_skb and returns the target 2752 * CPU from the RPS map of the receiving queue for a given skb. 2753 * rcu_read_lock must be held on entry. 
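 *
 * A hedged sketch of how the result is consumed (this mirrors netif_rx()
 * below; a negative return means "process on the local CPU"):
 *
 *	cpu = get_rps_cpu(skb->dev, skb, &rflow);
 *	if (cpu < 0)
 *		cpu = smp_processor_id();
 *	ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);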
2754 */ 2755 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2756 struct rps_dev_flow **rflowp) 2757 { 2758 struct netdev_rx_queue *rxqueue; 2759 struct rps_map *map; 2760 struct rps_dev_flow_table *flow_table; 2761 struct rps_sock_flow_table *sock_flow_table; 2762 int cpu = -1; 2763 u16 tcpu; 2764 2765 if (skb_rx_queue_recorded(skb)) { 2766 u16 index = skb_get_rx_queue(skb); 2767 if (unlikely(index >= dev->real_num_rx_queues)) { 2768 WARN_ONCE(dev->real_num_rx_queues > 1, 2769 "%s received packet on queue %u, but number " 2770 "of RX queues is %u\n", 2771 dev->name, index, dev->real_num_rx_queues); 2772 goto done; 2773 } 2774 rxqueue = dev->_rx + index; 2775 } else 2776 rxqueue = dev->_rx; 2777 2778 map = rcu_dereference(rxqueue->rps_map); 2779 if (map) { 2780 if (map->len == 1 && 2781 !rcu_access_pointer(rxqueue->rps_flow_table)) { 2782 tcpu = map->cpus[0]; 2783 if (cpu_online(tcpu)) 2784 cpu = tcpu; 2785 goto done; 2786 } 2787 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { 2788 goto done; 2789 } 2790 2791 skb_reset_network_header(skb); 2792 if (!skb_get_rxhash(skb)) 2793 goto done; 2794 2795 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2796 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2797 if (flow_table && sock_flow_table) { 2798 u16 next_cpu; 2799 struct rps_dev_flow *rflow; 2800 2801 rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; 2802 tcpu = rflow->cpu; 2803 2804 next_cpu = sock_flow_table->ents[skb->rxhash & 2805 sock_flow_table->mask]; 2806 2807 /* 2808 * If the desired CPU (where last recvmsg was done) is 2809 * different from current CPU (one in the rx-queue flow 2810 * table entry), switch if one of the following holds: 2811 * - Current CPU is unset (equal to RPS_NO_CPU). 2812 * - Current CPU is offline. 2813 * - The current CPU's queue tail has advanced beyond the 2814 * last packet that was enqueued using this table entry. 2815 * This guarantees that all previous packets for the flow 2816 * have been dequeued, thus preserving in order delivery. 2817 */ 2818 if (unlikely(tcpu != next_cpu) && 2819 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 2820 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 2821 rflow->last_qtail)) >= 0)) { 2822 tcpu = next_cpu; 2823 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 2824 } 2825 2826 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 2827 *rflowp = rflow; 2828 cpu = tcpu; 2829 goto done; 2830 } 2831 } 2832 2833 if (map) { 2834 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2835 2836 if (cpu_online(tcpu)) { 2837 cpu = tcpu; 2838 goto done; 2839 } 2840 } 2841 2842 done: 2843 return cpu; 2844 } 2845 2846 #ifdef CONFIG_RFS_ACCEL 2847 2848 /** 2849 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 2850 * @dev: Device on which the filter was set 2851 * @rxq_index: RX queue index 2852 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 2853 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 2854 * 2855 * Drivers that implement ndo_rx_flow_steer() should periodically call 2856 * this function for each installed filter and remove the filters for 2857 * which it returns %true. 
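 *
 * A hedged driver-side sketch; "adapter" and foo_remove_filter() are
 * assumptions standing in for the driver's own filter bookkeeping:
 *
 *	if (rps_may_expire_flow(dev, rxq_index, flow_id, filter_id))
 *		foo_remove_filter(adapter, filter_id);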
2858 */ 2859 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 2860 u32 flow_id, u16 filter_id) 2861 { 2862 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 2863 struct rps_dev_flow_table *flow_table; 2864 struct rps_dev_flow *rflow; 2865 bool expire = true; 2866 int cpu; 2867 2868 rcu_read_lock(); 2869 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2870 if (flow_table && flow_id <= flow_table->mask) { 2871 rflow = &flow_table->flows[flow_id]; 2872 cpu = ACCESS_ONCE(rflow->cpu); 2873 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 2874 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 2875 rflow->last_qtail) < 2876 (int)(10 * flow_table->mask))) 2877 expire = false; 2878 } 2879 rcu_read_unlock(); 2880 return expire; 2881 } 2882 EXPORT_SYMBOL(rps_may_expire_flow); 2883 2884 #endif /* CONFIG_RFS_ACCEL */ 2885 2886 /* Called from hardirq (IPI) context */ 2887 static void rps_trigger_softirq(void *data) 2888 { 2889 struct softnet_data *sd = data; 2890 2891 ____napi_schedule(sd, &sd->backlog); 2892 sd->received_rps++; 2893 } 2894 2895 #endif /* CONFIG_RPS */ 2896 2897 /* 2898 * Check if this softnet_data structure is another cpu one 2899 * If yes, queue it to our IPI list and return 1 2900 * If no, return 0 2901 */ 2902 static int rps_ipi_queued(struct softnet_data *sd) 2903 { 2904 #ifdef CONFIG_RPS 2905 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 2906 2907 if (sd != mysd) { 2908 sd->rps_ipi_next = mysd->rps_ipi_list; 2909 mysd->rps_ipi_list = sd; 2910 2911 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2912 return 1; 2913 } 2914 #endif /* CONFIG_RPS */ 2915 return 0; 2916 } 2917 2918 /* 2919 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 2920 * queue (may be a remote CPU queue). 2921 */ 2922 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 2923 unsigned int *qtail) 2924 { 2925 struct softnet_data *sd; 2926 unsigned long flags; 2927 2928 sd = &per_cpu(softnet_data, cpu); 2929 2930 local_irq_save(flags); 2931 2932 rps_lock(sd); 2933 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { 2934 if (skb_queue_len(&sd->input_pkt_queue)) { 2935 enqueue: 2936 __skb_queue_tail(&sd->input_pkt_queue, skb); 2937 input_queue_tail_incr_save(sd, qtail); 2938 rps_unlock(sd); 2939 local_irq_restore(flags); 2940 return NET_RX_SUCCESS; 2941 } 2942 2943 /* Schedule NAPI for backlog device 2944 * We can use non atomic operation since we own the queue lock 2945 */ 2946 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 2947 if (!rps_ipi_queued(sd)) 2948 ____napi_schedule(sd, &sd->backlog); 2949 } 2950 goto enqueue; 2951 } 2952 2953 sd->dropped++; 2954 rps_unlock(sd); 2955 2956 local_irq_restore(flags); 2957 2958 atomic_long_inc(&skb->dev->rx_dropped); 2959 kfree_skb(skb); 2960 return NET_RX_DROP; 2961 } 2962 2963 /** 2964 * netif_rx - post buffer to the network code 2965 * @skb: buffer to post 2966 * 2967 * This function receives a packet from a device driver and queues it for 2968 * the upper (protocol) levels to process. It always succeeds. The buffer 2969 * may be dropped during processing for congestion control or by the 2970 * protocol layers. 
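 *
 * A hedged sketch of the classic non-NAPI driver receive path that ends
 * up here (the surrounding driver context is assumed):
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);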
2971 * 2972 * return values: 2973 * NET_RX_SUCCESS (no congestion) 2974 * NET_RX_DROP (packet was dropped) 2975 * 2976 */ 2977 2978 int netif_rx(struct sk_buff *skb) 2979 { 2980 int ret; 2981 2982 /* if netpoll wants it, pretend we never saw it */ 2983 if (netpoll_rx(skb)) 2984 return NET_RX_DROP; 2985 2986 net_timestamp_check(netdev_tstamp_prequeue, skb); 2987 2988 trace_netif_rx(skb); 2989 #ifdef CONFIG_RPS 2990 if (static_key_false(&rps_needed)) { 2991 struct rps_dev_flow voidflow, *rflow = &voidflow; 2992 int cpu; 2993 2994 preempt_disable(); 2995 rcu_read_lock(); 2996 2997 cpu = get_rps_cpu(skb->dev, skb, &rflow); 2998 if (cpu < 0) 2999 cpu = smp_processor_id(); 3000 3001 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3002 3003 rcu_read_unlock(); 3004 preempt_enable(); 3005 } else 3006 #endif 3007 { 3008 unsigned int qtail; 3009 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 3010 put_cpu(); 3011 } 3012 return ret; 3013 } 3014 EXPORT_SYMBOL(netif_rx); 3015 3016 int netif_rx_ni(struct sk_buff *skb) 3017 { 3018 int err; 3019 3020 preempt_disable(); 3021 err = netif_rx(skb); 3022 if (local_softirq_pending()) 3023 do_softirq(); 3024 preempt_enable(); 3025 3026 return err; 3027 } 3028 EXPORT_SYMBOL(netif_rx_ni); 3029 3030 static void net_tx_action(struct softirq_action *h) 3031 { 3032 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3033 3034 if (sd->completion_queue) { 3035 struct sk_buff *clist; 3036 3037 local_irq_disable(); 3038 clist = sd->completion_queue; 3039 sd->completion_queue = NULL; 3040 local_irq_enable(); 3041 3042 while (clist) { 3043 struct sk_buff *skb = clist; 3044 clist = clist->next; 3045 3046 WARN_ON(atomic_read(&skb->users)); 3047 trace_kfree_skb(skb, net_tx_action); 3048 __kfree_skb(skb); 3049 } 3050 } 3051 3052 if (sd->output_queue) { 3053 struct Qdisc *head; 3054 3055 local_irq_disable(); 3056 head = sd->output_queue; 3057 sd->output_queue = NULL; 3058 sd->output_queue_tailp = &sd->output_queue; 3059 local_irq_enable(); 3060 3061 while (head) { 3062 struct Qdisc *q = head; 3063 spinlock_t *root_lock; 3064 3065 head = head->next_sched; 3066 3067 root_lock = qdisc_lock(q); 3068 if (spin_trylock(root_lock)) { 3069 smp_mb__before_clear_bit(); 3070 clear_bit(__QDISC_STATE_SCHED, 3071 &q->state); 3072 qdisc_run(q); 3073 spin_unlock(root_lock); 3074 } else { 3075 if (!test_bit(__QDISC_STATE_DEACTIVATED, 3076 &q->state)) { 3077 __netif_reschedule(q); 3078 } else { 3079 smp_mb__before_clear_bit(); 3080 clear_bit(__QDISC_STATE_SCHED, 3081 &q->state); 3082 } 3083 } 3084 } 3085 } 3086 } 3087 3088 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ 3089 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) 3090 /* This hook is defined here for ATM LANE */ 3091 int (*br_fdb_test_addr_hook)(struct net_device *dev, 3092 unsigned char *addr) __read_mostly; 3093 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 3094 #endif 3095 3096 #ifdef CONFIG_NET_CLS_ACT 3097 /* TODO: Maybe we should just force sch_ingress to be compiled in 3098 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 3099 * a compare and 2 stores extra right now if we dont have it on 3100 * but have CONFIG_NET_CLS_ACT 3101 * NOTE: This doesn't stop any functionality; if you dont have 3102 * the ingress scheduler, you just can't add policies on ingress. 
3103 * 3104 */ 3105 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 3106 { 3107 struct net_device *dev = skb->dev; 3108 u32 ttl = G_TC_RTTL(skb->tc_verd); 3109 int result = TC_ACT_OK; 3110 struct Qdisc *q; 3111 3112 if (unlikely(MAX_RED_LOOP < ttl++)) { 3113 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n", 3114 skb->skb_iif, dev->ifindex); 3115 return TC_ACT_SHOT; 3116 } 3117 3118 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 3119 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 3120 3121 q = rxq->qdisc; 3122 if (q != &noop_qdisc) { 3123 spin_lock(qdisc_lock(q)); 3124 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 3125 result = qdisc_enqueue_root(skb, q); 3126 spin_unlock(qdisc_lock(q)); 3127 } 3128 3129 return result; 3130 } 3131 3132 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 3133 struct packet_type **pt_prev, 3134 int *ret, struct net_device *orig_dev) 3135 { 3136 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3137 3138 if (!rxq || rxq->qdisc == &noop_qdisc) 3139 goto out; 3140 3141 if (*pt_prev) { 3142 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3143 *pt_prev = NULL; 3144 } 3145 3146 switch (ing_filter(skb, rxq)) { 3147 case TC_ACT_SHOT: 3148 case TC_ACT_STOLEN: 3149 kfree_skb(skb); 3150 return NULL; 3151 } 3152 3153 out: 3154 skb->tc_verd = 0; 3155 return skb; 3156 } 3157 #endif 3158 3159 /** 3160 * netdev_rx_handler_register - register receive handler 3161 * @dev: device to register a handler for 3162 * @rx_handler: receive handler to register 3163 * @rx_handler_data: data pointer that is used by rx handler 3164 * 3165 * Register a receive handler for a device. This handler will then be 3166 * called from __netif_receive_skb. A negative errno code is returned 3167 * on a failure. 3168 * 3169 * The caller must hold the rtnl_mutex. 3170 * 3171 * For a general description of rx_handler, see enum rx_handler_result. 3172 */ 3173 int netdev_rx_handler_register(struct net_device *dev, 3174 rx_handler_func_t *rx_handler, 3175 void *rx_handler_data) 3176 { 3177 ASSERT_RTNL(); 3178 3179 if (dev->rx_handler) 3180 return -EBUSY; 3181 3182 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3183 rcu_assign_pointer(dev->rx_handler, rx_handler); 3184 3185 return 0; 3186 } 3187 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 3188 3189 /** 3190 * netdev_rx_handler_unregister - unregister receive handler 3191 * @dev: device to unregister a handler from 3192 * 3193 * Unregister a receive handler from a device. 3194 * 3195 * The caller must hold the rtnl_mutex. 3196 */ 3197 void netdev_rx_handler_unregister(struct net_device *dev) 3198 { 3199 3200 ASSERT_RTNL(); 3201 RCU_INIT_POINTER(dev->rx_handler, NULL); 3202 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3203 } 3204 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3205 3206 /* 3207 * Limit the use of PFMEMALLOC reserves to those protocols that implement 3208 * the special handling of PFMEMALLOC skbs.
3209 */ 3210 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 3211 { 3212 switch (skb->protocol) { 3213 case __constant_htons(ETH_P_ARP): 3214 case __constant_htons(ETH_P_IP): 3215 case __constant_htons(ETH_P_IPV6): 3216 case __constant_htons(ETH_P_8021Q): 3217 return true; 3218 default: 3219 return false; 3220 } 3221 } 3222 3223 static int __netif_receive_skb(struct sk_buff *skb) 3224 { 3225 struct packet_type *ptype, *pt_prev; 3226 rx_handler_func_t *rx_handler; 3227 struct net_device *orig_dev; 3228 struct net_device *null_or_dev; 3229 bool deliver_exact = false; 3230 int ret = NET_RX_DROP; 3231 __be16 type; 3232 unsigned long pflags = current->flags; 3233 3234 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3235 3236 trace_netif_receive_skb(skb); 3237 3238 /* 3239 * PFMEMALLOC skbs are special, they should 3240 * - be delivered to SOCK_MEMALLOC sockets only 3241 * - stay away from userspace 3242 * - have bounded memory usage 3243 * 3244 * Use PF_MEMALLOC as this saves us from propagating the allocation 3245 * context down to all allocation sites. 3246 */ 3247 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) 3248 current->flags |= PF_MEMALLOC; 3249 3250 /* if we've gotten here through NAPI, check netpoll */ 3251 if (netpoll_receive_skb(skb)) 3252 goto out; 3253 3254 orig_dev = skb->dev; 3255 3256 skb_reset_network_header(skb); 3257 skb_reset_transport_header(skb); 3258 skb_reset_mac_len(skb); 3259 3260 pt_prev = NULL; 3261 3262 rcu_read_lock(); 3263 3264 another_round: 3265 skb->skb_iif = skb->dev->ifindex; 3266 3267 __this_cpu_inc(softnet_data.processed); 3268 3269 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { 3270 skb = vlan_untag(skb); 3271 if (unlikely(!skb)) 3272 goto unlock; 3273 } 3274 3275 #ifdef CONFIG_NET_CLS_ACT 3276 if (skb->tc_verd & TC_NCLS) { 3277 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3278 goto ncls; 3279 } 3280 #endif 3281 3282 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) 3283 goto skip_taps; 3284 3285 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3286 if (!ptype->dev || ptype->dev == skb->dev) { 3287 if (pt_prev) 3288 ret = deliver_skb(skb, pt_prev, orig_dev); 3289 pt_prev = ptype; 3290 } 3291 } 3292 3293 skip_taps: 3294 #ifdef CONFIG_NET_CLS_ACT 3295 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3296 if (!skb) 3297 goto unlock; 3298 ncls: 3299 #endif 3300 3301 if (sk_memalloc_socks() && skb_pfmemalloc(skb) 3302 && !skb_pfmemalloc_protocol(skb)) 3303 goto drop; 3304 3305 if (vlan_tx_tag_present(skb)) { 3306 if (pt_prev) { 3307 ret = deliver_skb(skb, pt_prev, orig_dev); 3308 pt_prev = NULL; 3309 } 3310 if (vlan_do_receive(&skb)) 3311 goto another_round; 3312 else if (unlikely(!skb)) 3313 goto unlock; 3314 } 3315 3316 rx_handler = rcu_dereference(skb->dev->rx_handler); 3317 if (rx_handler) { 3318 if (pt_prev) { 3319 ret = deliver_skb(skb, pt_prev, orig_dev); 3320 pt_prev = NULL; 3321 } 3322 switch (rx_handler(&skb)) { 3323 case RX_HANDLER_CONSUMED: 3324 goto unlock; 3325 case RX_HANDLER_ANOTHER: 3326 goto another_round; 3327 case RX_HANDLER_EXACT: 3328 deliver_exact = true; 3329 case RX_HANDLER_PASS: 3330 break; 3331 default: 3332 BUG(); 3333 } 3334 } 3335 3336 if (vlan_tx_nonzero_tag_present(skb)) 3337 skb->pkt_type = PACKET_OTHERHOST; 3338 3339 /* deliver only exact match when indicated */ 3340 null_or_dev = deliver_exact ? 
skb->dev : NULL; 3341 3342 type = skb->protocol; 3343 list_for_each_entry_rcu(ptype, 3344 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3345 if (ptype->type == type && 3346 (ptype->dev == null_or_dev || ptype->dev == skb->dev || 3347 ptype->dev == orig_dev)) { 3348 if (pt_prev) 3349 ret = deliver_skb(skb, pt_prev, orig_dev); 3350 pt_prev = ptype; 3351 } 3352 } 3353 3354 if (pt_prev) { 3355 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 3356 goto drop; 3357 else 3358 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3359 } else { 3360 drop: 3361 atomic_long_inc(&skb->dev->rx_dropped); 3362 kfree_skb(skb); 3363 /* Jamal, now you will not able to escape explaining 3364 * me how you were going to use this. :-) 3365 */ 3366 ret = NET_RX_DROP; 3367 } 3368 3369 unlock: 3370 rcu_read_unlock(); 3371 out: 3372 tsk_restore_flags(current, pflags, PF_MEMALLOC); 3373 return ret; 3374 } 3375 3376 /** 3377 * netif_receive_skb - process receive buffer from network 3378 * @skb: buffer to process 3379 * 3380 * netif_receive_skb() is the main receive data processing function. 3381 * It always succeeds. The buffer may be dropped during processing 3382 * for congestion control or by the protocol layers. 3383 * 3384 * This function may only be called from softirq context and interrupts 3385 * should be enabled. 3386 * 3387 * Return values (usually ignored): 3388 * NET_RX_SUCCESS: no congestion 3389 * NET_RX_DROP: packet was dropped 3390 */ 3391 int netif_receive_skb(struct sk_buff *skb) 3392 { 3393 net_timestamp_check(netdev_tstamp_prequeue, skb); 3394 3395 if (skb_defer_rx_timestamp(skb)) 3396 return NET_RX_SUCCESS; 3397 3398 #ifdef CONFIG_RPS 3399 if (static_key_false(&rps_needed)) { 3400 struct rps_dev_flow voidflow, *rflow = &voidflow; 3401 int cpu, ret; 3402 3403 rcu_read_lock(); 3404 3405 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3406 3407 if (cpu >= 0) { 3408 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3409 rcu_read_unlock(); 3410 return ret; 3411 } 3412 rcu_read_unlock(); 3413 } 3414 #endif 3415 return __netif_receive_skb(skb); 3416 } 3417 EXPORT_SYMBOL(netif_receive_skb); 3418 3419 /* Network device is going away, flush any packets still pending 3420 * Called with irqs disabled. 
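 *
 * Hedged note on the call site: the unregister path runs this handler on
 * every CPU, along the lines of
 *
 *	on_each_cpu(flush_backlog, dev, true);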
3421 */ 3422 static void flush_backlog(void *arg) 3423 { 3424 struct net_device *dev = arg; 3425 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3426 struct sk_buff *skb, *tmp; 3427 3428 rps_lock(sd); 3429 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3430 if (skb->dev == dev) { 3431 __skb_unlink(skb, &sd->input_pkt_queue); 3432 kfree_skb(skb); 3433 input_queue_head_incr(sd); 3434 } 3435 } 3436 rps_unlock(sd); 3437 3438 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3439 if (skb->dev == dev) { 3440 __skb_unlink(skb, &sd->process_queue); 3441 kfree_skb(skb); 3442 input_queue_head_incr(sd); 3443 } 3444 } 3445 } 3446 3447 static int napi_gro_complete(struct sk_buff *skb) 3448 { 3449 struct packet_type *ptype; 3450 __be16 type = skb->protocol; 3451 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3452 int err = -ENOENT; 3453 3454 if (NAPI_GRO_CB(skb)->count == 1) { 3455 skb_shinfo(skb)->gso_size = 0; 3456 goto out; 3457 } 3458 3459 rcu_read_lock(); 3460 list_for_each_entry_rcu(ptype, head, list) { 3461 if (ptype->type != type || ptype->dev || !ptype->gro_complete) 3462 continue; 3463 3464 err = ptype->gro_complete(skb); 3465 break; 3466 } 3467 rcu_read_unlock(); 3468 3469 if (err) { 3470 WARN_ON(&ptype->list == head); 3471 kfree_skb(skb); 3472 return NET_RX_SUCCESS; 3473 } 3474 3475 out: 3476 return netif_receive_skb(skb); 3477 } 3478 3479 /* napi->gro_list contains packets ordered by age. 3480 * youngest packets at the head of it. 3481 * Complete skbs in reverse order to reduce latencies. 3482 */ 3483 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 3484 { 3485 struct sk_buff *skb, *prev = NULL; 3486 3487 /* scan list and build reverse chain */ 3488 for (skb = napi->gro_list; skb != NULL; skb = skb->next) { 3489 skb->prev = prev; 3490 prev = skb; 3491 } 3492 3493 for (skb = prev; skb; skb = prev) { 3494 skb->next = NULL; 3495 3496 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 3497 return; 3498 3499 prev = skb->prev; 3500 napi_gro_complete(skb); 3501 napi->gro_count--; 3502 } 3503 3504 napi->gro_list = NULL; 3505 } 3506 EXPORT_SYMBOL(napi_gro_flush); 3507 3508 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3509 { 3510 struct sk_buff **pp = NULL; 3511 struct packet_type *ptype; 3512 __be16 type = skb->protocol; 3513 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3514 int same_flow; 3515 int mac_len; 3516 enum gro_result ret; 3517 3518 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3519 goto normal; 3520 3521 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3522 goto normal; 3523 3524 rcu_read_lock(); 3525 list_for_each_entry_rcu(ptype, head, list) { 3526 if (ptype->type != type || ptype->dev || !ptype->gro_receive) 3527 continue; 3528 3529 skb_set_network_header(skb, skb_gro_offset(skb)); 3530 mac_len = skb->network_header - skb->mac_header; 3531 skb->mac_len = mac_len; 3532 NAPI_GRO_CB(skb)->same_flow = 0; 3533 NAPI_GRO_CB(skb)->flush = 0; 3534 NAPI_GRO_CB(skb)->free = 0; 3535 3536 pp = ptype->gro_receive(&napi->gro_list, skb); 3537 break; 3538 } 3539 rcu_read_unlock(); 3540 3541 if (&ptype->list == head) 3542 goto normal; 3543 3544 same_flow = NAPI_GRO_CB(skb)->same_flow; 3545 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 3546 3547 if (pp) { 3548 struct sk_buff *nskb = *pp; 3549 3550 *pp = nskb->next; 3551 nskb->next = NULL; 3552 napi_gro_complete(nskb); 3553 napi->gro_count--; 3554 } 3555 3556 if (same_flow) 3557 goto ok; 3558 3559 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) 3560 goto normal; 3561 3562 napi->gro_count++; 3563 NAPI_GRO_CB(skb)->count = 1; 3564 NAPI_GRO_CB(skb)->age = jiffies; 3565 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3566 skb->next = napi->gro_list; 3567 napi->gro_list = skb; 3568 ret = GRO_HELD; 3569 3570 pull: 3571 if (skb_headlen(skb) < skb_gro_offset(skb)) { 3572 int grow = skb_gro_offset(skb) - skb_headlen(skb); 3573 3574 BUG_ON(skb->end - skb->tail < grow); 3575 3576 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3577 3578 skb->tail += grow; 3579 skb->data_len -= grow; 3580 3581 skb_shinfo(skb)->frags[0].page_offset += grow; 3582 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); 3583 3584 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { 3585 skb_frag_unref(skb, 0); 3586 memmove(skb_shinfo(skb)->frags, 3587 skb_shinfo(skb)->frags + 1, 3588 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3589 } 3590 } 3591 3592 ok: 3593 return ret; 3594 3595 normal: 3596 ret = GRO_NORMAL; 3597 goto pull; 3598 } 3599 EXPORT_SYMBOL(dev_gro_receive); 3600 3601 static inline gro_result_t 3602 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3603 { 3604 struct sk_buff *p; 3605 unsigned int maclen = skb->dev->hard_header_len; 3606 3607 for (p = napi->gro_list; p; p = p->next) { 3608 unsigned long diffs; 3609 3610 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3611 diffs |= p->vlan_tci ^ skb->vlan_tci; 3612 if (maclen == ETH_HLEN) 3613 diffs |= compare_ether_header(skb_mac_header(p), 3614 skb_gro_mac_header(skb)); 3615 else if (!diffs) 3616 diffs = memcmp(skb_mac_header(p), 3617 skb_gro_mac_header(skb), 3618 maclen); 3619 NAPI_GRO_CB(p)->same_flow = !diffs; 3620 NAPI_GRO_CB(p)->flush = 0; 3621 } 3622 3623 return dev_gro_receive(napi, skb); 3624 } 3625 3626 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 3627 { 3628 switch (ret) { 3629 case GRO_NORMAL: 3630 if (netif_receive_skb(skb)) 3631 ret = GRO_DROP; 3632 break; 3633 3634 case GRO_DROP: 3635 kfree_skb(skb); 3636 break; 3637 3638 case GRO_MERGED_FREE: 3639 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 3640 kmem_cache_free(skbuff_head_cache, skb); 3641 else 3642 __kfree_skb(skb); 3643 break; 3644 3645 case GRO_HELD: 3646 case GRO_MERGED: 3647 break; 3648 } 3649 3650 return ret; 3651 } 3652 EXPORT_SYMBOL(napi_skb_finish); 3653 3654 static void skb_gro_reset_offset(struct sk_buff *skb) 3655 { 3656 const struct skb_shared_info *pinfo = skb_shinfo(skb); 3657 const skb_frag_t *frag0 = &pinfo->frags[0]; 3658 3659 NAPI_GRO_CB(skb)->data_offset = 0; 3660 NAPI_GRO_CB(skb)->frag0 = NULL; 3661 NAPI_GRO_CB(skb)->frag0_len = 0; 3662 3663 if (skb->mac_header == skb->tail && 3664 pinfo->nr_frags && 3665 !PageHighMem(skb_frag_page(frag0))) { 3666 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 3667 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); 3668 } 3669 } 3670 3671 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3672 { 3673 skb_gro_reset_offset(skb); 3674 3675 return napi_skb_finish(__napi_gro_receive(napi, skb), skb); 3676 } 3677 EXPORT_SYMBOL(napi_gro_receive); 3678 3679 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3680 { 3681 __skb_pull(skb, 
skb_headlen(skb)); 3682 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 3683 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 3684 skb->vlan_tci = 0; 3685 skb->dev = napi->dev; 3686 skb->skb_iif = 0; 3687 3688 napi->skb = skb; 3689 } 3690 3691 struct sk_buff *napi_get_frags(struct napi_struct *napi) 3692 { 3693 struct sk_buff *skb = napi->skb; 3694 3695 if (!skb) { 3696 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 3697 if (skb) 3698 napi->skb = skb; 3699 } 3700 return skb; 3701 } 3702 EXPORT_SYMBOL(napi_get_frags); 3703 3704 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 3705 gro_result_t ret) 3706 { 3707 switch (ret) { 3708 case GRO_NORMAL: 3709 case GRO_HELD: 3710 skb->protocol = eth_type_trans(skb, skb->dev); 3711 3712 if (ret == GRO_HELD) 3713 skb_gro_pull(skb, -ETH_HLEN); 3714 else if (netif_receive_skb(skb)) 3715 ret = GRO_DROP; 3716 break; 3717 3718 case GRO_DROP: 3719 case GRO_MERGED_FREE: 3720 napi_reuse_skb(napi, skb); 3721 break; 3722 3723 case GRO_MERGED: 3724 break; 3725 } 3726 3727 return ret; 3728 } 3729 EXPORT_SYMBOL(napi_frags_finish); 3730 3731 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3732 { 3733 struct sk_buff *skb = napi->skb; 3734 struct ethhdr *eth; 3735 unsigned int hlen; 3736 unsigned int off; 3737 3738 napi->skb = NULL; 3739 3740 skb_reset_mac_header(skb); 3741 skb_gro_reset_offset(skb); 3742 3743 off = skb_gro_offset(skb); 3744 hlen = off + sizeof(*eth); 3745 eth = skb_gro_header_fast(skb, off); 3746 if (skb_gro_header_hard(skb, hlen)) { 3747 eth = skb_gro_header_slow(skb, hlen, off); 3748 if (unlikely(!eth)) { 3749 napi_reuse_skb(napi, skb); 3750 skb = NULL; 3751 goto out; 3752 } 3753 } 3754 3755 skb_gro_pull(skb, sizeof(*eth)); 3756 3757 /* 3758 * This works because the only protocols we care about don't require 3759 * special handling. We'll fix it up properly at the end. 3760 */ 3761 skb->protocol = eth->h_proto; 3762 3763 out: 3764 return skb; 3765 } 3766 3767 gro_result_t napi_gro_frags(struct napi_struct *napi) 3768 { 3769 struct sk_buff *skb = napi_frags_skb(napi); 3770 3771 if (!skb) 3772 return GRO_DROP; 3773 3774 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); 3775 } 3776 EXPORT_SYMBOL(napi_gro_frags); 3777 3778 /* 3779 * net_rps_action sends any pending IPI's for rps. 3780 * Note: called with local irq disabled, but exits with local irq enabled. 3781 */ 3782 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 3783 { 3784 #ifdef CONFIG_RPS 3785 struct softnet_data *remsd = sd->rps_ipi_list; 3786 3787 if (remsd) { 3788 sd->rps_ipi_list = NULL; 3789 3790 local_irq_enable(); 3791 3792 /* Send pending IPI's to kick RPS processing on remote cpus. */ 3793 while (remsd) { 3794 struct softnet_data *next = remsd->rps_ipi_next; 3795 3796 if (cpu_online(remsd->cpu)) 3797 __smp_call_function_single(remsd->cpu, 3798 &remsd->csd, 0); 3799 remsd = next; 3800 } 3801 } else 3802 #endif 3803 local_irq_enable(); 3804 } 3805 3806 static int process_backlog(struct napi_struct *napi, int quota) 3807 { 3808 int work = 0; 3809 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 3810 3811 #ifdef CONFIG_RPS 3812 /* Check if we have pending ipi, its better to send them now, 3813 * not waiting net_rx_action() end. 
3814 */ 3815 if (sd->rps_ipi_list) { 3816 local_irq_disable(); 3817 net_rps_action_and_irq_enable(sd); 3818 } 3819 #endif 3820 napi->weight = weight_p; 3821 local_irq_disable(); 3822 while (work < quota) { 3823 struct sk_buff *skb; 3824 unsigned int qlen; 3825 3826 while ((skb = __skb_dequeue(&sd->process_queue))) { 3827 local_irq_enable(); 3828 __netif_receive_skb(skb); 3829 local_irq_disable(); 3830 input_queue_head_incr(sd); 3831 if (++work >= quota) { 3832 local_irq_enable(); 3833 return work; 3834 } 3835 } 3836 3837 rps_lock(sd); 3838 qlen = skb_queue_len(&sd->input_pkt_queue); 3839 if (qlen) 3840 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3841 &sd->process_queue); 3842 3843 if (qlen < quota - work) { 3844 /* 3845 * Inline a custom version of __napi_complete(). 3846 * only current cpu owns and manipulates this napi, 3847 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 3848 * we can use a plain write instead of clear_bit(), 3849 * and we dont need an smp_mb() memory barrier. 3850 */ 3851 list_del(&napi->poll_list); 3852 napi->state = 0; 3853 3854 quota = work + qlen; 3855 } 3856 rps_unlock(sd); 3857 } 3858 local_irq_enable(); 3859 3860 return work; 3861 } 3862 3863 /** 3864 * __napi_schedule - schedule for receive 3865 * @n: entry to schedule 3866 * 3867 * The entry's receive function will be scheduled to run 3868 */ 3869 void __napi_schedule(struct napi_struct *n) 3870 { 3871 unsigned long flags; 3872 3873 local_irq_save(flags); 3874 ____napi_schedule(&__get_cpu_var(softnet_data), n); 3875 local_irq_restore(flags); 3876 } 3877 EXPORT_SYMBOL(__napi_schedule); 3878 3879 void __napi_complete(struct napi_struct *n) 3880 { 3881 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 3882 BUG_ON(n->gro_list); 3883 3884 list_del(&n->poll_list); 3885 smp_mb__before_clear_bit(); 3886 clear_bit(NAPI_STATE_SCHED, &n->state); 3887 } 3888 EXPORT_SYMBOL(__napi_complete); 3889 3890 void napi_complete(struct napi_struct *n) 3891 { 3892 unsigned long flags; 3893 3894 /* 3895 * don't let napi dequeue from the cpu poll list 3896 * just in case its running on a different cpu 3897 */ 3898 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 3899 return; 3900 3901 napi_gro_flush(n, false); 3902 local_irq_save(flags); 3903 __napi_complete(n); 3904 local_irq_restore(flags); 3905 } 3906 EXPORT_SYMBOL(napi_complete); 3907 3908 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 3909 int (*poll)(struct napi_struct *, int), int weight) 3910 { 3911 INIT_LIST_HEAD(&napi->poll_list); 3912 napi->gro_count = 0; 3913 napi->gro_list = NULL; 3914 napi->skb = NULL; 3915 napi->poll = poll; 3916 napi->weight = weight; 3917 list_add(&napi->dev_list, &dev->napi_list); 3918 napi->dev = dev; 3919 #ifdef CONFIG_NETPOLL 3920 spin_lock_init(&napi->poll_lock); 3921 napi->poll_owner = -1; 3922 #endif 3923 set_bit(NAPI_STATE_SCHED, &napi->state); 3924 } 3925 EXPORT_SYMBOL(netif_napi_add); 3926 3927 void netif_napi_del(struct napi_struct *napi) 3928 { 3929 struct sk_buff *skb, *next; 3930 3931 list_del_init(&napi->dev_list); 3932 napi_free_frags(napi); 3933 3934 for (skb = napi->gro_list; skb; skb = next) { 3935 next = skb->next; 3936 skb->next = NULL; 3937 kfree_skb(skb); 3938 } 3939 3940 napi->gro_list = NULL; 3941 napi->gro_count = 0; 3942 } 3943 EXPORT_SYMBOL(netif_napi_del); 3944 3945 static void net_rx_action(struct softirq_action *h) 3946 { 3947 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3948 unsigned long time_limit = jiffies + 2; 3949 int budget = netdev_budget; 3950 void *have; 
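	/*
	 * Poll loop: drain sd->poll_list until either the overall packet
	 * budget or the two-jiffy time limit is exhausted; whatever is
	 * left over is handled via softnet_break, which re-raises the
	 * softirq before bailing out.
	 */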
3951 3952 local_irq_disable(); 3953 3954 while (!list_empty(&sd->poll_list)) { 3955 struct napi_struct *n; 3956 int work, weight; 3957 3958 /* If softirq window is exhuasted then punt. 3959 * Allow this to run for 2 jiffies since which will allow 3960 * an average latency of 1.5/HZ. 3961 */ 3962 if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) 3963 goto softnet_break; 3964 3965 local_irq_enable(); 3966 3967 /* Even though interrupts have been re-enabled, this 3968 * access is safe because interrupts can only add new 3969 * entries to the tail of this list, and only ->poll() 3970 * calls can remove this head entry from the list. 3971 */ 3972 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); 3973 3974 have = netpoll_poll_lock(n); 3975 3976 weight = n->weight; 3977 3978 /* This NAPI_STATE_SCHED test is for avoiding a race 3979 * with netpoll's poll_napi(). Only the entity which 3980 * obtains the lock and sees NAPI_STATE_SCHED set will 3981 * actually make the ->poll() call. Therefore we avoid 3982 * accidentally calling ->poll() when NAPI is not scheduled. 3983 */ 3984 work = 0; 3985 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 3986 work = n->poll(n, weight); 3987 trace_napi_poll(n); 3988 } 3989 3990 WARN_ON_ONCE(work > weight); 3991 3992 budget -= work; 3993 3994 local_irq_disable(); 3995 3996 /* Drivers must not modify the NAPI state if they 3997 * consume the entire weight. In such cases this code 3998 * still "owns" the NAPI instance and therefore can 3999 * move the instance around on the list at-will. 4000 */ 4001 if (unlikely(work == weight)) { 4002 if (unlikely(napi_disable_pending(n))) { 4003 local_irq_enable(); 4004 napi_complete(n); 4005 local_irq_disable(); 4006 } else { 4007 if (n->gro_list) { 4008 /* flush too old packets 4009 * If HZ < 1000, flush all packets. 4010 */ 4011 local_irq_enable(); 4012 napi_gro_flush(n, HZ >= 1000); 4013 local_irq_disable(); 4014 } 4015 list_move_tail(&n->poll_list, &sd->poll_list); 4016 } 4017 } 4018 4019 netpoll_poll_unlock(have); 4020 } 4021 out: 4022 net_rps_action_and_irq_enable(sd); 4023 4024 #ifdef CONFIG_NET_DMA 4025 /* 4026 * There may not be any more sk_buffs coming right now, so push 4027 * any pending DMA copies to hardware 4028 */ 4029 dma_issue_pending_all(); 4030 #endif 4031 4032 return; 4033 4034 softnet_break: 4035 sd->time_squeeze++; 4036 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4037 goto out; 4038 } 4039 4040 static gifconf_func_t *gifconf_list[NPROTO]; 4041 4042 /** 4043 * register_gifconf - register a SIOCGIF handler 4044 * @family: Address family 4045 * @gifconf: Function handler 4046 * 4047 * Register protocol dependent address dumping routines. The handler 4048 * that is passed must not be freed or reused until it has been replaced 4049 * by another handler. 4050 */ 4051 int register_gifconf(unsigned int family, gifconf_func_t *gifconf) 4052 { 4053 if (family >= NPROTO) 4054 return -EINVAL; 4055 gifconf_list[family] = gifconf; 4056 return 0; 4057 } 4058 EXPORT_SYMBOL(register_gifconf); 4059 4060 4061 /* 4062 * Map an interface index to its name (SIOCGIFNAME) 4063 */ 4064 4065 /* 4066 * We need this ioctl for efficient implementation of the 4067 * if_indextoname() function required by the IPv6 API. Without 4068 * it, we would have to search all the interfaces to find a 4069 * match. --pb 4070 */ 4071 4072 static int dev_ifname(struct net *net, struct ifreq __user *arg) 4073 { 4074 struct net_device *dev; 4075 struct ifreq ifr; 4076 4077 /* 4078 * Fetch the caller's info block. 
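 *
 * Userspace reaches this through ioctl(fd, SIOCGIFNAME, &ifr) with
 * ifr.ifr_ifindex filled in; a hedged illustration ("fd" and "idx" are
 * assumed to exist in the caller):
 *
 *	struct ifreq ifr;
 *	ifr.ifr_ifindex = idx;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("%s\n", ifr.ifr_name);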
4079 */ 4080 4081 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 4082 return -EFAULT; 4083 4084 rcu_read_lock(); 4085 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); 4086 if (!dev) { 4087 rcu_read_unlock(); 4088 return -ENODEV; 4089 } 4090 4091 strcpy(ifr.ifr_name, dev->name); 4092 rcu_read_unlock(); 4093 4094 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 4095 return -EFAULT; 4096 return 0; 4097 } 4098 4099 /* 4100 * Perform a SIOCGIFCONF call. This structure will change 4101 * size eventually, and there is nothing I can do about it. 4102 * Thus we will need a 'compatibility mode'. 4103 */ 4104 4105 static int dev_ifconf(struct net *net, char __user *arg) 4106 { 4107 struct ifconf ifc; 4108 struct net_device *dev; 4109 char __user *pos; 4110 int len; 4111 int total; 4112 int i; 4113 4114 /* 4115 * Fetch the caller's info block. 4116 */ 4117 4118 if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) 4119 return -EFAULT; 4120 4121 pos = ifc.ifc_buf; 4122 len = ifc.ifc_len; 4123 4124 /* 4125 * Loop over the interfaces, and write an info block for each. 4126 */ 4127 4128 total = 0; 4129 for_each_netdev(net, dev) { 4130 for (i = 0; i < NPROTO; i++) { 4131 if (gifconf_list[i]) { 4132 int done; 4133 if (!pos) 4134 done = gifconf_list[i](dev, NULL, 0); 4135 else 4136 done = gifconf_list[i](dev, pos + total, 4137 len - total); 4138 if (done < 0) 4139 return -EFAULT; 4140 total += done; 4141 } 4142 } 4143 } 4144 4145 /* 4146 * All done. Write the updated control block back to the caller. 4147 */ 4148 ifc.ifc_len = total; 4149 4150 /* 4151 * Both BSD and Solaris return 0 here, so we do too. 4152 */ 4153 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; 4154 } 4155 4156 #ifdef CONFIG_PROC_FS 4157 4158 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) 4159 4160 #define get_bucket(x) ((x) >> BUCKET_SPACE) 4161 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) 4162 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 4163 4164 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos) 4165 { 4166 struct net *net = seq_file_net(seq); 4167 struct net_device *dev; 4168 struct hlist_node *p; 4169 struct hlist_head *h; 4170 unsigned int count = 0, offset = get_offset(*pos); 4171 4172 h = &net->dev_name_head[get_bucket(*pos)]; 4173 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 4174 if (++count == offset) 4175 return dev; 4176 } 4177 4178 return NULL; 4179 } 4180 4181 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos) 4182 { 4183 struct net_device *dev; 4184 unsigned int bucket; 4185 4186 do { 4187 dev = dev_from_same_bucket(seq, pos); 4188 if (dev) 4189 return dev; 4190 4191 bucket = get_bucket(*pos) + 1; 4192 *pos = set_bucket_offset(bucket, 1); 4193 } while (bucket < NETDEV_HASHENTRIES); 4194 4195 return NULL; 4196 } 4197 4198 /* 4199 * This is invoked by the /proc filesystem handler to display a device 4200 * in detail. 
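 *
 * The seq_file position is not a flat device index: the upper bits hold
 * the dev_name_head hash bucket and the low BUCKET_SPACE bits hold a
 * 1-based offset within that bucket (see get_bucket()/get_offset()
 * above).  Position zero is reserved for SEQ_START_TOKEN so the header
 * line is emitted before any device entry.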
4201 */ 4202 void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4203 __acquires(RCU) 4204 { 4205 rcu_read_lock(); 4206 if (!*pos) 4207 return SEQ_START_TOKEN; 4208 4209 if (get_bucket(*pos) >= NETDEV_HASHENTRIES) 4210 return NULL; 4211 4212 return dev_from_bucket(seq, pos); 4213 } 4214 4215 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4216 { 4217 ++*pos; 4218 return dev_from_bucket(seq, pos); 4219 } 4220 4221 void dev_seq_stop(struct seq_file *seq, void *v) 4222 __releases(RCU) 4223 { 4224 rcu_read_unlock(); 4225 } 4226 4227 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) 4228 { 4229 struct rtnl_link_stats64 temp; 4230 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); 4231 4232 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " 4233 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", 4234 dev->name, stats->rx_bytes, stats->rx_packets, 4235 stats->rx_errors, 4236 stats->rx_dropped + stats->rx_missed_errors, 4237 stats->rx_fifo_errors, 4238 stats->rx_length_errors + stats->rx_over_errors + 4239 stats->rx_crc_errors + stats->rx_frame_errors, 4240 stats->rx_compressed, stats->multicast, 4241 stats->tx_bytes, stats->tx_packets, 4242 stats->tx_errors, stats->tx_dropped, 4243 stats->tx_fifo_errors, stats->collisions, 4244 stats->tx_carrier_errors + 4245 stats->tx_aborted_errors + 4246 stats->tx_window_errors + 4247 stats->tx_heartbeat_errors, 4248 stats->tx_compressed); 4249 } 4250 4251 /* 4252 * Called from the PROCfs module. This now uses the new arbitrary sized 4253 * /proc/net interface to create /proc/net/dev 4254 */ 4255 static int dev_seq_show(struct seq_file *seq, void *v) 4256 { 4257 if (v == SEQ_START_TOKEN) 4258 seq_puts(seq, "Inter-| Receive " 4259 " | Transmit\n" 4260 " face |bytes packets errs drop fifo frame " 4261 "compressed multicast|bytes packets errs " 4262 "drop fifo colls carrier compressed\n"); 4263 else 4264 dev_seq_printf_stats(seq, v); 4265 return 0; 4266 } 4267 4268 static struct softnet_data *softnet_get_online(loff_t *pos) 4269 { 4270 struct softnet_data *sd = NULL; 4271 4272 while (*pos < nr_cpu_ids) 4273 if (cpu_online(*pos)) { 4274 sd = &per_cpu(softnet_data, *pos); 4275 break; 4276 } else 4277 ++*pos; 4278 return sd; 4279 } 4280 4281 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) 4282 { 4283 return softnet_get_online(pos); 4284 } 4285 4286 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4287 { 4288 ++*pos; 4289 return softnet_get_online(pos); 4290 } 4291 4292 static void softnet_seq_stop(struct seq_file *seq, void *v) 4293 { 4294 } 4295 4296 static int softnet_seq_show(struct seq_file *seq, void *v) 4297 { 4298 struct softnet_data *sd = v; 4299 4300 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 4301 sd->processed, sd->dropped, sd->time_squeeze, 0, 4302 0, 0, 0, 0, /* was fastroute */ 4303 sd->cpu_collision, sd->received_rps); 4304 return 0; 4305 } 4306 4307 static const struct seq_operations dev_seq_ops = { 4308 .start = dev_seq_start, 4309 .next = dev_seq_next, 4310 .stop = dev_seq_stop, 4311 .show = dev_seq_show, 4312 }; 4313 4314 static int dev_seq_open(struct inode *inode, struct file *file) 4315 { 4316 return seq_open_net(inode, file, &dev_seq_ops, 4317 sizeof(struct seq_net_private)); 4318 } 4319 4320 static const struct file_operations dev_seq_fops = { 4321 .owner = THIS_MODULE, 4322 .open = dev_seq_open, 4323 .read = seq_read, 4324 .llseek = seq_lseek, 4325 .release = seq_release_net, 4326 }; 
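/*
 * Layout of /proc/net/softnet_stat, as produced by softnet_seq_show()
 * below: one line of ten %08x fields per online CPU, in CPU-id order,
 * with no explicit CPU column:
 *
 *   processed dropped time_squeeze 0 0 0 0 0 cpu_collision received_rps
 *
 * The zero fields are placeholders (four of them once carried the old
 * fastroute counters).
 */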
4327 4328 static const struct seq_operations softnet_seq_ops = { 4329 .start = softnet_seq_start, 4330 .next = softnet_seq_next, 4331 .stop = softnet_seq_stop, 4332 .show = softnet_seq_show, 4333 }; 4334 4335 static int softnet_seq_open(struct inode *inode, struct file *file) 4336 { 4337 return seq_open(file, &softnet_seq_ops); 4338 } 4339 4340 static const struct file_operations softnet_seq_fops = { 4341 .owner = THIS_MODULE, 4342 .open = softnet_seq_open, 4343 .read = seq_read, 4344 .llseek = seq_lseek, 4345 .release = seq_release, 4346 }; 4347 4348 static void *ptype_get_idx(loff_t pos) 4349 { 4350 struct packet_type *pt = NULL; 4351 loff_t i = 0; 4352 int t; 4353 4354 list_for_each_entry_rcu(pt, &ptype_all, list) { 4355 if (i == pos) 4356 return pt; 4357 ++i; 4358 } 4359 4360 for (t = 0; t < PTYPE_HASH_SIZE; t++) { 4361 list_for_each_entry_rcu(pt, &ptype_base[t], list) { 4362 if (i == pos) 4363 return pt; 4364 ++i; 4365 } 4366 } 4367 return NULL; 4368 } 4369 4370 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) 4371 __acquires(RCU) 4372 { 4373 rcu_read_lock(); 4374 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; 4375 } 4376 4377 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4378 { 4379 struct packet_type *pt; 4380 struct list_head *nxt; 4381 int hash; 4382 4383 ++*pos; 4384 if (v == SEQ_START_TOKEN) 4385 return ptype_get_idx(0); 4386 4387 pt = v; 4388 nxt = pt->list.next; 4389 if (pt->type == htons(ETH_P_ALL)) { 4390 if (nxt != &ptype_all) 4391 goto found; 4392 hash = 0; 4393 nxt = ptype_base[0].next; 4394 } else 4395 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 4396 4397 while (nxt == &ptype_base[hash]) { 4398 if (++hash >= PTYPE_HASH_SIZE) 4399 return NULL; 4400 nxt = ptype_base[hash].next; 4401 } 4402 found: 4403 return list_entry(nxt, struct packet_type, list); 4404 } 4405 4406 static void ptype_seq_stop(struct seq_file *seq, void *v) 4407 __releases(RCU) 4408 { 4409 rcu_read_unlock(); 4410 } 4411 4412 static int ptype_seq_show(struct seq_file *seq, void *v) 4413 { 4414 struct packet_type *pt = v; 4415 4416 if (v == SEQ_START_TOKEN) 4417 seq_puts(seq, "Type Device Function\n"); 4418 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 4419 if (pt->type == htons(ETH_P_ALL)) 4420 seq_puts(seq, "ALL "); 4421 else 4422 seq_printf(seq, "%04x", ntohs(pt->type)); 4423 4424 seq_printf(seq, " %-8s %pF\n", 4425 pt->dev ? 
pt->dev->name : "", pt->func); 4426 } 4427 4428 return 0; 4429 } 4430 4431 static const struct seq_operations ptype_seq_ops = { 4432 .start = ptype_seq_start, 4433 .next = ptype_seq_next, 4434 .stop = ptype_seq_stop, 4435 .show = ptype_seq_show, 4436 }; 4437 4438 static int ptype_seq_open(struct inode *inode, struct file *file) 4439 { 4440 return seq_open_net(inode, file, &ptype_seq_ops, 4441 sizeof(struct seq_net_private)); 4442 } 4443 4444 static const struct file_operations ptype_seq_fops = { 4445 .owner = THIS_MODULE, 4446 .open = ptype_seq_open, 4447 .read = seq_read, 4448 .llseek = seq_lseek, 4449 .release = seq_release_net, 4450 }; 4451 4452 4453 static int __net_init dev_proc_net_init(struct net *net) 4454 { 4455 int rc = -ENOMEM; 4456 4457 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) 4458 goto out; 4459 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) 4460 goto out_dev; 4461 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) 4462 goto out_softnet; 4463 4464 if (wext_proc_init(net)) 4465 goto out_ptype; 4466 rc = 0; 4467 out: 4468 return rc; 4469 out_ptype: 4470 proc_net_remove(net, "ptype"); 4471 out_softnet: 4472 proc_net_remove(net, "softnet_stat"); 4473 out_dev: 4474 proc_net_remove(net, "dev"); 4475 goto out; 4476 } 4477 4478 static void __net_exit dev_proc_net_exit(struct net *net) 4479 { 4480 wext_proc_exit(net); 4481 4482 proc_net_remove(net, "ptype"); 4483 proc_net_remove(net, "softnet_stat"); 4484 proc_net_remove(net, "dev"); 4485 } 4486 4487 static struct pernet_operations __net_initdata dev_proc_ops = { 4488 .init = dev_proc_net_init, 4489 .exit = dev_proc_net_exit, 4490 }; 4491 4492 static int __init dev_proc_init(void) 4493 { 4494 return register_pernet_subsys(&dev_proc_ops); 4495 } 4496 #else 4497 #define dev_proc_init() 0 4498 #endif /* CONFIG_PROC_FS */ 4499 4500 4501 /** 4502 * netdev_set_master - set up master pointer 4503 * @slave: slave device 4504 * @master: new master device 4505 * 4506 * Changes the master device of the slave. Pass %NULL to break the 4507 * bonding. The caller must hold the RTNL semaphore. On a failure 4508 * a negative errno code is returned. On success the reference counts 4509 * are adjusted and the function returns zero. 4510 */ 4511 int netdev_set_master(struct net_device *slave, struct net_device *master) 4512 { 4513 struct net_device *old = slave->master; 4514 4515 ASSERT_RTNL(); 4516 4517 if (master) { 4518 if (old) 4519 return -EBUSY; 4520 dev_hold(master); 4521 } 4522 4523 slave->master = master; 4524 4525 if (old) 4526 dev_put(old); 4527 return 0; 4528 } 4529 EXPORT_SYMBOL(netdev_set_master); 4530 4531 /** 4532 * netdev_set_bond_master - set up bonding master/slave pair 4533 * @slave: slave device 4534 * @master: new master device 4535 * 4536 * Changes the master device of the slave. Pass %NULL to break the 4537 * bonding. The caller must hold the RTNL semaphore. On a failure 4538 * a negative errno code is returned. On success %RTM_NEWLINK is sent 4539 * to the routing socket and the function returns zero. 
4540 */ 4541 int netdev_set_bond_master(struct net_device *slave, struct net_device *master) 4542 { 4543 int err; 4544 4545 ASSERT_RTNL(); 4546 4547 err = netdev_set_master(slave, master); 4548 if (err) 4549 return err; 4550 if (master) 4551 slave->flags |= IFF_SLAVE; 4552 else 4553 slave->flags &= ~IFF_SLAVE; 4554 4555 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); 4556 return 0; 4557 } 4558 EXPORT_SYMBOL(netdev_set_bond_master); 4559 4560 static void dev_change_rx_flags(struct net_device *dev, int flags) 4561 { 4562 const struct net_device_ops *ops = dev->netdev_ops; 4563 4564 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) 4565 ops->ndo_change_rx_flags(dev, flags); 4566 } 4567 4568 static int __dev_set_promiscuity(struct net_device *dev, int inc) 4569 { 4570 unsigned int old_flags = dev->flags; 4571 kuid_t uid; 4572 kgid_t gid; 4573 4574 ASSERT_RTNL(); 4575 4576 dev->flags |= IFF_PROMISC; 4577 dev->promiscuity += inc; 4578 if (dev->promiscuity == 0) { 4579 /* 4580 * Avoid overflow. 4581 * If inc causes overflow, untouch promisc and return error. 4582 */ 4583 if (inc < 0) 4584 dev->flags &= ~IFF_PROMISC; 4585 else { 4586 dev->promiscuity -= inc; 4587 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 4588 dev->name); 4589 return -EOVERFLOW; 4590 } 4591 } 4592 if (dev->flags != old_flags) { 4593 pr_info("device %s %s promiscuous mode\n", 4594 dev->name, 4595 dev->flags & IFF_PROMISC ? "entered" : "left"); 4596 if (audit_enabled) { 4597 current_uid_gid(&uid, &gid); 4598 audit_log(current->audit_context, GFP_ATOMIC, 4599 AUDIT_ANOM_PROMISCUOUS, 4600 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 4601 dev->name, (dev->flags & IFF_PROMISC), 4602 (old_flags & IFF_PROMISC), 4603 from_kuid(&init_user_ns, audit_get_loginuid(current)), 4604 from_kuid(&init_user_ns, uid), 4605 from_kgid(&init_user_ns, gid), 4606 audit_get_sessionid(current)); 4607 } 4608 4609 dev_change_rx_flags(dev, IFF_PROMISC); 4610 } 4611 return 0; 4612 } 4613 4614 /** 4615 * dev_set_promiscuity - update promiscuity count on a device 4616 * @dev: device 4617 * @inc: modifier 4618 * 4619 * Add or remove promiscuity from a device. While the count in the device 4620 * remains above zero the interface remains promiscuous. Once it hits zero 4621 * the device reverts back to normal filtering operation. A negative inc 4622 * value is used to drop promiscuity on the device. 4623 * Return 0 if successful or a negative errno code on error. 4624 */ 4625 int dev_set_promiscuity(struct net_device *dev, int inc) 4626 { 4627 unsigned int old_flags = dev->flags; 4628 int err; 4629 4630 err = __dev_set_promiscuity(dev, inc); 4631 if (err < 0) 4632 return err; 4633 if (dev->flags != old_flags) 4634 dev_set_rx_mode(dev); 4635 return err; 4636 } 4637 EXPORT_SYMBOL(dev_set_promiscuity); 4638 4639 /** 4640 * dev_set_allmulti - update allmulti count on a device 4641 * @dev: device 4642 * @inc: modifier 4643 * 4644 * Add or remove reception of all multicast frames to a device. While the 4645 * count in the device remains above zero the interface remains listening 4646 * to all interfaces. Once it hits zero the device reverts back to normal 4647 * filtering operation. A negative @inc value is used to drop the counter 4648 * when releasing a resource needing all multicasts. 4649 * Return 0 if successful or a negative errno code on error. 
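 *
 * Calls are expected to be balanced: a component that needs all-multicast
 * reception while a feature is active would typically do
 * dev_set_allmulti(dev, 1) when enabling it and dev_set_allmulti(dev, -1)
 * when releasing it, so that the counter drops back to zero.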
4650 */ 4651 4652 int dev_set_allmulti(struct net_device *dev, int inc) 4653 { 4654 unsigned int old_flags = dev->flags; 4655 4656 ASSERT_RTNL(); 4657 4658 dev->flags |= IFF_ALLMULTI; 4659 dev->allmulti += inc; 4660 if (dev->allmulti == 0) { 4661 /* 4662 * Avoid overflow. 4663 * If inc causes overflow, untouch allmulti and return error. 4664 */ 4665 if (inc < 0) 4666 dev->flags &= ~IFF_ALLMULTI; 4667 else { 4668 dev->allmulti -= inc; 4669 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 4670 dev->name); 4671 return -EOVERFLOW; 4672 } 4673 } 4674 if (dev->flags ^ old_flags) { 4675 dev_change_rx_flags(dev, IFF_ALLMULTI); 4676 dev_set_rx_mode(dev); 4677 } 4678 return 0; 4679 } 4680 EXPORT_SYMBOL(dev_set_allmulti); 4681 4682 /* 4683 * Upload unicast and multicast address lists to device and 4684 * configure RX filtering. When the device doesn't support unicast 4685 * filtering it is put in promiscuous mode while unicast addresses 4686 * are present. 4687 */ 4688 void __dev_set_rx_mode(struct net_device *dev) 4689 { 4690 const struct net_device_ops *ops = dev->netdev_ops; 4691 4692 /* dev_open will call this function so the list will stay sane. */ 4693 if (!(dev->flags&IFF_UP)) 4694 return; 4695 4696 if (!netif_device_present(dev)) 4697 return; 4698 4699 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 4700 /* Unicast addresses changes may only happen under the rtnl, 4701 * therefore calling __dev_set_promiscuity here is safe. 4702 */ 4703 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 4704 __dev_set_promiscuity(dev, 1); 4705 dev->uc_promisc = true; 4706 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 4707 __dev_set_promiscuity(dev, -1); 4708 dev->uc_promisc = false; 4709 } 4710 } 4711 4712 if (ops->ndo_set_rx_mode) 4713 ops->ndo_set_rx_mode(dev); 4714 } 4715 4716 void dev_set_rx_mode(struct net_device *dev) 4717 { 4718 netif_addr_lock_bh(dev); 4719 __dev_set_rx_mode(dev); 4720 netif_addr_unlock_bh(dev); 4721 } 4722 4723 /** 4724 * dev_get_flags - get flags reported to userspace 4725 * @dev: device 4726 * 4727 * Get the combination of flag bits exported through APIs to userspace. 4728 */ 4729 unsigned int dev_get_flags(const struct net_device *dev) 4730 { 4731 unsigned int flags; 4732 4733 flags = (dev->flags & ~(IFF_PROMISC | 4734 IFF_ALLMULTI | 4735 IFF_RUNNING | 4736 IFF_LOWER_UP | 4737 IFF_DORMANT)) | 4738 (dev->gflags & (IFF_PROMISC | 4739 IFF_ALLMULTI)); 4740 4741 if (netif_running(dev)) { 4742 if (netif_oper_up(dev)) 4743 flags |= IFF_RUNNING; 4744 if (netif_carrier_ok(dev)) 4745 flags |= IFF_LOWER_UP; 4746 if (netif_dormant(dev)) 4747 flags |= IFF_DORMANT; 4748 } 4749 4750 return flags; 4751 } 4752 EXPORT_SYMBOL(dev_get_flags); 4753 4754 int __dev_change_flags(struct net_device *dev, unsigned int flags) 4755 { 4756 unsigned int old_flags = dev->flags; 4757 int ret; 4758 4759 ASSERT_RTNL(); 4760 4761 /* 4762 * Set the flags on our device. 4763 */ 4764 4765 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 4766 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 4767 IFF_AUTOMEDIA)) | 4768 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 4769 IFF_ALLMULTI)); 4770 4771 /* 4772 * Load in the correct multicast list now the flags have changed. 4773 */ 4774 4775 if ((old_flags ^ flags) & IFF_MULTICAST) 4776 dev_change_rx_flags(dev, IFF_MULTICAST); 4777 4778 dev_set_rx_mode(dev); 4779 4780 /* 4781 * Have we downed the interface. 
We handle IFF_UP ourselves 4782 * according to user attempts to set it, rather than blindly 4783 * setting it. 4784 */ 4785 4786 ret = 0; 4787 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ 4788 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); 4789 4790 if (!ret) 4791 dev_set_rx_mode(dev); 4792 } 4793 4794 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4795 int inc = (flags & IFF_PROMISC) ? 1 : -1; 4796 4797 dev->gflags ^= IFF_PROMISC; 4798 dev_set_promiscuity(dev, inc); 4799 } 4800 4801 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 4802 is important. Some (broken) drivers set IFF_PROMISC, when 4803 IFF_ALLMULTI is requested not asking us and not reporting. 4804 */ 4805 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 4806 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 4807 4808 dev->gflags ^= IFF_ALLMULTI; 4809 dev_set_allmulti(dev, inc); 4810 } 4811 4812 return ret; 4813 } 4814 4815 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) 4816 { 4817 unsigned int changes = dev->flags ^ old_flags; 4818 4819 if (changes & IFF_UP) { 4820 if (dev->flags & IFF_UP) 4821 call_netdevice_notifiers(NETDEV_UP, dev); 4822 else 4823 call_netdevice_notifiers(NETDEV_DOWN, dev); 4824 } 4825 4826 if (dev->flags & IFF_UP && 4827 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) 4828 call_netdevice_notifiers(NETDEV_CHANGE, dev); 4829 } 4830 4831 /** 4832 * dev_change_flags - change device settings 4833 * @dev: device 4834 * @flags: device state flags 4835 * 4836 * Change settings on device based state flags. The flags are 4837 * in the userspace exported format. 4838 */ 4839 int dev_change_flags(struct net_device *dev, unsigned int flags) 4840 { 4841 int ret; 4842 unsigned int changes, old_flags = dev->flags; 4843 4844 ret = __dev_change_flags(dev, flags); 4845 if (ret < 0) 4846 return ret; 4847 4848 changes = old_flags ^ dev->flags; 4849 if (changes) 4850 rtmsg_ifinfo(RTM_NEWLINK, dev, changes); 4851 4852 __dev_notify_flags(dev, old_flags); 4853 return ret; 4854 } 4855 EXPORT_SYMBOL(dev_change_flags); 4856 4857 /** 4858 * dev_set_mtu - Change maximum transfer unit 4859 * @dev: device 4860 * @new_mtu: new transfer unit 4861 * 4862 * Change the maximum transfer size of the network device. 4863 */ 4864 int dev_set_mtu(struct net_device *dev, int new_mtu) 4865 { 4866 const struct net_device_ops *ops = dev->netdev_ops; 4867 int err; 4868 4869 if (new_mtu == dev->mtu) 4870 return 0; 4871 4872 /* MTU must be positive. 
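 * In practice only negative values are rejected here; a driver's
 * ndo_change_mtu() callback, when present, is what enforces any
 * device-specific minimum or maximum.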
*/ 4873 if (new_mtu < 0) 4874 return -EINVAL; 4875 4876 if (!netif_device_present(dev)) 4877 return -ENODEV; 4878 4879 err = 0; 4880 if (ops->ndo_change_mtu) 4881 err = ops->ndo_change_mtu(dev, new_mtu); 4882 else 4883 dev->mtu = new_mtu; 4884 4885 if (!err && dev->flags & IFF_UP) 4886 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4887 return err; 4888 } 4889 EXPORT_SYMBOL(dev_set_mtu); 4890 4891 /** 4892 * dev_set_group - Change group this device belongs to 4893 * @dev: device 4894 * @new_group: group this device should belong to 4895 */ 4896 void dev_set_group(struct net_device *dev, int new_group) 4897 { 4898 dev->group = new_group; 4899 } 4900 EXPORT_SYMBOL(dev_set_group); 4901 4902 /** 4903 * dev_set_mac_address - Change Media Access Control Address 4904 * @dev: device 4905 * @sa: new address 4906 * 4907 * Change the hardware (MAC) address of the device 4908 */ 4909 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 4910 { 4911 const struct net_device_ops *ops = dev->netdev_ops; 4912 int err; 4913 4914 if (!ops->ndo_set_mac_address) 4915 return -EOPNOTSUPP; 4916 if (sa->sa_family != dev->type) 4917 return -EINVAL; 4918 if (!netif_device_present(dev)) 4919 return -ENODEV; 4920 err = ops->ndo_set_mac_address(dev, sa); 4921 if (!err) 4922 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4923 add_device_randomness(dev->dev_addr, dev->addr_len); 4924 return err; 4925 } 4926 EXPORT_SYMBOL(dev_set_mac_address); 4927 4928 /* 4929 * Perform the SIOCxIFxxx calls, inside rcu_read_lock() 4930 */ 4931 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) 4932 { 4933 int err; 4934 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); 4935 4936 if (!dev) 4937 return -ENODEV; 4938 4939 switch (cmd) { 4940 case SIOCGIFFLAGS: /* Get interface flags */ 4941 ifr->ifr_flags = (short) dev_get_flags(dev); 4942 return 0; 4943 4944 case SIOCGIFMETRIC: /* Get the metric on the interface 4945 (currently unused) */ 4946 ifr->ifr_metric = 0; 4947 return 0; 4948 4949 case SIOCGIFMTU: /* Get the MTU of a device */ 4950 ifr->ifr_mtu = dev->mtu; 4951 return 0; 4952 4953 case SIOCGIFHWADDR: 4954 if (!dev->addr_len) 4955 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); 4956 else 4957 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, 4958 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4959 ifr->ifr_hwaddr.sa_family = dev->type; 4960 return 0; 4961 4962 case SIOCGIFSLAVE: 4963 err = -EINVAL; 4964 break; 4965 4966 case SIOCGIFMAP: 4967 ifr->ifr_map.mem_start = dev->mem_start; 4968 ifr->ifr_map.mem_end = dev->mem_end; 4969 ifr->ifr_map.base_addr = dev->base_addr; 4970 ifr->ifr_map.irq = dev->irq; 4971 ifr->ifr_map.dma = dev->dma; 4972 ifr->ifr_map.port = dev->if_port; 4973 return 0; 4974 4975 case SIOCGIFINDEX: 4976 ifr->ifr_ifindex = dev->ifindex; 4977 return 0; 4978 4979 case SIOCGIFTXQLEN: 4980 ifr->ifr_qlen = dev->tx_queue_len; 4981 return 0; 4982 4983 default: 4984 /* dev_ioctl() should ensure this case 4985 * is never reached 4986 */ 4987 WARN_ON(1); 4988 err = -ENOTTY; 4989 break; 4990 4991 } 4992 return err; 4993 } 4994 4995 /* 4996 * Perform the SIOCxIFxxx calls, inside rtnl_lock() 4997 */ 4998 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) 4999 { 5000 int err; 5001 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); 5002 const struct net_device_ops *ops; 5003 5004 if (!dev) 5005 return -ENODEV; 5006 5007 ops = dev->netdev_ops; 5008 5009 switch (cmd) { 5010 case SIOCSIFFLAGS: /* 
Set interface flags */ 5011 return dev_change_flags(dev, ifr->ifr_flags); 5012 5013 case SIOCSIFMETRIC: /* Set the metric on the interface 5014 (currently unused) */ 5015 return -EOPNOTSUPP; 5016 5017 case SIOCSIFMTU: /* Set the MTU of a device */ 5018 return dev_set_mtu(dev, ifr->ifr_mtu); 5019 5020 case SIOCSIFHWADDR: 5021 return dev_set_mac_address(dev, &ifr->ifr_hwaddr); 5022 5023 case SIOCSIFHWBROADCAST: 5024 if (ifr->ifr_hwaddr.sa_family != dev->type) 5025 return -EINVAL; 5026 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, 5027 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 5028 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 5029 return 0; 5030 5031 case SIOCSIFMAP: 5032 if (ops->ndo_set_config) { 5033 if (!netif_device_present(dev)) 5034 return -ENODEV; 5035 return ops->ndo_set_config(dev, &ifr->ifr_map); 5036 } 5037 return -EOPNOTSUPP; 5038 5039 case SIOCADDMULTI: 5040 if (!ops->ndo_set_rx_mode || 5041 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 5042 return -EINVAL; 5043 if (!netif_device_present(dev)) 5044 return -ENODEV; 5045 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); 5046 5047 case SIOCDELMULTI: 5048 if (!ops->ndo_set_rx_mode || 5049 ifr->ifr_hwaddr.sa_family != AF_UNSPEC) 5050 return -EINVAL; 5051 if (!netif_device_present(dev)) 5052 return -ENODEV; 5053 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); 5054 5055 case SIOCSIFTXQLEN: 5056 if (ifr->ifr_qlen < 0) 5057 return -EINVAL; 5058 dev->tx_queue_len = ifr->ifr_qlen; 5059 return 0; 5060 5061 case SIOCSIFNAME: 5062 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 5063 return dev_change_name(dev, ifr->ifr_newname); 5064 5065 case SIOCSHWTSTAMP: 5066 err = net_hwtstamp_validate(ifr); 5067 if (err) 5068 return err; 5069 /* fall through */ 5070 5071 /* 5072 * Unknown or private ioctl 5073 */ 5074 default: 5075 if ((cmd >= SIOCDEVPRIVATE && 5076 cmd <= SIOCDEVPRIVATE + 15) || 5077 cmd == SIOCBONDENSLAVE || 5078 cmd == SIOCBONDRELEASE || 5079 cmd == SIOCBONDSETHWADDR || 5080 cmd == SIOCBONDSLAVEINFOQUERY || 5081 cmd == SIOCBONDINFOQUERY || 5082 cmd == SIOCBONDCHANGEACTIVE || 5083 cmd == SIOCGMIIPHY || 5084 cmd == SIOCGMIIREG || 5085 cmd == SIOCSMIIREG || 5086 cmd == SIOCBRADDIF || 5087 cmd == SIOCBRDELIF || 5088 cmd == SIOCSHWTSTAMP || 5089 cmd == SIOCWANDEV) { 5090 err = -EOPNOTSUPP; 5091 if (ops->ndo_do_ioctl) { 5092 if (netif_device_present(dev)) 5093 err = ops->ndo_do_ioctl(dev, ifr, cmd); 5094 else 5095 err = -ENODEV; 5096 } 5097 } else 5098 err = -EINVAL; 5099 5100 } 5101 return err; 5102 } 5103 5104 /* 5105 * This function handles all "interface"-type I/O control requests. The actual 5106 * 'doing' part of this is dev_ifsioc above. 5107 */ 5108 5109 /** 5110 * dev_ioctl - network device ioctl 5111 * @net: the applicable net namespace 5112 * @cmd: command to issue 5113 * @arg: pointer to a struct ifreq in user space 5114 * 5115 * Issue ioctl functions to devices. This is normally called by the 5116 * user space syscall interfaces but can sometimes be useful for 5117 * other purposes. The return value is the return from the syscall if 5118 * positive or a negative errno code on error. 5119 */ 5120 5121 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) 5122 { 5123 struct ifreq ifr; 5124 int ret; 5125 char *colon; 5126 5127 /* One special case: SIOCGIFCONF takes ifconf argument 5128 and requires shared lock, because it sleeps writing 5129 to user space. 
5130 */ 5131 5132 if (cmd == SIOCGIFCONF) { 5133 rtnl_lock(); 5134 ret = dev_ifconf(net, (char __user *) arg); 5135 rtnl_unlock(); 5136 return ret; 5137 } 5138 if (cmd == SIOCGIFNAME) 5139 return dev_ifname(net, (struct ifreq __user *)arg); 5140 5141 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 5142 return -EFAULT; 5143 5144 ifr.ifr_name[IFNAMSIZ-1] = 0; 5145 5146 colon = strchr(ifr.ifr_name, ':'); 5147 if (colon) 5148 *colon = 0; 5149 5150 /* 5151 * See which interface the caller is talking about. 5152 */ 5153 5154 switch (cmd) { 5155 /* 5156 * These ioctl calls: 5157 * - can be done by all. 5158 * - atomic and do not require locking. 5159 * - return a value 5160 */ 5161 case SIOCGIFFLAGS: 5162 case SIOCGIFMETRIC: 5163 case SIOCGIFMTU: 5164 case SIOCGIFHWADDR: 5165 case SIOCGIFSLAVE: 5166 case SIOCGIFMAP: 5167 case SIOCGIFINDEX: 5168 case SIOCGIFTXQLEN: 5169 dev_load(net, ifr.ifr_name); 5170 rcu_read_lock(); 5171 ret = dev_ifsioc_locked(net, &ifr, cmd); 5172 rcu_read_unlock(); 5173 if (!ret) { 5174 if (colon) 5175 *colon = ':'; 5176 if (copy_to_user(arg, &ifr, 5177 sizeof(struct ifreq))) 5178 ret = -EFAULT; 5179 } 5180 return ret; 5181 5182 case SIOCETHTOOL: 5183 dev_load(net, ifr.ifr_name); 5184 rtnl_lock(); 5185 ret = dev_ethtool(net, &ifr); 5186 rtnl_unlock(); 5187 if (!ret) { 5188 if (colon) 5189 *colon = ':'; 5190 if (copy_to_user(arg, &ifr, 5191 sizeof(struct ifreq))) 5192 ret = -EFAULT; 5193 } 5194 return ret; 5195 5196 /* 5197 * These ioctl calls: 5198 * - require superuser power. 5199 * - require strict serialization. 5200 * - return a value 5201 */ 5202 case SIOCGMIIPHY: 5203 case SIOCGMIIREG: 5204 case SIOCSIFNAME: 5205 if (!capable(CAP_NET_ADMIN)) 5206 return -EPERM; 5207 dev_load(net, ifr.ifr_name); 5208 rtnl_lock(); 5209 ret = dev_ifsioc(net, &ifr, cmd); 5210 rtnl_unlock(); 5211 if (!ret) { 5212 if (colon) 5213 *colon = ':'; 5214 if (copy_to_user(arg, &ifr, 5215 sizeof(struct ifreq))) 5216 ret = -EFAULT; 5217 } 5218 return ret; 5219 5220 /* 5221 * These ioctl calls: 5222 * - require superuser power. 5223 * - require strict serialization. 5224 * - do not return a value 5225 */ 5226 case SIOCSIFFLAGS: 5227 case SIOCSIFMETRIC: 5228 case SIOCSIFMTU: 5229 case SIOCSIFMAP: 5230 case SIOCSIFHWADDR: 5231 case SIOCSIFSLAVE: 5232 case SIOCADDMULTI: 5233 case SIOCDELMULTI: 5234 case SIOCSIFHWBROADCAST: 5235 case SIOCSIFTXQLEN: 5236 case SIOCSMIIREG: 5237 case SIOCBONDENSLAVE: 5238 case SIOCBONDRELEASE: 5239 case SIOCBONDSETHWADDR: 5240 case SIOCBONDCHANGEACTIVE: 5241 case SIOCBRADDIF: 5242 case SIOCBRDELIF: 5243 case SIOCSHWTSTAMP: 5244 if (!capable(CAP_NET_ADMIN)) 5245 return -EPERM; 5246 /* fall through */ 5247 case SIOCBONDSLAVEINFOQUERY: 5248 case SIOCBONDINFOQUERY: 5249 dev_load(net, ifr.ifr_name); 5250 rtnl_lock(); 5251 ret = dev_ifsioc(net, &ifr, cmd); 5252 rtnl_unlock(); 5253 return ret; 5254 5255 case SIOCGIFMEM: 5256 /* Get the per device memory space. We can add this but 5257 * currently do not support it */ 5258 case SIOCSIFMEM: 5259 /* Set the per device memory buffer space. 5260 * Not applicable in our case */ 5261 case SIOCSIFLINK: 5262 return -ENOTTY; 5263 5264 /* 5265 * Unknown or private ioctl. 
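 * SIOCWANDEV and the SIOCDEVPRIVATE..SIOCDEVPRIVATE+15 range are passed
 * to the driver's ndo_do_ioctl() under the RTNL lock, wireless
 * extension ioctls (SIOCIWFIRST..SIOCIWLAST) go to wext_handle_ioctl(),
 * and anything else fails with -ENOTTY.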
5266 */ 5267 default: 5268 if (cmd == SIOCWANDEV || 5269 (cmd >= SIOCDEVPRIVATE && 5270 cmd <= SIOCDEVPRIVATE + 15)) { 5271 dev_load(net, ifr.ifr_name); 5272 rtnl_lock(); 5273 ret = dev_ifsioc(net, &ifr, cmd); 5274 rtnl_unlock(); 5275 if (!ret && copy_to_user(arg, &ifr, 5276 sizeof(struct ifreq))) 5277 ret = -EFAULT; 5278 return ret; 5279 } 5280 /* Take care of Wireless Extensions */ 5281 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) 5282 return wext_handle_ioctl(net, &ifr, cmd, arg); 5283 return -ENOTTY; 5284 } 5285 } 5286 5287 5288 /** 5289 * dev_new_index - allocate an ifindex 5290 * @net: the applicable net namespace 5291 * 5292 * Returns a suitable unique value for a new device interface 5293 * number. The caller must hold the rtnl semaphore or the 5294 * dev_base_lock to be sure it remains unique. 5295 */ 5296 static int dev_new_index(struct net *net) 5297 { 5298 int ifindex = net->ifindex; 5299 for (;;) { 5300 if (++ifindex <= 0) 5301 ifindex = 1; 5302 if (!__dev_get_by_index(net, ifindex)) 5303 return net->ifindex = ifindex; 5304 } 5305 } 5306 5307 /* Delayed registration/unregisteration */ 5308 static LIST_HEAD(net_todo_list); 5309 5310 static void net_set_todo(struct net_device *dev) 5311 { 5312 list_add_tail(&dev->todo_list, &net_todo_list); 5313 } 5314 5315 static void rollback_registered_many(struct list_head *head) 5316 { 5317 struct net_device *dev, *tmp; 5318 5319 BUG_ON(dev_boot_phase); 5320 ASSERT_RTNL(); 5321 5322 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 5323 /* Some devices call without registering 5324 * for initialization unwind. Remove those 5325 * devices and proceed with the remaining. 5326 */ 5327 if (dev->reg_state == NETREG_UNINITIALIZED) { 5328 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 5329 dev->name, dev); 5330 5331 WARN_ON(1); 5332 list_del(&dev->unreg_list); 5333 continue; 5334 } 5335 dev->dismantle = true; 5336 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5337 } 5338 5339 /* If device is running, close it first. */ 5340 dev_close_many(head); 5341 5342 list_for_each_entry(dev, head, unreg_list) { 5343 /* And unlink it from device chain. */ 5344 unlist_netdevice(dev); 5345 5346 dev->reg_state = NETREG_UNREGISTERING; 5347 } 5348 5349 synchronize_net(); 5350 5351 list_for_each_entry(dev, head, unreg_list) { 5352 /* Shutdown queueing discipline. */ 5353 dev_shutdown(dev); 5354 5355 5356 /* Notify protocols, that we are about to destroy 5357 this device. They should clean all the things. 5358 */ 5359 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5360 5361 if (!dev->rtnl_link_ops || 5362 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5363 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 5364 5365 /* 5366 * Flush the unicast and multicast chains 5367 */ 5368 dev_uc_flush(dev); 5369 dev_mc_flush(dev); 5370 5371 if (dev->netdev_ops->ndo_uninit) 5372 dev->netdev_ops->ndo_uninit(dev); 5373 5374 /* Notifier chain MUST detach us from master device. 
*/ 5375 WARN_ON(dev->master); 5376 5377 /* Remove entries from kobject tree */ 5378 netdev_unregister_kobject(dev); 5379 } 5380 5381 synchronize_net(); 5382 5383 list_for_each_entry(dev, head, unreg_list) 5384 dev_put(dev); 5385 } 5386 5387 static void rollback_registered(struct net_device *dev) 5388 { 5389 LIST_HEAD(single); 5390 5391 list_add(&dev->unreg_list, &single); 5392 rollback_registered_many(&single); 5393 list_del(&single); 5394 } 5395 5396 static netdev_features_t netdev_fix_features(struct net_device *dev, 5397 netdev_features_t features) 5398 { 5399 /* Fix illegal checksum combinations */ 5400 if ((features & NETIF_F_HW_CSUM) && 5401 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5402 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 5403 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5404 } 5405 5406 /* Fix illegal SG+CSUM combinations. */ 5407 if ((features & NETIF_F_SG) && 5408 !(features & NETIF_F_ALL_CSUM)) { 5409 netdev_dbg(dev, 5410 "Dropping NETIF_F_SG since no checksum feature.\n"); 5411 features &= ~NETIF_F_SG; 5412 } 5413 5414 /* TSO requires that SG is present as well. */ 5415 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5416 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 5417 features &= ~NETIF_F_ALL_TSO; 5418 } 5419 5420 /* TSO ECN requires that TSO is present as well. */ 5421 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 5422 features &= ~NETIF_F_TSO_ECN; 5423 5424 /* Software GSO depends on SG. */ 5425 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5426 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5427 features &= ~NETIF_F_GSO; 5428 } 5429 5430 /* UFO needs SG and checksumming */ 5431 if (features & NETIF_F_UFO) { 5432 /* maybe split UFO into V4 and V6? */ 5433 if (!((features & NETIF_F_GEN_CSUM) || 5434 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5435 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5436 netdev_dbg(dev, 5437 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5438 features &= ~NETIF_F_UFO; 5439 } 5440 5441 if (!(features & NETIF_F_SG)) { 5442 netdev_dbg(dev, 5443 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5444 features &= ~NETIF_F_UFO; 5445 } 5446 } 5447 5448 return features; 5449 } 5450 5451 int __netdev_update_features(struct net_device *dev) 5452 { 5453 netdev_features_t features; 5454 int err = 0; 5455 5456 ASSERT_RTNL(); 5457 5458 features = netdev_get_wanted_features(dev); 5459 5460 if (dev->netdev_ops->ndo_fix_features) 5461 features = dev->netdev_ops->ndo_fix_features(dev, features); 5462 5463 /* driver might be less strict about feature dependencies */ 5464 features = netdev_fix_features(dev, features); 5465 5466 if (dev->features == features) 5467 return 0; 5468 5469 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 5470 &dev->features, &features); 5471 5472 if (dev->netdev_ops->ndo_set_features) 5473 err = dev->netdev_ops->ndo_set_features(dev, features); 5474 5475 if (unlikely(err < 0)) { 5476 netdev_err(dev, 5477 "set_features() failed (%d); wanted %pNF, left %pNF\n", 5478 err, &features, &dev->features); 5479 return -1; 5480 } 5481 5482 if (!err) 5483 dev->features = features; 5484 5485 return 1; 5486 } 5487 5488 /** 5489 * netdev_update_features - recalculate device features 5490 * @dev: the device to check 5491 * 5492 * Recalculate dev->features set and send notifications if it 5493 * has changed. 
Should be called after driver or hardware dependent 5494 * conditions might have changed that influence the features. 5495 */ 5496 void netdev_update_features(struct net_device *dev) 5497 { 5498 if (__netdev_update_features(dev)) 5499 netdev_features_change(dev); 5500 } 5501 EXPORT_SYMBOL(netdev_update_features); 5502 5503 /** 5504 * netdev_change_features - recalculate device features 5505 * @dev: the device to check 5506 * 5507 * Recalculate dev->features set and send notifications even 5508 * if they have not changed. Should be called instead of 5509 * netdev_update_features() if also dev->vlan_features might 5510 * have changed to allow the changes to be propagated to stacked 5511 * VLAN devices. 5512 */ 5513 void netdev_change_features(struct net_device *dev) 5514 { 5515 __netdev_update_features(dev); 5516 netdev_features_change(dev); 5517 } 5518 EXPORT_SYMBOL(netdev_change_features); 5519 5520 /** 5521 * netif_stacked_transfer_operstate - transfer operstate 5522 * @rootdev: the root or lower level device to transfer state from 5523 * @dev: the device to transfer operstate to 5524 * 5525 * Transfer operational state from root to device. This is normally 5526 * called when a stacking relationship exists between the root 5527 * device and the device(a leaf device). 5528 */ 5529 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5530 struct net_device *dev) 5531 { 5532 if (rootdev->operstate == IF_OPER_DORMANT) 5533 netif_dormant_on(dev); 5534 else 5535 netif_dormant_off(dev); 5536 5537 if (netif_carrier_ok(rootdev)) { 5538 if (!netif_carrier_ok(dev)) 5539 netif_carrier_on(dev); 5540 } else { 5541 if (netif_carrier_ok(dev)) 5542 netif_carrier_off(dev); 5543 } 5544 } 5545 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5546 5547 #ifdef CONFIG_RPS 5548 static int netif_alloc_rx_queues(struct net_device *dev) 5549 { 5550 unsigned int i, count = dev->num_rx_queues; 5551 struct netdev_rx_queue *rx; 5552 5553 BUG_ON(count < 1); 5554 5555 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5556 if (!rx) { 5557 pr_err("netdev: Unable to allocate %u rx queues\n", count); 5558 return -ENOMEM; 5559 } 5560 dev->_rx = rx; 5561 5562 for (i = 0; i < count; i++) 5563 rx[i].dev = dev; 5564 return 0; 5565 } 5566 #endif 5567 5568 static void netdev_init_one_queue(struct net_device *dev, 5569 struct netdev_queue *queue, void *_unused) 5570 { 5571 /* Initialize queue lock */ 5572 spin_lock_init(&queue->_xmit_lock); 5573 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 5574 queue->xmit_lock_owner = -1; 5575 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 5576 queue->dev = dev; 5577 #ifdef CONFIG_BQL 5578 dql_init(&queue->dql, HZ); 5579 #endif 5580 } 5581 5582 static int netif_alloc_netdev_queues(struct net_device *dev) 5583 { 5584 unsigned int count = dev->num_tx_queues; 5585 struct netdev_queue *tx; 5586 5587 BUG_ON(count < 1); 5588 5589 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5590 if (!tx) { 5591 pr_err("netdev: Unable to allocate %u tx queues\n", count); 5592 return -ENOMEM; 5593 } 5594 dev->_tx = tx; 5595 5596 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5597 spin_lock_init(&dev->tx_global_lock); 5598 5599 return 0; 5600 } 5601 5602 /** 5603 * register_netdevice - register a network device 5604 * @dev: device to register 5605 * 5606 * Take a completed network device structure and add it to the kernel 5607 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5608 * chain. 0 is returned on success. 
A negative errno code is returned 5609 * on a failure to set up the device, or if the name is a duplicate. 5610 * 5611 * Callers must hold the rtnl semaphore. You may want 5612 * register_netdev() instead of this. 5613 * 5614 * BUGS: 5615 * The locking appears insufficient to guarantee two parallel registers 5616 * will not get the same name. 5617 */ 5618 5619 int register_netdevice(struct net_device *dev) 5620 { 5621 int ret; 5622 struct net *net = dev_net(dev); 5623 5624 BUG_ON(dev_boot_phase); 5625 ASSERT_RTNL(); 5626 5627 might_sleep(); 5628 5629 /* When net_device's are persistent, this will be fatal. */ 5630 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 5631 BUG_ON(!net); 5632 5633 spin_lock_init(&dev->addr_list_lock); 5634 netdev_set_addr_lockdep_class(dev); 5635 5636 dev->iflink = -1; 5637 5638 ret = dev_get_valid_name(net, dev, dev->name); 5639 if (ret < 0) 5640 goto out; 5641 5642 /* Init, if this function is available */ 5643 if (dev->netdev_ops->ndo_init) { 5644 ret = dev->netdev_ops->ndo_init(dev); 5645 if (ret) { 5646 if (ret > 0) 5647 ret = -EIO; 5648 goto out; 5649 } 5650 } 5651 5652 ret = -EBUSY; 5653 if (!dev->ifindex) 5654 dev->ifindex = dev_new_index(net); 5655 else if (__dev_get_by_index(net, dev->ifindex)) 5656 goto err_uninit; 5657 5658 if (dev->iflink == -1) 5659 dev->iflink = dev->ifindex; 5660 5661 /* Transfer changeable features to wanted_features and enable 5662 * software offloads (GSO and GRO). 5663 */ 5664 dev->hw_features |= NETIF_F_SOFT_FEATURES; 5665 dev->features |= NETIF_F_SOFT_FEATURES; 5666 dev->wanted_features = dev->features & dev->hw_features; 5667 5668 /* Turn on no cache copy if HW is doing checksum */ 5669 if (!(dev->flags & IFF_LOOPBACK)) { 5670 dev->hw_features |= NETIF_F_NOCACHE_COPY; 5671 if (dev->features & NETIF_F_ALL_CSUM) { 5672 dev->wanted_features |= NETIF_F_NOCACHE_COPY; 5673 dev->features |= NETIF_F_NOCACHE_COPY; 5674 } 5675 } 5676 5677 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 5678 */ 5679 dev->vlan_features |= NETIF_F_HIGHDMA; 5680 5681 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5682 ret = notifier_to_errno(ret); 5683 if (ret) 5684 goto err_uninit; 5685 5686 ret = netdev_register_kobject(dev); 5687 if (ret) 5688 goto err_uninit; 5689 dev->reg_state = NETREG_REGISTERED; 5690 5691 __netdev_update_features(dev); 5692 5693 /* 5694 * Default initial state at registry is that the 5695 * device is present. 5696 */ 5697 5698 set_bit(__LINK_STATE_PRESENT, &dev->state); 5699 5700 linkwatch_init_dev(dev); 5701 5702 dev_init_scheduler(dev); 5703 dev_hold(dev); 5704 list_netdevice(dev); 5705 add_device_randomness(dev->dev_addr, dev->addr_len); 5706 5707 /* Notify protocols, that a new device appeared. */ 5708 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 5709 ret = notifier_to_errno(ret); 5710 if (ret) { 5711 rollback_registered(dev); 5712 dev->reg_state = NETREG_UNREGISTERED; 5713 } 5714 /* 5715 * Prevent userspace races by waiting until the network 5716 * device is fully setup before sending notifications. 
5717 */ 5718 if (!dev->rtnl_link_ops || 5719 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5720 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); 5721 5722 out: 5723 return ret; 5724 5725 err_uninit: 5726 if (dev->netdev_ops->ndo_uninit) 5727 dev->netdev_ops->ndo_uninit(dev); 5728 goto out; 5729 } 5730 EXPORT_SYMBOL(register_netdevice); 5731 5732 /** 5733 * init_dummy_netdev - init a dummy network device for NAPI 5734 * @dev: device to init 5735 * 5736 * This takes a network device structure and initialize the minimum 5737 * amount of fields so it can be used to schedule NAPI polls without 5738 * registering a full blown interface. This is to be used by drivers 5739 * that need to tie several hardware interfaces to a single NAPI 5740 * poll scheduler due to HW limitations. 5741 */ 5742 int init_dummy_netdev(struct net_device *dev) 5743 { 5744 /* Clear everything. Note we don't initialize spinlocks 5745 * are they aren't supposed to be taken by any of the 5746 * NAPI code and this dummy netdev is supposed to be 5747 * only ever used for NAPI polls 5748 */ 5749 memset(dev, 0, sizeof(struct net_device)); 5750 5751 /* make sure we BUG if trying to hit standard 5752 * register/unregister code path 5753 */ 5754 dev->reg_state = NETREG_DUMMY; 5755 5756 /* NAPI wants this */ 5757 INIT_LIST_HEAD(&dev->napi_list); 5758 5759 /* a dummy interface is started by default */ 5760 set_bit(__LINK_STATE_PRESENT, &dev->state); 5761 set_bit(__LINK_STATE_START, &dev->state); 5762 5763 /* Note : We dont allocate pcpu_refcnt for dummy devices, 5764 * because users of this 'device' dont need to change 5765 * its refcount. 5766 */ 5767 5768 return 0; 5769 } 5770 EXPORT_SYMBOL_GPL(init_dummy_netdev); 5771 5772 5773 /** 5774 * register_netdev - register a network device 5775 * @dev: device to register 5776 * 5777 * Take a completed network device structure and add it to the kernel 5778 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5779 * chain. 0 is returned on success. A negative errno code is returned 5780 * on a failure to set up the device, or if the name is a duplicate. 5781 * 5782 * This is a wrapper around register_netdevice that takes the rtnl semaphore 5783 * and expands the device name if you passed a format string to 5784 * alloc_netdev. 5785 */ 5786 int register_netdev(struct net_device *dev) 5787 { 5788 int err; 5789 5790 rtnl_lock(); 5791 err = register_netdevice(dev); 5792 rtnl_unlock(); 5793 return err; 5794 } 5795 EXPORT_SYMBOL(register_netdev); 5796 5797 int netdev_refcnt_read(const struct net_device *dev) 5798 { 5799 int i, refcnt = 0; 5800 5801 for_each_possible_cpu(i) 5802 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 5803 return refcnt; 5804 } 5805 EXPORT_SYMBOL(netdev_refcnt_read); 5806 5807 /** 5808 * netdev_wait_allrefs - wait until all references are gone. 5809 * @dev: target net_device 5810 * 5811 * This is called when unregistering network devices. 5812 * 5813 * Any protocol or device that holds a reference should register 5814 * for netdevice notification, and cleanup and put back the 5815 * reference if they receive an UNREGISTER event. 5816 * We can get stuck here if buggy protocols don't correctly 5817 * call dev_put. 
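 * When that happens the NETDEV_UNREGISTER notification is rebroadcast
 * roughly once a second and an "unregister_netdevice: waiting for %s to
 * become free" warning is printed every ten seconds while the refcount
 * is polled.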
5818 */ 5819 static void netdev_wait_allrefs(struct net_device *dev) 5820 { 5821 unsigned long rebroadcast_time, warning_time; 5822 int refcnt; 5823 5824 linkwatch_forget_dev(dev); 5825 5826 rebroadcast_time = warning_time = jiffies; 5827 refcnt = netdev_refcnt_read(dev); 5828 5829 while (refcnt != 0) { 5830 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 5831 rtnl_lock(); 5832 5833 /* Rebroadcast unregister notification */ 5834 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5835 5836 __rtnl_unlock(); 5837 rcu_barrier(); 5838 rtnl_lock(); 5839 5840 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5841 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5842 &dev->state)) { 5843 /* We must not have linkwatch events 5844 * pending on unregister. If this 5845 * happens, we simply run the queue 5846 * unscheduled, resulting in a noop 5847 * for this device. 5848 */ 5849 linkwatch_run_queue(); 5850 } 5851 5852 __rtnl_unlock(); 5853 5854 rebroadcast_time = jiffies; 5855 } 5856 5857 msleep(250); 5858 5859 refcnt = netdev_refcnt_read(dev); 5860 5861 if (time_after(jiffies, warning_time + 10 * HZ)) { 5862 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 5863 dev->name, refcnt); 5864 warning_time = jiffies; 5865 } 5866 } 5867 } 5868 5869 /* The sequence is: 5870 * 5871 * rtnl_lock(); 5872 * ... 5873 * register_netdevice(x1); 5874 * register_netdevice(x2); 5875 * ... 5876 * unregister_netdevice(y1); 5877 * unregister_netdevice(y2); 5878 * ... 5879 * rtnl_unlock(); 5880 * free_netdev(y1); 5881 * free_netdev(y2); 5882 * 5883 * We are invoked by rtnl_unlock(). 5884 * This allows us to deal with problems: 5885 * 1) We can delete sysfs objects which invoke hotplug 5886 * without deadlocking with linkwatch via keventd. 5887 * 2) Since we run with the RTNL semaphore not held, we can sleep 5888 * safely in order to wait for the netdev refcnt to drop to zero. 5889 * 5890 * We must not return until all unregister events added during 5891 * the interval the lock was held have been completed. 5892 */ 5893 void netdev_run_todo(void) 5894 { 5895 struct list_head list; 5896 5897 /* Snapshot list, allow later requests */ 5898 list_replace_init(&net_todo_list, &list); 5899 5900 __rtnl_unlock(); 5901 5902 5903 /* Wait for rcu callbacks to finish before next phase */ 5904 if (!list_empty(&list)) 5905 rcu_barrier(); 5906 5907 while (!list_empty(&list)) { 5908 struct net_device *dev 5909 = list_first_entry(&list, struct net_device, todo_list); 5910 list_del(&dev->todo_list); 5911 5912 rtnl_lock(); 5913 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5914 __rtnl_unlock(); 5915 5916 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5917 pr_err("network todo '%s' but state %d\n", 5918 dev->name, dev->reg_state); 5919 dump_stack(); 5920 continue; 5921 } 5922 5923 dev->reg_state = NETREG_UNREGISTERED; 5924 5925 on_each_cpu(flush_backlog, dev, 1); 5926 5927 netdev_wait_allrefs(dev); 5928 5929 /* paranoia */ 5930 BUG_ON(netdev_refcnt_read(dev)); 5931 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 5932 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 5933 WARN_ON(dev->dn_ptr); 5934 5935 if (dev->destructor) 5936 dev->destructor(dev); 5937 5938 /* Free network device */ 5939 kobject_put(&dev->dev.kobj); 5940 } 5941 } 5942 5943 /* Convert net_device_stats to rtnl_link_stats64. They have the same 5944 * fields in the same order, with only the type differing. 
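 * On 64-bit builds the two structures have the same size and a plain
 * memcpy is used; on 32-bit each unsigned long counter is widened to
 * u64 one field at a time.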
5945 */ 5946 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 5947 const struct net_device_stats *netdev_stats) 5948 { 5949 #if BITS_PER_LONG == 64 5950 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 5951 memcpy(stats64, netdev_stats, sizeof(*stats64)); 5952 #else 5953 size_t i, n = sizeof(*stats64) / sizeof(u64); 5954 const unsigned long *src = (const unsigned long *)netdev_stats; 5955 u64 *dst = (u64 *)stats64; 5956 5957 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != 5958 sizeof(*stats64) / sizeof(u64)); 5959 for (i = 0; i < n; i++) 5960 dst[i] = src[i]; 5961 #endif 5962 } 5963 EXPORT_SYMBOL(netdev_stats_to_stats64); 5964 5965 /** 5966 * dev_get_stats - get network device statistics 5967 * @dev: device to get statistics from 5968 * @storage: place to store stats 5969 * 5970 * Get network statistics from device. Return @storage. 5971 * The device driver may provide its own method by setting 5972 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 5973 * otherwise the internal statistics structure is used. 5974 */ 5975 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 5976 struct rtnl_link_stats64 *storage) 5977 { 5978 const struct net_device_ops *ops = dev->netdev_ops; 5979 5980 if (ops->ndo_get_stats64) { 5981 memset(storage, 0, sizeof(*storage)); 5982 ops->ndo_get_stats64(dev, storage); 5983 } else if (ops->ndo_get_stats) { 5984 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 5985 } else { 5986 netdev_stats_to_stats64(storage, &dev->stats); 5987 } 5988 storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 5989 return storage; 5990 } 5991 EXPORT_SYMBOL(dev_get_stats); 5992 5993 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 5994 { 5995 struct netdev_queue *queue = dev_ingress_queue(dev); 5996 5997 #ifdef CONFIG_NET_CLS_ACT 5998 if (queue) 5999 return queue; 6000 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 6001 if (!queue) 6002 return NULL; 6003 netdev_init_one_queue(dev, queue, NULL); 6004 queue->qdisc = &noop_qdisc; 6005 queue->qdisc_sleeping = &noop_qdisc; 6006 rcu_assign_pointer(dev->ingress_queue, queue); 6007 #endif 6008 return queue; 6009 } 6010 6011 static const struct ethtool_ops default_ethtool_ops; 6012 6013 /** 6014 * alloc_netdev_mqs - allocate network device 6015 * @sizeof_priv: size of private data to allocate space for 6016 * @name: device name format string 6017 * @setup: callback to initialize device 6018 * @txqs: the number of TX subqueues to allocate 6019 * @rxqs: the number of RX subqueues to allocate 6020 * 6021 * Allocates a struct net_device with private data area for driver use 6022 * and performs basic initialization. Also allocates subquue structs 6023 * for each queue on the device. 
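 *
 * Illustrative use only (names are hypothetical): an Ethernet driver
 * with a private state structure and eight queue pairs might call
 *
 *   dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *                          ether_setup, 8, 8);
 *
 * and later reach its private area through netdev_priv(dev).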
6024 */ 6025 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 6026 void (*setup)(struct net_device *), 6027 unsigned int txqs, unsigned int rxqs) 6028 { 6029 struct net_device *dev; 6030 size_t alloc_size; 6031 struct net_device *p; 6032 6033 BUG_ON(strlen(name) >= sizeof(dev->name)); 6034 6035 if (txqs < 1) { 6036 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 6037 return NULL; 6038 } 6039 6040 #ifdef CONFIG_RPS 6041 if (rxqs < 1) { 6042 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 6043 return NULL; 6044 } 6045 #endif 6046 6047 alloc_size = sizeof(struct net_device); 6048 if (sizeof_priv) { 6049 /* ensure 32-byte alignment of private area */ 6050 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 6051 alloc_size += sizeof_priv; 6052 } 6053 /* ensure 32-byte alignment of whole construct */ 6054 alloc_size += NETDEV_ALIGN - 1; 6055 6056 p = kzalloc(alloc_size, GFP_KERNEL); 6057 if (!p) { 6058 pr_err("alloc_netdev: Unable to allocate device\n"); 6059 return NULL; 6060 } 6061 6062 dev = PTR_ALIGN(p, NETDEV_ALIGN); 6063 dev->padded = (char *)dev - (char *)p; 6064 6065 dev->pcpu_refcnt = alloc_percpu(int); 6066 if (!dev->pcpu_refcnt) 6067 goto free_p; 6068 6069 if (dev_addr_init(dev)) 6070 goto free_pcpu; 6071 6072 dev_mc_init(dev); 6073 dev_uc_init(dev); 6074 6075 dev_net_set(dev, &init_net); 6076 6077 dev->gso_max_size = GSO_MAX_SIZE; 6078 dev->gso_max_segs = GSO_MAX_SEGS; 6079 6080 INIT_LIST_HEAD(&dev->napi_list); 6081 INIT_LIST_HEAD(&dev->unreg_list); 6082 INIT_LIST_HEAD(&dev->link_watch_list); 6083 dev->priv_flags = IFF_XMIT_DST_RELEASE; 6084 setup(dev); 6085 6086 dev->num_tx_queues = txqs; 6087 dev->real_num_tx_queues = txqs; 6088 if (netif_alloc_netdev_queues(dev)) 6089 goto free_all; 6090 6091 #ifdef CONFIG_RPS 6092 dev->num_rx_queues = rxqs; 6093 dev->real_num_rx_queues = rxqs; 6094 if (netif_alloc_rx_queues(dev)) 6095 goto free_all; 6096 #endif 6097 6098 strcpy(dev->name, name); 6099 dev->group = INIT_NETDEV_GROUP; 6100 if (!dev->ethtool_ops) 6101 dev->ethtool_ops = &default_ethtool_ops; 6102 return dev; 6103 6104 free_all: 6105 free_netdev(dev); 6106 return NULL; 6107 6108 free_pcpu: 6109 free_percpu(dev->pcpu_refcnt); 6110 kfree(dev->_tx); 6111 #ifdef CONFIG_RPS 6112 kfree(dev->_rx); 6113 #endif 6114 6115 free_p: 6116 kfree(p); 6117 return NULL; 6118 } 6119 EXPORT_SYMBOL(alloc_netdev_mqs); 6120 6121 /** 6122 * free_netdev - free network device 6123 * @dev: device 6124 * 6125 * This function does the last stage of destroying an allocated device 6126 * interface. The reference to the device object is released. 6127 * If this is the last reference then it will be freed. 
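 *
 * It may be called on a device that was never registered; for a
 * registered device it must only run after unregistration has
 * completed, in which case the memory is finally released through the
 * embedded struct device's release callback.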
6128 */ 6129 void free_netdev(struct net_device *dev) 6130 { 6131 struct napi_struct *p, *n; 6132 6133 release_net(dev_net(dev)); 6134 6135 kfree(dev->_tx); 6136 #ifdef CONFIG_RPS 6137 kfree(dev->_rx); 6138 #endif 6139 6140 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 6141 6142 /* Flush device addresses */ 6143 dev_addr_flush(dev); 6144 6145 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 6146 netif_napi_del(p); 6147 6148 free_percpu(dev->pcpu_refcnt); 6149 dev->pcpu_refcnt = NULL; 6150 6151 /* Compatibility with error handling in drivers */ 6152 if (dev->reg_state == NETREG_UNINITIALIZED) { 6153 kfree((char *)dev - dev->padded); 6154 return; 6155 } 6156 6157 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 6158 dev->reg_state = NETREG_RELEASED; 6159 6160 /* will free via device release */ 6161 put_device(&dev->dev); 6162 } 6163 EXPORT_SYMBOL(free_netdev); 6164 6165 /** 6166 * synchronize_net - Synchronize with packet receive processing 6167 * 6168 * Wait for packets currently being received to be done. 6169 * Does not block later packets from starting. 6170 */ 6171 void synchronize_net(void) 6172 { 6173 might_sleep(); 6174 if (rtnl_is_locked()) 6175 synchronize_rcu_expedited(); 6176 else 6177 synchronize_rcu(); 6178 } 6179 EXPORT_SYMBOL(synchronize_net); 6180 6181 /** 6182 * unregister_netdevice_queue - remove device from the kernel 6183 * @dev: device 6184 * @head: list 6185 * 6186 * This function shuts down a device interface and removes it 6187 * from the kernel tables. 6188 * If head not NULL, device is queued to be unregistered later. 6189 * 6190 * Callers must hold the rtnl semaphore. You may want 6191 * unregister_netdev() instead of this. 6192 */ 6193 6194 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 6195 { 6196 ASSERT_RTNL(); 6197 6198 if (head) { 6199 list_move_tail(&dev->unreg_list, head); 6200 } else { 6201 rollback_registered(dev); 6202 /* Finish processing unregister after unlock */ 6203 net_set_todo(dev); 6204 } 6205 } 6206 EXPORT_SYMBOL(unregister_netdevice_queue); 6207 6208 /** 6209 * unregister_netdevice_many - unregister many devices 6210 * @head: list of devices 6211 */ 6212 void unregister_netdevice_many(struct list_head *head) 6213 { 6214 struct net_device *dev; 6215 6216 if (!list_empty(head)) { 6217 rollback_registered_many(head); 6218 list_for_each_entry(dev, head, unreg_list) 6219 net_set_todo(dev); 6220 } 6221 } 6222 EXPORT_SYMBOL(unregister_netdevice_many); 6223 6224 /** 6225 * unregister_netdev - remove device from the kernel 6226 * @dev: device 6227 * 6228 * This function shuts down a device interface and removes it 6229 * from the kernel tables. 6230 * 6231 * This is just a wrapper for unregister_netdevice that takes 6232 * the rtnl semaphore. In general you want to use this and not 6233 * unregister_netdevice. 6234 */ 6235 void unregister_netdev(struct net_device *dev) 6236 { 6237 rtnl_lock(); 6238 unregister_netdevice(dev); 6239 rtnl_unlock(); 6240 } 6241 EXPORT_SYMBOL(unregister_netdev); 6242 6243 /** 6244 * dev_change_net_namespace - move device to different nethost namespace 6245 * @dev: device 6246 * @net: network namespace 6247 * @pat: If not NULL name pattern to try if the current device name 6248 * is already taken in the destination network namespace. 6249 * 6250 * This function shuts down a device interface and moves it 6251 * to a new network namespace. On success 0 is returned, on 6252 * a failure a netagive errno code is returned. 6253 * 6254 * Callers must hold the rtnl semaphore. 
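 *
 * This is the operation behind moving an interface into another
 * namespace from userspace (typically via rtnetlink, e.g.
 * "ip link set DEV netns PID").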
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice() and
	 * unregister_netdevice().
	 */

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from the device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy this device.
	 * They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
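/*
 * Example (illustrative sketch): moving a device into another network
 * namespace from kernel code. The caller is assumed to have looked up and
 * taken a reference on "target" (for instance via get_net_ns_by_pid()) and
 * to release it afterwards; foo_move is hypothetical.
 *
 *	static int foo_move(struct net_device *dev, struct net *target)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		// fall back to the "eth%d" pattern if the current name clashes
 *		err = dev_change_net_namespace(dev, target, "eth%d");
 *		rtnl_unlock();
 *
 *		return err;
 *	}
 */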
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
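/*
 * Example (illustrative sketch) for netdev_increment_features() above: a
 * master device such as a bond or bridge would typically fold the feature
 * sets of its lower devices into its own along these lines. foo_master and
 * its slave list are hypothetical.
 *
 *	netdev_features_t features = foo_master->base_features;
 *	struct foo_slave *slave;
 *
 *	list_for_each_entry(slave, &foo_master->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     foo_master->feature_mask);
 *	foo_master->dev->features = features;
 */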
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine the network driver name for the device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
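/*
 * Example (illustrative sketch) for the netdev_* printk helpers generated
 * above: drivers log through them so that each message is automatically
 * prefixed with the bus device, driver name and interface name. "speed"
 * and "queue" are hypothetical local variables.
 *
 *	netdev_info(dev, "link up, %u Mbps full duplex\n", speed);
 *	netdev_err(dev, "TX timeout on queue %d\n", queue);
 */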
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, maintain this invariant by keeping the loopback
	 * device as the first device on the list of network devices.
	 * Ensure the loopback device is the first device that appears and
	 * the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
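/*
 * Example (illustrative sketch): initcall ordering relative to
 * net_dev_init(). The networking core registers itself with
 * subsys_initcall(), so a built-in driver using module_init() (which maps
 * to device_initcall()) runs later and can safely call register_netdev()
 * from its init routine. foo_init and foo_probe are hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		return foo_probe();	// may allocate and register_netdev()
 *	}
 *	module_init(foo_init);
 */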