1 /* 2 * NET3 Protocol independent device support routines. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 * 9 * Derived from the non IP parts of dev.c 1.0.19 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 13 * 14 * Additional Authors: 15 * Florian la Roche <rzsfl@rz.uni-sb.de> 16 * Alan Cox <gw4pts@gw4pts.ampr.org> 17 * David Hinds <dahinds@users.sourceforge.net> 18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 19 * Adam Sulmicki <adam@cfar.umd.edu> 20 * Pekka Riikonen <priikone@poesidon.pspt.fi> 21 * 22 * Changes: 23 * D.J. Barrow : Fixed bug where dev->refcnt gets set 24 * to 2 if register_netdev gets called 25 * before net_dev_init & also removed a 26 * few lines of code in the process. 27 * Alan Cox : device private ioctl copies fields back. 28 * Alan Cox : Transmit queue code does relevant 29 * stunts to keep the queue safe. 30 * Alan Cox : Fixed double lock. 31 * Alan Cox : Fixed promisc NULL pointer trap 32 * ???????? : Support the full private ioctl range 33 * Alan Cox : Moved ioctl permission check into 34 * drivers 35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI 36 * Alan Cox : 100 backlog just doesn't cut it when 37 * you start doing multicast video 8) 38 * Alan Cox : Rewrote net_bh and list manager. 39 * Alan Cox : Fix ETH_P_ALL echoback lengths. 40 * Alan Cox : Took out transmit every packet pass 41 * Saved a few bytes in the ioctl handler 42 * Alan Cox : Network driver sets packet type before 43 * calling netif_rx. Saves a function 44 * call a packet. 45 * Alan Cox : Hashed net_bh() 46 * Richard Kooijman: Timestamp fixes. 47 * Alan Cox : Wrong field in SIOCGIFDSTADDR 48 * Alan Cox : Device lock protection. 49 * Alan Cox : Fixed nasty side effect of device close 50 * changes. 51 * Rudi Cilibrasi : Pass the right thing to 52 * set_mac_address() 53 * Dave Miller : 32bit quantity for the device lock to 54 * make it work out on a Sparc. 55 * Bjorn Ekwall : Added KERNELD hack. 56 * Alan Cox : Cleaned up the backlog initialise. 57 * Craig Metz : SIOCGIFCONF fix if space for under 58 * 1 device. 59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there 60 * is no device open function. 61 * Andi Kleen : Fix error reporting for SIOCGIFCONF 62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF 63 * Cyrus Durgin : Cleaned for KMOD 64 * Adam Sulmicki : Bug Fix : Network Device Unload 65 * A network device unload needs to purge 66 * the backlog queue. 
67 * Paul Rusty Russell : SIOCSIFNAME 68 * Pekka Riikonen : Netdev boot-time settings code 69 * Andrew Morton : Make unregister_netdevice wait 70 * indefinitely on dev->refcnt 71 * J Hadi Salim : - Backlog queue sampling 72 * - netif_rx() feedback 73 */ 74 75 #include <linux/uaccess.h> 76 #include <linux/bitops.h> 77 #include <linux/capability.h> 78 #include <linux/cpu.h> 79 #include <linux/types.h> 80 #include <linux/kernel.h> 81 #include <linux/hash.h> 82 #include <linux/slab.h> 83 #include <linux/sched.h> 84 #include <linux/sched/mm.h> 85 #include <linux/mutex.h> 86 #include <linux/string.h> 87 #include <linux/mm.h> 88 #include <linux/socket.h> 89 #include <linux/sockios.h> 90 #include <linux/errno.h> 91 #include <linux/interrupt.h> 92 #include <linux/if_ether.h> 93 #include <linux/netdevice.h> 94 #include <linux/etherdevice.h> 95 #include <linux/ethtool.h> 96 #include <linux/notifier.h> 97 #include <linux/skbuff.h> 98 #include <linux/bpf.h> 99 #include <linux/bpf_trace.h> 100 #include <net/net_namespace.h> 101 #include <net/sock.h> 102 #include <net/busy_poll.h> 103 #include <linux/rtnetlink.h> 104 #include <linux/stat.h> 105 #include <net/dst.h> 106 #include <net/dst_metadata.h> 107 #include <net/pkt_sched.h> 108 #include <net/pkt_cls.h> 109 #include <net/checksum.h> 110 #include <net/xfrm.h> 111 #include <linux/highmem.h> 112 #include <linux/init.h> 113 #include <linux/module.h> 114 #include <linux/netpoll.h> 115 #include <linux/rcupdate.h> 116 #include <linux/delay.h> 117 #include <net/iw_handler.h> 118 #include <asm/current.h> 119 #include <linux/audit.h> 120 #include <linux/dmaengine.h> 121 #include <linux/err.h> 122 #include <linux/ctype.h> 123 #include <linux/if_arp.h> 124 #include <linux/if_vlan.h> 125 #include <linux/ip.h> 126 #include <net/ip.h> 127 #include <net/mpls.h> 128 #include <linux/ipv6.h> 129 #include <linux/in.h> 130 #include <linux/jhash.h> 131 #include <linux/random.h> 132 #include <trace/events/napi.h> 133 #include <trace/events/net.h> 134 #include <trace/events/skb.h> 135 #include <linux/pci.h> 136 #include <linux/inetdevice.h> 137 #include <linux/cpu_rmap.h> 138 #include <linux/static_key.h> 139 #include <linux/hashtable.h> 140 #include <linux/vmalloc.h> 141 #include <linux/if_macvlan.h> 142 #include <linux/errqueue.h> 143 #include <linux/hrtimer.h> 144 #include <linux/netfilter_ingress.h> 145 #include <linux/crash_dump.h> 146 #include <linux/sctp.h> 147 #include <net/udp_tunnel.h> 148 #include <linux/net_namespace.h> 149 150 #include "net-sysfs.h" 151 152 #define MAX_GRO_SKBS 8 153 154 /* This should be increased if a protocol with a bigger head is added. */ 155 #define GRO_MAX_HEAD (MAX_HEADER + 128) 156 157 static DEFINE_SPINLOCK(ptype_lock); 158 static DEFINE_SPINLOCK(offload_lock); 159 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 160 struct list_head ptype_all __read_mostly; /* Taps */ 161 static struct list_head offload_base __read_mostly; 162 163 static int netif_rx_internal(struct sk_buff *skb); 164 static int call_netdevice_notifiers_info(unsigned long val, 165 struct netdev_notifier_info *info); 166 static struct napi_struct *napi_by_id(unsigned int napi_id); 167 168 /* 169 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 170 * semaphore. 171 * 172 * Pure readers hold dev_base_lock for reading, or rcu_read_lock() 173 * 174 * Writers must hold the rtnl semaphore while they loop through the 175 * dev_base_head list, and hold dev_base_lock for writing when they do the 176 * actual updates. 
This allows pure readers to access the list even 177 * while a writer is preparing to update it. 178 * 179 * To put it another way, dev_base_lock is held for writing only to 180 * protect against pure readers; the rtnl semaphore provides the 181 * protection against other writers. 182 * 183 * See, for example usages, register_netdevice() and 184 * unregister_netdevice(), which must be called with the rtnl 185 * semaphore held. 186 */ 187 DEFINE_RWLOCK(dev_base_lock); 188 EXPORT_SYMBOL(dev_base_lock); 189 190 static DEFINE_MUTEX(ifalias_mutex); 191 192 /* protects napi_hash addition/deletion and napi_gen_id */ 193 static DEFINE_SPINLOCK(napi_hash_lock); 194 195 static unsigned int napi_gen_id = NR_CPUS; 196 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 197 198 static seqcount_t devnet_rename_seq; 199 200 static inline void dev_base_seq_inc(struct net *net) 201 { 202 while (++net->dev_base_seq == 0) 203 ; 204 } 205 206 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 207 { 208 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 209 210 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 211 } 212 213 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 214 { 215 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 216 } 217 218 static inline void rps_lock(struct softnet_data *sd) 219 { 220 #ifdef CONFIG_RPS 221 spin_lock(&sd->input_pkt_queue.lock); 222 #endif 223 } 224 225 static inline void rps_unlock(struct softnet_data *sd) 226 { 227 #ifdef CONFIG_RPS 228 spin_unlock(&sd->input_pkt_queue.lock); 229 #endif 230 } 231 232 /* Device list insertion */ 233 static void list_netdevice(struct net_device *dev) 234 { 235 struct net *net = dev_net(dev); 236 237 ASSERT_RTNL(); 238 239 write_lock_bh(&dev_base_lock); 240 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 241 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); 242 hlist_add_head_rcu(&dev->index_hlist, 243 dev_index_hash(net, dev->ifindex)); 244 write_unlock_bh(&dev_base_lock); 245 246 dev_base_seq_inc(net); 247 } 248 249 /* Device list removal 250 * caller must respect a RCU grace period before freeing/reusing dev 251 */ 252 static void unlist_netdevice(struct net_device *dev) 253 { 254 ASSERT_RTNL(); 255 256 /* Unlink dev from the device chain */ 257 write_lock_bh(&dev_base_lock); 258 list_del_rcu(&dev->dev_list); 259 hlist_del_rcu(&dev->name_hlist); 260 hlist_del_rcu(&dev->index_hlist); 261 write_unlock_bh(&dev_base_lock); 262 263 dev_base_seq_inc(dev_net(dev)); 264 } 265 266 /* 267 * Our notifier list 268 */ 269 270 static RAW_NOTIFIER_HEAD(netdev_chain); 271 272 /* 273 * Device drivers call our routines to queue packets here. We empty the 274 * queue in the local softnet handler. 
275 */ 276 277 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 278 EXPORT_PER_CPU_SYMBOL(softnet_data); 279 280 #ifdef CONFIG_LOCKDEP 281 /* 282 * register_netdevice() inits txq->_xmit_lock and sets lockdep class 283 * according to dev->type 284 */ 285 static const unsigned short netdev_lock_type[] = { 286 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 287 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 288 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 289 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 290 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 291 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 292 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 293 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 294 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 295 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 296 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 297 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 298 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 299 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 300 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 301 302 static const char *const netdev_lock_name[] = { 303 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 304 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 305 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 306 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 307 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 308 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 309 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 310 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 311 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 312 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 313 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 314 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 315 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 316 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 317 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 318 319 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 320 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 321 322 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 323 { 324 int i; 325 326 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 327 if (netdev_lock_type[i] == dev_type) 328 return i; 329 /* the last key is used by default */ 330 return ARRAY_SIZE(netdev_lock_type) - 1; 331 } 332 333 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 334 unsigned short dev_type) 335 { 336 int i; 337 338 i = netdev_lock_pos(dev_type); 339 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 340 netdev_lock_name[i]); 341 } 342 343 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 344 { 345 int i; 346 347 i = netdev_lock_pos(dev->type); 348 lockdep_set_class_and_name(&dev->addr_list_lock, 349 &netdev_addr_lock_key[i], 350 netdev_lock_name[i]); 351 } 352 #else 353 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 354 unsigned short dev_type) 355 { 356 } 357 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 358 { 359 } 360 #endif 361 362 /******************************************************************************* 363 * 364 * 
Protocol management and registration routines 365 * 366 *******************************************************************************/ 367 368 369 /* 370 * Add a protocol ID to the list. Now that the input handler is 371 * smarter we can dispense with all the messy stuff that used to be 372 * here. 373 * 374 * BEWARE!!! Protocol handlers, mangling input packets, 375 * MUST BE last in hash buckets and checking protocol handlers 376 * MUST start from promiscuous ptype_all chain in net_bh. 377 * It is true now, do not change it. 378 * Explanation follows: if protocol handler, mangling packet, will 379 * be the first on list, it is not able to sense, that packet 380 * is cloned and should be copied-on-write, so that it will 381 * change it and subsequent readers will get broken packet. 382 * --ANK (980803) 383 */ 384 385 static inline struct list_head *ptype_head(const struct packet_type *pt) 386 { 387 if (pt->type == htons(ETH_P_ALL)) 388 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 389 else 390 return pt->dev ? &pt->dev->ptype_specific : 391 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 392 } 393 394 /** 395 * dev_add_pack - add packet handler 396 * @pt: packet type declaration 397 * 398 * Add a protocol handler to the networking stack. The passed &packet_type 399 * is linked into kernel lists and may not be freed until it has been 400 * removed from the kernel lists. 401 * 402 * This call does not sleep therefore it can not 403 * guarantee all CPU's that are in middle of receiving packets 404 * will see the new packet type (until the next received packet). 405 */ 406 407 void dev_add_pack(struct packet_type *pt) 408 { 409 struct list_head *head = ptype_head(pt); 410 411 spin_lock(&ptype_lock); 412 list_add_rcu(&pt->list, head); 413 spin_unlock(&ptype_lock); 414 } 415 EXPORT_SYMBOL(dev_add_pack); 416 417 /** 418 * __dev_remove_pack - remove packet handler 419 * @pt: packet type declaration 420 * 421 * Remove a protocol handler that was previously added to the kernel 422 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 423 * from the kernel lists and can be freed or reused once this function 424 * returns. 425 * 426 * The packet type might still be in use by receivers 427 * and must not be freed until after all the CPU's have gone 428 * through a quiescent state. 429 */ 430 void __dev_remove_pack(struct packet_type *pt) 431 { 432 struct list_head *head = ptype_head(pt); 433 struct packet_type *pt1; 434 435 spin_lock(&ptype_lock); 436 437 list_for_each_entry(pt1, head, list) { 438 if (pt == pt1) { 439 list_del_rcu(&pt->list); 440 goto out; 441 } 442 } 443 444 pr_warn("dev_remove_pack: %p not found\n", pt); 445 out: 446 spin_unlock(&ptype_lock); 447 } 448 EXPORT_SYMBOL(__dev_remove_pack); 449 450 /** 451 * dev_remove_pack - remove packet handler 452 * @pt: packet type declaration 453 * 454 * Remove a protocol handler that was previously added to the kernel 455 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 456 * from the kernel lists and can be freed or reused once this function 457 * returns. 458 * 459 * This call sleeps to guarantee that no CPU is looking at the packet 460 * type after return. 461 */ 462 void dev_remove_pack(struct packet_type *pt) 463 { 464 __dev_remove_pack(pt); 465 466 synchronize_net(); 467 } 468 EXPORT_SYMBOL(dev_remove_pack); 469 470 471 /** 472 * dev_add_offload - register offload handlers 473 * @po: protocol offload declaration 474 * 475 * Add protocol offload handlers to the networking stack. 
The passed 476 * &proto_offload is linked into kernel lists and may not be freed until 477 * it has been removed from the kernel lists. 478 * 479 * This call does not sleep therefore it can not 480 * guarantee all CPU's that are in middle of receiving packets 481 * will see the new offload handlers (until the next received packet). 482 */ 483 void dev_add_offload(struct packet_offload *po) 484 { 485 struct packet_offload *elem; 486 487 spin_lock(&offload_lock); 488 list_for_each_entry(elem, &offload_base, list) { 489 if (po->priority < elem->priority) 490 break; 491 } 492 list_add_rcu(&po->list, elem->list.prev); 493 spin_unlock(&offload_lock); 494 } 495 EXPORT_SYMBOL(dev_add_offload); 496 497 /** 498 * __dev_remove_offload - remove offload handler 499 * @po: packet offload declaration 500 * 501 * Remove a protocol offload handler that was previously added to the 502 * kernel offload handlers by dev_add_offload(). The passed &offload_type 503 * is removed from the kernel lists and can be freed or reused once this 504 * function returns. 505 * 506 * The packet type might still be in use by receivers 507 * and must not be freed until after all the CPU's have gone 508 * through a quiescent state. 509 */ 510 static void __dev_remove_offload(struct packet_offload *po) 511 { 512 struct list_head *head = &offload_base; 513 struct packet_offload *po1; 514 515 spin_lock(&offload_lock); 516 517 list_for_each_entry(po1, head, list) { 518 if (po == po1) { 519 list_del_rcu(&po->list); 520 goto out; 521 } 522 } 523 524 pr_warn("dev_remove_offload: %p not found\n", po); 525 out: 526 spin_unlock(&offload_lock); 527 } 528 529 /** 530 * dev_remove_offload - remove packet offload handler 531 * @po: packet offload declaration 532 * 533 * Remove a packet offload handler that was previously added to the kernel 534 * offload handlers by dev_add_offload(). The passed &offload_type is 535 * removed from the kernel lists and can be freed or reused once this 536 * function returns. 537 * 538 * This call sleeps to guarantee that no CPU is looking at the packet 539 * type after return. 540 */ 541 void dev_remove_offload(struct packet_offload *po) 542 { 543 __dev_remove_offload(po); 544 545 synchronize_net(); 546 } 547 EXPORT_SYMBOL(dev_remove_offload); 548 549 /****************************************************************************** 550 * 551 * Device Boot-time Settings Routines 552 * 553 ******************************************************************************/ 554 555 /* Boot time configuration table */ 556 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; 557 558 /** 559 * netdev_boot_setup_add - add new setup entry 560 * @name: name of the device 561 * @map: configured settings for the device 562 * 563 * Adds new setup entry to the dev_boot_setup list. The function 564 * returns 0 on error and 1 on success. This is a generic routine to 565 * all netdevices. 566 */ 567 static int netdev_boot_setup_add(char *name, struct ifmap *map) 568 { 569 struct netdev_boot_setup *s; 570 int i; 571 572 s = dev_boot_setup; 573 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 574 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { 575 memset(s[i].name, 0, sizeof(s[i].name)); 576 strlcpy(s[i].name, name, IFNAMSIZ); 577 memcpy(&s[i].map, map, sizeof(s[i].map)); 578 break; 579 } 580 } 581 582 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; 583 } 584 585 /** 586 * netdev_boot_setup_check - check boot time settings 587 * @dev: the netdevice 588 * 589 * Check boot time settings for the device. 
590 * The found settings are set for the device to be used 591 * later in the device probing. 592 * Returns 0 if no settings found, 1 if they are. 593 */ 594 int netdev_boot_setup_check(struct net_device *dev) 595 { 596 struct netdev_boot_setup *s = dev_boot_setup; 597 int i; 598 599 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 600 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && 601 !strcmp(dev->name, s[i].name)) { 602 dev->irq = s[i].map.irq; 603 dev->base_addr = s[i].map.base_addr; 604 dev->mem_start = s[i].map.mem_start; 605 dev->mem_end = s[i].map.mem_end; 606 return 1; 607 } 608 } 609 return 0; 610 } 611 EXPORT_SYMBOL(netdev_boot_setup_check); 612 613 614 /** 615 * netdev_boot_base - get address from boot time settings 616 * @prefix: prefix for network device 617 * @unit: id for network device 618 * 619 * Check boot time settings for the base address of device. 620 * The found settings are set for the device to be used 621 * later in the device probing. 622 * Returns 0 if no settings found. 623 */ 624 unsigned long netdev_boot_base(const char *prefix, int unit) 625 { 626 const struct netdev_boot_setup *s = dev_boot_setup; 627 char name[IFNAMSIZ]; 628 int i; 629 630 sprintf(name, "%s%d", prefix, unit); 631 632 /* 633 * If device already registered then return base of 1 634 * to indicate not to probe for this interface 635 */ 636 if (__dev_get_by_name(&init_net, name)) 637 return 1; 638 639 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) 640 if (!strcmp(name, s[i].name)) 641 return s[i].map.base_addr; 642 return 0; 643 } 644 645 /* 646 * Saves at boot time configured settings for any netdevice. 647 */ 648 int __init netdev_boot_setup(char *str) 649 { 650 int ints[5]; 651 struct ifmap map; 652 653 str = get_options(str, ARRAY_SIZE(ints), ints); 654 if (!str || !*str) 655 return 0; 656 657 /* Save settings */ 658 memset(&map, 0, sizeof(map)); 659 if (ints[0] > 0) 660 map.irq = ints[1]; 661 if (ints[0] > 1) 662 map.base_addr = ints[2]; 663 if (ints[0] > 2) 664 map.mem_start = ints[3]; 665 if (ints[0] > 3) 666 map.mem_end = ints[4]; 667 668 /* Add new entry to the list */ 669 return netdev_boot_setup_add(str, &map); 670 } 671 672 __setup("netdev=", netdev_boot_setup); 673 674 /******************************************************************************* 675 * 676 * Device Interface Subroutines 677 * 678 *******************************************************************************/ 679 680 /** 681 * dev_get_iflink - get 'iflink' value of a interface 682 * @dev: targeted interface 683 * 684 * Indicates the ifindex the interface is linked to. 685 * Physical interfaces have the same 'ifindex' and 'iflink' values. 686 */ 687 688 int dev_get_iflink(const struct net_device *dev) 689 { 690 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 691 return dev->netdev_ops->ndo_get_iflink(dev); 692 693 return dev->ifindex; 694 } 695 EXPORT_SYMBOL(dev_get_iflink); 696 697 /** 698 * dev_fill_metadata_dst - Retrieve tunnel egress information. 699 * @dev: targeted interface 700 * @skb: The packet. 701 * 702 * For better visibility of tunnel traffic OVS needs to retrieve 703 * egress tunnel information for a packet. Following API allows 704 * user to get this info. 
705 */ 706 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 707 { 708 struct ip_tunnel_info *info; 709 710 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 711 return -EINVAL; 712 713 info = skb_tunnel_info_unclone(skb); 714 if (!info) 715 return -ENOMEM; 716 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 717 return -EINVAL; 718 719 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 720 } 721 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 722 723 /** 724 * __dev_get_by_name - find a device by its name 725 * @net: the applicable net namespace 726 * @name: name to find 727 * 728 * Find an interface by name. Must be called under RTNL semaphore 729 * or @dev_base_lock. If the name is found a pointer to the device 730 * is returned. If the name is not found then %NULL is returned. The 731 * reference counters are not incremented so the caller must be 732 * careful with locks. 733 */ 734 735 struct net_device *__dev_get_by_name(struct net *net, const char *name) 736 { 737 struct net_device *dev; 738 struct hlist_head *head = dev_name_hash(net, name); 739 740 hlist_for_each_entry(dev, head, name_hlist) 741 if (!strncmp(dev->name, name, IFNAMSIZ)) 742 return dev; 743 744 return NULL; 745 } 746 EXPORT_SYMBOL(__dev_get_by_name); 747 748 /** 749 * dev_get_by_name_rcu - find a device by its name 750 * @net: the applicable net namespace 751 * @name: name to find 752 * 753 * Find an interface by name. 754 * If the name is found a pointer to the device is returned. 755 * If the name is not found then %NULL is returned. 756 * The reference counters are not incremented so the caller must be 757 * careful with locks. The caller must hold RCU lock. 758 */ 759 760 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 761 { 762 struct net_device *dev; 763 struct hlist_head *head = dev_name_hash(net, name); 764 765 hlist_for_each_entry_rcu(dev, head, name_hlist) 766 if (!strncmp(dev->name, name, IFNAMSIZ)) 767 return dev; 768 769 return NULL; 770 } 771 EXPORT_SYMBOL(dev_get_by_name_rcu); 772 773 /** 774 * dev_get_by_name - find a device by its name 775 * @net: the applicable net namespace 776 * @name: name to find 777 * 778 * Find an interface by name. This can be called from any 779 * context and does its own locking. The returned handle has 780 * the usage count incremented and the caller must use dev_put() to 781 * release it when it is no longer needed. %NULL is returned if no 782 * matching device is found. 783 */ 784 785 struct net_device *dev_get_by_name(struct net *net, const char *name) 786 { 787 struct net_device *dev; 788 789 rcu_read_lock(); 790 dev = dev_get_by_name_rcu(net, name); 791 if (dev) 792 dev_hold(dev); 793 rcu_read_unlock(); 794 return dev; 795 } 796 EXPORT_SYMBOL(dev_get_by_name); 797 798 /** 799 * __dev_get_by_index - find a device by its ifindex 800 * @net: the applicable net namespace 801 * @ifindex: index of device 802 * 803 * Search for an interface by index. Returns %NULL if the device 804 * is not found or a pointer to the device. The device has not 805 * had its reference counter increased so the caller must be careful 806 * about locking. The caller must hold either the RTNL semaphore 807 * or @dev_base_lock. 
808 */ 809 810 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 811 { 812 struct net_device *dev; 813 struct hlist_head *head = dev_index_hash(net, ifindex); 814 815 hlist_for_each_entry(dev, head, index_hlist) 816 if (dev->ifindex == ifindex) 817 return dev; 818 819 return NULL; 820 } 821 EXPORT_SYMBOL(__dev_get_by_index); 822 823 /** 824 * dev_get_by_index_rcu - find a device by its ifindex 825 * @net: the applicable net namespace 826 * @ifindex: index of device 827 * 828 * Search for an interface by index. Returns %NULL if the device 829 * is not found or a pointer to the device. The device has not 830 * had its reference counter increased so the caller must be careful 831 * about locking. The caller must hold RCU lock. 832 */ 833 834 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 835 { 836 struct net_device *dev; 837 struct hlist_head *head = dev_index_hash(net, ifindex); 838 839 hlist_for_each_entry_rcu(dev, head, index_hlist) 840 if (dev->ifindex == ifindex) 841 return dev; 842 843 return NULL; 844 } 845 EXPORT_SYMBOL(dev_get_by_index_rcu); 846 847 848 /** 849 * dev_get_by_index - find a device by its ifindex 850 * @net: the applicable net namespace 851 * @ifindex: index of device 852 * 853 * Search for an interface by index. Returns NULL if the device 854 * is not found or a pointer to the device. The device returned has 855 * had a reference added and the pointer is safe until the user calls 856 * dev_put to indicate they have finished with it. 857 */ 858 859 struct net_device *dev_get_by_index(struct net *net, int ifindex) 860 { 861 struct net_device *dev; 862 863 rcu_read_lock(); 864 dev = dev_get_by_index_rcu(net, ifindex); 865 if (dev) 866 dev_hold(dev); 867 rcu_read_unlock(); 868 return dev; 869 } 870 EXPORT_SYMBOL(dev_get_by_index); 871 872 /** 873 * dev_get_by_napi_id - find a device by napi_id 874 * @napi_id: ID of the NAPI struct 875 * 876 * Search for an interface by NAPI ID. Returns %NULL if the device 877 * is not found or a pointer to the device. The device has not had 878 * its reference counter increased so the caller must be careful 879 * about locking. The caller must hold RCU lock. 880 */ 881 882 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 883 { 884 struct napi_struct *napi; 885 886 WARN_ON_ONCE(!rcu_read_lock_held()); 887 888 if (napi_id < MIN_NAPI_ID) 889 return NULL; 890 891 napi = napi_by_id(napi_id); 892 893 return napi ? napi->dev : NULL; 894 } 895 EXPORT_SYMBOL(dev_get_by_napi_id); 896 897 /** 898 * netdev_get_name - get a netdevice name, knowing its ifindex. 899 * @net: network namespace 900 * @name: a pointer to the buffer where the name will be stored. 901 * @ifindex: the ifindex of the interface to get the name from. 902 * 903 * The use of raw_seqcount_begin() and cond_resched() before 904 * retrying is required as we want to give the writers a chance 905 * to complete when CONFIG_PREEMPT is not set. 
906 */ 907 int netdev_get_name(struct net *net, char *name, int ifindex) 908 { 909 struct net_device *dev; 910 unsigned int seq; 911 912 retry: 913 seq = raw_seqcount_begin(&devnet_rename_seq); 914 rcu_read_lock(); 915 dev = dev_get_by_index_rcu(net, ifindex); 916 if (!dev) { 917 rcu_read_unlock(); 918 return -ENODEV; 919 } 920 921 strcpy(name, dev->name); 922 rcu_read_unlock(); 923 if (read_seqcount_retry(&devnet_rename_seq, seq)) { 924 cond_resched(); 925 goto retry; 926 } 927 928 return 0; 929 } 930 931 /** 932 * dev_getbyhwaddr_rcu - find a device by its hardware address 933 * @net: the applicable net namespace 934 * @type: media type of device 935 * @ha: hardware address 936 * 937 * Search for an interface by MAC address. Returns NULL if the device 938 * is not found or a pointer to the device. 939 * The caller must hold RCU or RTNL. 940 * The returned device has not had its ref count increased 941 * and the caller must therefore be careful about locking 942 * 943 */ 944 945 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 946 const char *ha) 947 { 948 struct net_device *dev; 949 950 for_each_netdev_rcu(net, dev) 951 if (dev->type == type && 952 !memcmp(dev->dev_addr, ha, dev->addr_len)) 953 return dev; 954 955 return NULL; 956 } 957 EXPORT_SYMBOL(dev_getbyhwaddr_rcu); 958 959 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 960 { 961 struct net_device *dev; 962 963 ASSERT_RTNL(); 964 for_each_netdev(net, dev) 965 if (dev->type == type) 966 return dev; 967 968 return NULL; 969 } 970 EXPORT_SYMBOL(__dev_getfirstbyhwtype); 971 972 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 973 { 974 struct net_device *dev, *ret = NULL; 975 976 rcu_read_lock(); 977 for_each_netdev_rcu(net, dev) 978 if (dev->type == type) { 979 dev_hold(dev); 980 ret = dev; 981 break; 982 } 983 rcu_read_unlock(); 984 return ret; 985 } 986 EXPORT_SYMBOL(dev_getfirstbyhwtype); 987 988 /** 989 * __dev_get_by_flags - find any device with given flags 990 * @net: the applicable net namespace 991 * @if_flags: IFF_* values 992 * @mask: bitmask of bits in if_flags to check 993 * 994 * Search for any interface with the given flags. Returns NULL if a device 995 * is not found or a pointer to the device. Must be called inside 996 * rtnl_lock(), and result refcount is unchanged. 997 */ 998 999 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, 1000 unsigned short mask) 1001 { 1002 struct net_device *dev, *ret; 1003 1004 ASSERT_RTNL(); 1005 1006 ret = NULL; 1007 for_each_netdev(net, dev) { 1008 if (((dev->flags ^ if_flags) & mask) == 0) { 1009 ret = dev; 1010 break; 1011 } 1012 } 1013 return ret; 1014 } 1015 EXPORT_SYMBOL(__dev_get_by_flags); 1016 1017 /** 1018 * dev_valid_name - check if name is okay for network device 1019 * @name: name string 1020 * 1021 * Network device names need to be valid file names to 1022 * to allow sysfs to work. We also disallow any kind of 1023 * whitespace. 
1024 */ 1025 bool dev_valid_name(const char *name) 1026 { 1027 if (*name == '\0') 1028 return false; 1029 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1030 return false; 1031 if (!strcmp(name, ".") || !strcmp(name, "..")) 1032 return false; 1033 1034 while (*name) { 1035 if (*name == '/' || *name == ':' || isspace(*name)) 1036 return false; 1037 name++; 1038 } 1039 return true; 1040 } 1041 EXPORT_SYMBOL(dev_valid_name); 1042 1043 /** 1044 * __dev_alloc_name - allocate a name for a device 1045 * @net: network namespace to allocate the device name in 1046 * @name: name format string 1047 * @buf: scratch buffer and result name string 1048 * 1049 * Passed a format string - eg "lt%d" it will try and find a suitable 1050 * id. It scans list of devices to build up a free map, then chooses 1051 * the first empty slot. The caller must hold the dev_base or rtnl lock 1052 * while allocating the name and adding the device in order to avoid 1053 * duplicates. 1054 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1055 * Returns the number of the unit assigned or a negative errno code. 1056 */ 1057 1058 static int __dev_alloc_name(struct net *net, const char *name, char *buf) 1059 { 1060 int i = 0; 1061 const char *p; 1062 const int max_netdevices = 8*PAGE_SIZE; 1063 unsigned long *inuse; 1064 struct net_device *d; 1065 1066 if (!dev_valid_name(name)) 1067 return -EINVAL; 1068 1069 p = strchr(name, '%'); 1070 if (p) { 1071 /* 1072 * Verify the string as this thing may have come from 1073 * the user. There must be either one "%d" and no other "%" 1074 * characters. 1075 */ 1076 if (p[1] != 'd' || strchr(p + 2, '%')) 1077 return -EINVAL; 1078 1079 /* Use one page as a bit array of possible slots */ 1080 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 1081 if (!inuse) 1082 return -ENOMEM; 1083 1084 for_each_netdev(net, d) { 1085 if (!sscanf(d->name, name, &i)) 1086 continue; 1087 if (i < 0 || i >= max_netdevices) 1088 continue; 1089 1090 /* avoid cases where sscanf is not exact inverse of printf */ 1091 snprintf(buf, IFNAMSIZ, name, i); 1092 if (!strncmp(buf, d->name, IFNAMSIZ)) 1093 set_bit(i, inuse); 1094 } 1095 1096 i = find_first_zero_bit(inuse, max_netdevices); 1097 free_page((unsigned long) inuse); 1098 } 1099 1100 snprintf(buf, IFNAMSIZ, name, i); 1101 if (!__dev_get_by_name(net, buf)) 1102 return i; 1103 1104 /* It is possible to run out of possible slots 1105 * when the name is long and there isn't enough space left 1106 * for the digits, or if all bits are used. 1107 */ 1108 return -ENFILE; 1109 } 1110 1111 static int dev_alloc_name_ns(struct net *net, 1112 struct net_device *dev, 1113 const char *name) 1114 { 1115 char buf[IFNAMSIZ]; 1116 int ret; 1117 1118 BUG_ON(!net); 1119 ret = __dev_alloc_name(net, name, buf); 1120 if (ret >= 0) 1121 strlcpy(dev->name, buf, IFNAMSIZ); 1122 return ret; 1123 } 1124 1125 /** 1126 * dev_alloc_name - allocate a name for a device 1127 * @dev: device 1128 * @name: name format string 1129 * 1130 * Passed a format string - eg "lt%d" it will try and find a suitable 1131 * id. It scans list of devices to build up a free map, then chooses 1132 * the first empty slot. The caller must hold the dev_base or rtnl lock 1133 * while allocating the name and adding the device in order to avoid 1134 * duplicates. 1135 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1136 * Returns the number of the unit assigned or a negative errno code. 
1137 */ 1138 1139 int dev_alloc_name(struct net_device *dev, const char *name) 1140 { 1141 return dev_alloc_name_ns(dev_net(dev), dev, name); 1142 } 1143 EXPORT_SYMBOL(dev_alloc_name); 1144 1145 int dev_get_valid_name(struct net *net, struct net_device *dev, 1146 const char *name) 1147 { 1148 BUG_ON(!net); 1149 1150 if (!dev_valid_name(name)) 1151 return -EINVAL; 1152 1153 if (strchr(name, '%')) 1154 return dev_alloc_name_ns(net, dev, name); 1155 else if (__dev_get_by_name(net, name)) 1156 return -EEXIST; 1157 else if (dev->name != name) 1158 strlcpy(dev->name, name, IFNAMSIZ); 1159 1160 return 0; 1161 } 1162 EXPORT_SYMBOL(dev_get_valid_name); 1163 1164 /** 1165 * dev_change_name - change name of a device 1166 * @dev: device 1167 * @newname: name (or format string) must be at least IFNAMSIZ 1168 * 1169 * Change name of a device, can pass format strings "eth%d". 1170 * for wildcarding. 1171 */ 1172 int dev_change_name(struct net_device *dev, const char *newname) 1173 { 1174 unsigned char old_assign_type; 1175 char oldname[IFNAMSIZ]; 1176 int err = 0; 1177 int ret; 1178 struct net *net; 1179 1180 ASSERT_RTNL(); 1181 BUG_ON(!dev_net(dev)); 1182 1183 net = dev_net(dev); 1184 if (dev->flags & IFF_UP) 1185 return -EBUSY; 1186 1187 write_seqcount_begin(&devnet_rename_seq); 1188 1189 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1190 write_seqcount_end(&devnet_rename_seq); 1191 return 0; 1192 } 1193 1194 memcpy(oldname, dev->name, IFNAMSIZ); 1195 1196 err = dev_get_valid_name(net, dev, newname); 1197 if (err < 0) { 1198 write_seqcount_end(&devnet_rename_seq); 1199 return err; 1200 } 1201 1202 if (oldname[0] && !strchr(oldname, '%')) 1203 netdev_info(dev, "renamed from %s\n", oldname); 1204 1205 old_assign_type = dev->name_assign_type; 1206 dev->name_assign_type = NET_NAME_RENAMED; 1207 1208 rollback: 1209 ret = device_rename(&dev->dev, dev->name); 1210 if (ret) { 1211 memcpy(dev->name, oldname, IFNAMSIZ); 1212 dev->name_assign_type = old_assign_type; 1213 write_seqcount_end(&devnet_rename_seq); 1214 return ret; 1215 } 1216 1217 write_seqcount_end(&devnet_rename_seq); 1218 1219 netdev_adjacent_rename_links(dev, oldname); 1220 1221 write_lock_bh(&dev_base_lock); 1222 hlist_del_rcu(&dev->name_hlist); 1223 write_unlock_bh(&dev_base_lock); 1224 1225 synchronize_rcu(); 1226 1227 write_lock_bh(&dev_base_lock); 1228 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); 1229 write_unlock_bh(&dev_base_lock); 1230 1231 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1232 ret = notifier_to_errno(ret); 1233 1234 if (ret) { 1235 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1236 if (err >= 0) { 1237 err = ret; 1238 write_seqcount_begin(&devnet_rename_seq); 1239 memcpy(dev->name, oldname, IFNAMSIZ); 1240 memcpy(oldname, newname, IFNAMSIZ); 1241 dev->name_assign_type = old_assign_type; 1242 old_assign_type = NET_NAME_RENAMED; 1243 goto rollback; 1244 } else { 1245 pr_err("%s: name change rollback failed: %d\n", 1246 dev->name, ret); 1247 } 1248 } 1249 1250 return err; 1251 } 1252 1253 /** 1254 * dev_set_alias - change ifalias of a device 1255 * @dev: device 1256 * @alias: name up to IFALIASZ 1257 * @len: limit of bytes to copy from info 1258 * 1259 * Set ifalias for a device, 1260 */ 1261 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1262 { 1263 struct dev_ifalias *new_alias = NULL; 1264 1265 if (len >= IFALIASZ) 1266 return -EINVAL; 1267 1268 if (len) { 1269 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1270 if (!new_alias) 
1271 return -ENOMEM; 1272 1273 memcpy(new_alias->ifalias, alias, len); 1274 new_alias->ifalias[len] = 0; 1275 } 1276 1277 mutex_lock(&ifalias_mutex); 1278 rcu_swap_protected(dev->ifalias, new_alias, 1279 mutex_is_locked(&ifalias_mutex)); 1280 mutex_unlock(&ifalias_mutex); 1281 1282 if (new_alias) 1283 kfree_rcu(new_alias, rcuhead); 1284 1285 return len; 1286 } 1287 EXPORT_SYMBOL(dev_set_alias); 1288 1289 /** 1290 * dev_get_alias - get ifalias of a device 1291 * @dev: device 1292 * @name: buffer to store name of ifalias 1293 * @len: size of buffer 1294 * 1295 * get ifalias for a device. Caller must make sure dev cannot go 1296 * away, e.g. rcu read lock or own a reference count to device. 1297 */ 1298 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1299 { 1300 const struct dev_ifalias *alias; 1301 int ret = 0; 1302 1303 rcu_read_lock(); 1304 alias = rcu_dereference(dev->ifalias); 1305 if (alias) 1306 ret = snprintf(name, len, "%s", alias->ifalias); 1307 rcu_read_unlock(); 1308 1309 return ret; 1310 } 1311 1312 /** 1313 * netdev_features_change - device changes features 1314 * @dev: device to cause notification 1315 * 1316 * Called to indicate a device has changed features. 1317 */ 1318 void netdev_features_change(struct net_device *dev) 1319 { 1320 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1321 } 1322 EXPORT_SYMBOL(netdev_features_change); 1323 1324 /** 1325 * netdev_state_change - device changes state 1326 * @dev: device to cause notification 1327 * 1328 * Called to indicate a device has changed state. This function calls 1329 * the notifier chains for netdev_chain and sends a NEWLINK message 1330 * to the routing socket. 1331 */ 1332 void netdev_state_change(struct net_device *dev) 1333 { 1334 if (dev->flags & IFF_UP) { 1335 struct netdev_notifier_change_info change_info = { 1336 .info.dev = dev, 1337 }; 1338 1339 call_netdevice_notifiers_info(NETDEV_CHANGE, 1340 &change_info.info); 1341 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1342 } 1343 } 1344 EXPORT_SYMBOL(netdev_state_change); 1345 1346 /** 1347 * netdev_notify_peers - notify network peers about existence of @dev 1348 * @dev: network device 1349 * 1350 * Generate traffic such that interested network peers are aware of 1351 * @dev, such as by generating a gratuitous ARP. This may be used when 1352 * a device wants to inform the rest of the network about some sort of 1353 * reconfiguration such as a failover event or virtual machine 1354 * migration. 1355 */ 1356 void netdev_notify_peers(struct net_device *dev) 1357 { 1358 rtnl_lock(); 1359 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1360 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1361 rtnl_unlock(); 1362 } 1363 EXPORT_SYMBOL(netdev_notify_peers); 1364 1365 static int __dev_open(struct net_device *dev) 1366 { 1367 const struct net_device_ops *ops = dev->netdev_ops; 1368 int ret; 1369 1370 ASSERT_RTNL(); 1371 1372 if (!netif_device_present(dev)) 1373 return -ENODEV; 1374 1375 /* Block netpoll from trying to do any rx path servicing. 
1376 * If we don't do this there is a chance ndo_poll_controller 1377 * or ndo_poll may be running while we open the device 1378 */ 1379 netpoll_poll_disable(dev); 1380 1381 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); 1382 ret = notifier_to_errno(ret); 1383 if (ret) 1384 return ret; 1385 1386 set_bit(__LINK_STATE_START, &dev->state); 1387 1388 if (ops->ndo_validate_addr) 1389 ret = ops->ndo_validate_addr(dev); 1390 1391 if (!ret && ops->ndo_open) 1392 ret = ops->ndo_open(dev); 1393 1394 netpoll_poll_enable(dev); 1395 1396 if (ret) 1397 clear_bit(__LINK_STATE_START, &dev->state); 1398 else { 1399 dev->flags |= IFF_UP; 1400 dev_set_rx_mode(dev); 1401 dev_activate(dev); 1402 add_device_randomness(dev->dev_addr, dev->addr_len); 1403 } 1404 1405 return ret; 1406 } 1407 1408 /** 1409 * dev_open - prepare an interface for use. 1410 * @dev: device to open 1411 * 1412 * Takes a device from down to up state. The device's private open 1413 * function is invoked and then the multicast lists are loaded. Finally 1414 * the device is moved into the up state and a %NETDEV_UP message is 1415 * sent to the netdev notifier chain. 1416 * 1417 * Calling this function on an active interface is a nop. On a failure 1418 * a negative errno code is returned. 1419 */ 1420 int dev_open(struct net_device *dev) 1421 { 1422 int ret; 1423 1424 if (dev->flags & IFF_UP) 1425 return 0; 1426 1427 ret = __dev_open(dev); 1428 if (ret < 0) 1429 return ret; 1430 1431 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1432 call_netdevice_notifiers(NETDEV_UP, dev); 1433 1434 return ret; 1435 } 1436 EXPORT_SYMBOL(dev_open); 1437 1438 static void __dev_close_many(struct list_head *head) 1439 { 1440 struct net_device *dev; 1441 1442 ASSERT_RTNL(); 1443 might_sleep(); 1444 1445 list_for_each_entry(dev, head, close_list) { 1446 /* Temporarily disable netpoll until the interface is down */ 1447 netpoll_poll_disable(dev); 1448 1449 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1450 1451 clear_bit(__LINK_STATE_START, &dev->state); 1452 1453 /* Synchronize to scheduled poll. We cannot touch poll list, it 1454 * can be even on different cpu. So just clear netif_running(). 1455 * 1456 * dev->stop() will invoke napi_disable() on all of it's 1457 * napi_struct instances on this device. 1458 */ 1459 smp_mb__after_atomic(); /* Commit netif_running(). */ 1460 } 1461 1462 dev_deactivate_many(head); 1463 1464 list_for_each_entry(dev, head, close_list) { 1465 const struct net_device_ops *ops = dev->netdev_ops; 1466 1467 /* 1468 * Call the device specific close. This cannot fail. 1469 * Only if device is UP 1470 * 1471 * We allow it to be called even after a DETACH hot-plug 1472 * event. 
1473 */ 1474 if (ops->ndo_stop) 1475 ops->ndo_stop(dev); 1476 1477 dev->flags &= ~IFF_UP; 1478 netpoll_poll_enable(dev); 1479 } 1480 } 1481 1482 static void __dev_close(struct net_device *dev) 1483 { 1484 LIST_HEAD(single); 1485 1486 list_add(&dev->close_list, &single); 1487 __dev_close_many(&single); 1488 list_del(&single); 1489 } 1490 1491 void dev_close_many(struct list_head *head, bool unlink) 1492 { 1493 struct net_device *dev, *tmp; 1494 1495 /* Remove the devices that don't need to be closed */ 1496 list_for_each_entry_safe(dev, tmp, head, close_list) 1497 if (!(dev->flags & IFF_UP)) 1498 list_del_init(&dev->close_list); 1499 1500 __dev_close_many(head); 1501 1502 list_for_each_entry_safe(dev, tmp, head, close_list) { 1503 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1504 call_netdevice_notifiers(NETDEV_DOWN, dev); 1505 if (unlink) 1506 list_del_init(&dev->close_list); 1507 } 1508 } 1509 EXPORT_SYMBOL(dev_close_many); 1510 1511 /** 1512 * dev_close - shutdown an interface. 1513 * @dev: device to shutdown 1514 * 1515 * This function moves an active device into down state. A 1516 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1517 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1518 * chain. 1519 */ 1520 void dev_close(struct net_device *dev) 1521 { 1522 if (dev->flags & IFF_UP) { 1523 LIST_HEAD(single); 1524 1525 list_add(&dev->close_list, &single); 1526 dev_close_many(&single, true); 1527 list_del(&single); 1528 } 1529 } 1530 EXPORT_SYMBOL(dev_close); 1531 1532 1533 /** 1534 * dev_disable_lro - disable Large Receive Offload on a device 1535 * @dev: device 1536 * 1537 * Disable Large Receive Offload (LRO) on a net device. Must be 1538 * called under RTNL. This is needed if received packets may be 1539 * forwarded to another interface. 1540 */ 1541 void dev_disable_lro(struct net_device *dev) 1542 { 1543 struct net_device *lower_dev; 1544 struct list_head *iter; 1545 1546 dev->wanted_features &= ~NETIF_F_LRO; 1547 netdev_update_features(dev); 1548 1549 if (unlikely(dev->features & NETIF_F_LRO)) 1550 netdev_WARN(dev, "failed to disable LRO!\n"); 1551 1552 netdev_for_each_lower_dev(dev, lower_dev, iter) 1553 dev_disable_lro(lower_dev); 1554 } 1555 EXPORT_SYMBOL(dev_disable_lro); 1556 1557 /** 1558 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1559 * @dev: device 1560 * 1561 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1562 * called under RTNL. This is needed if Generic XDP is installed on 1563 * the device. 
1564 */ 1565 static void dev_disable_gro_hw(struct net_device *dev) 1566 { 1567 dev->wanted_features &= ~NETIF_F_GRO_HW; 1568 netdev_update_features(dev); 1569 1570 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1571 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1572 } 1573 1574 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1575 { 1576 #define N(val) \ 1577 case NETDEV_##val: \ 1578 return "NETDEV_" __stringify(val); 1579 switch (cmd) { 1580 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1581 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1582 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1583 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) 1584 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) 1585 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) 1586 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1587 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1588 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1589 } 1590 #undef N 1591 return "UNKNOWN_NETDEV_EVENT"; 1592 } 1593 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1594 1595 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1596 struct net_device *dev) 1597 { 1598 struct netdev_notifier_info info = { 1599 .dev = dev, 1600 }; 1601 1602 return nb->notifier_call(nb, val, &info); 1603 } 1604 1605 static int dev_boot_phase = 1; 1606 1607 /** 1608 * register_netdevice_notifier - register a network notifier block 1609 * @nb: notifier 1610 * 1611 * Register a notifier to be called when network device events occur. 1612 * The notifier passed is linked into the kernel structures and must 1613 * not be reused until it has been unregistered. A negative errno code 1614 * is returned on a failure. 1615 * 1616 * When registered all registration and up events are replayed 1617 * to the new notifier to allow device to have a race free 1618 * view of the network device list. 1619 */ 1620 1621 int register_netdevice_notifier(struct notifier_block *nb) 1622 { 1623 struct net_device *dev; 1624 struct net_device *last; 1625 struct net *net; 1626 int err; 1627 1628 /* Close race with setup_net() and cleanup_net() */ 1629 down_write(&pernet_ops_rwsem); 1630 rtnl_lock(); 1631 err = raw_notifier_chain_register(&netdev_chain, nb); 1632 if (err) 1633 goto unlock; 1634 if (dev_boot_phase) 1635 goto unlock; 1636 for_each_net(net) { 1637 for_each_netdev(net, dev) { 1638 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1639 err = notifier_to_errno(err); 1640 if (err) 1641 goto rollback; 1642 1643 if (!(dev->flags & IFF_UP)) 1644 continue; 1645 1646 call_netdevice_notifier(nb, NETDEV_UP, dev); 1647 } 1648 } 1649 1650 unlock: 1651 rtnl_unlock(); 1652 up_write(&pernet_ops_rwsem); 1653 return err; 1654 1655 rollback: 1656 last = dev; 1657 for_each_net(net) { 1658 for_each_netdev(net, dev) { 1659 if (dev == last) 1660 goto outroll; 1661 1662 if (dev->flags & IFF_UP) { 1663 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1664 dev); 1665 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1666 } 1667 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1668 } 1669 } 1670 1671 outroll: 1672 raw_notifier_chain_unregister(&netdev_chain, nb); 1673 goto unlock; 1674 } 1675 EXPORT_SYMBOL(register_netdevice_notifier); 1676 1677 /** 1678 * unregister_netdevice_notifier - unregister a network notifier block 1679 * @nb: notifier 1680 * 1681 * Unregister a notifier previously registered by 1682 * register_netdevice_notifier(). 
The notifier is unlinked into the 1683 * kernel structures and may then be reused. A negative errno code 1684 * is returned on a failure. 1685 * 1686 * After unregistering unregister and down device events are synthesized 1687 * for all devices on the device list to the removed notifier to remove 1688 * the need for special case cleanup code. 1689 */ 1690 1691 int unregister_netdevice_notifier(struct notifier_block *nb) 1692 { 1693 struct net_device *dev; 1694 struct net *net; 1695 int err; 1696 1697 /* Close race with setup_net() and cleanup_net() */ 1698 down_write(&pernet_ops_rwsem); 1699 rtnl_lock(); 1700 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1701 if (err) 1702 goto unlock; 1703 1704 for_each_net(net) { 1705 for_each_netdev(net, dev) { 1706 if (dev->flags & IFF_UP) { 1707 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1708 dev); 1709 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1710 } 1711 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1712 } 1713 } 1714 unlock: 1715 rtnl_unlock(); 1716 up_write(&pernet_ops_rwsem); 1717 return err; 1718 } 1719 EXPORT_SYMBOL(unregister_netdevice_notifier); 1720 1721 /** 1722 * call_netdevice_notifiers_info - call all network notifier blocks 1723 * @val: value passed unmodified to notifier function 1724 * @info: notifier information data 1725 * 1726 * Call all network notifier blocks. Parameters and return value 1727 * are as for raw_notifier_call_chain(). 1728 */ 1729 1730 static int call_netdevice_notifiers_info(unsigned long val, 1731 struct netdev_notifier_info *info) 1732 { 1733 ASSERT_RTNL(); 1734 return raw_notifier_call_chain(&netdev_chain, val, info); 1735 } 1736 1737 /** 1738 * call_netdevice_notifiers - call all network notifier blocks 1739 * @val: value passed unmodified to notifier function 1740 * @dev: net_device pointer passed unmodified to notifier function 1741 * 1742 * Call all network notifier blocks. Parameters and return value 1743 * are as for raw_notifier_call_chain(). 
1744 */ 1745 1746 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1747 { 1748 struct netdev_notifier_info info = { 1749 .dev = dev, 1750 }; 1751 1752 return call_netdevice_notifiers_info(val, &info); 1753 } 1754 EXPORT_SYMBOL(call_netdevice_notifiers); 1755 1756 #ifdef CONFIG_NET_INGRESS 1757 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 1758 1759 void net_inc_ingress_queue(void) 1760 { 1761 static_branch_inc(&ingress_needed_key); 1762 } 1763 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 1764 1765 void net_dec_ingress_queue(void) 1766 { 1767 static_branch_dec(&ingress_needed_key); 1768 } 1769 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 1770 #endif 1771 1772 #ifdef CONFIG_NET_EGRESS 1773 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 1774 1775 void net_inc_egress_queue(void) 1776 { 1777 static_branch_inc(&egress_needed_key); 1778 } 1779 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 1780 1781 void net_dec_egress_queue(void) 1782 { 1783 static_branch_dec(&egress_needed_key); 1784 } 1785 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 1786 #endif 1787 1788 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 1789 #ifdef HAVE_JUMP_LABEL 1790 static atomic_t netstamp_needed_deferred; 1791 static atomic_t netstamp_wanted; 1792 static void netstamp_clear(struct work_struct *work) 1793 { 1794 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 1795 int wanted; 1796 1797 wanted = atomic_add_return(deferred, &netstamp_wanted); 1798 if (wanted > 0) 1799 static_branch_enable(&netstamp_needed_key); 1800 else 1801 static_branch_disable(&netstamp_needed_key); 1802 } 1803 static DECLARE_WORK(netstamp_work, netstamp_clear); 1804 #endif 1805 1806 void net_enable_timestamp(void) 1807 { 1808 #ifdef HAVE_JUMP_LABEL 1809 int wanted; 1810 1811 while (1) { 1812 wanted = atomic_read(&netstamp_wanted); 1813 if (wanted <= 0) 1814 break; 1815 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 1816 return; 1817 } 1818 atomic_inc(&netstamp_needed_deferred); 1819 schedule_work(&netstamp_work); 1820 #else 1821 static_branch_inc(&netstamp_needed_key); 1822 #endif 1823 } 1824 EXPORT_SYMBOL(net_enable_timestamp); 1825 1826 void net_disable_timestamp(void) 1827 { 1828 #ifdef HAVE_JUMP_LABEL 1829 int wanted; 1830 1831 while (1) { 1832 wanted = atomic_read(&netstamp_wanted); 1833 if (wanted <= 1) 1834 break; 1835 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 1836 return; 1837 } 1838 atomic_dec(&netstamp_needed_deferred); 1839 schedule_work(&netstamp_work); 1840 #else 1841 static_branch_dec(&netstamp_needed_key); 1842 #endif 1843 } 1844 EXPORT_SYMBOL(net_disable_timestamp); 1845 1846 static inline void net_timestamp_set(struct sk_buff *skb) 1847 { 1848 skb->tstamp = 0; 1849 if (static_branch_unlikely(&netstamp_needed_key)) 1850 __net_timestamp(skb); 1851 } 1852 1853 #define net_timestamp_check(COND, SKB) \ 1854 if (static_branch_unlikely(&netstamp_needed_key)) { \ 1855 if ((COND) && !(SKB)->tstamp) \ 1856 __net_timestamp(SKB); \ 1857 } \ 1858 1859 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 1860 { 1861 unsigned int len; 1862 1863 if (!(dev->flags & IFF_UP)) 1864 return false; 1865 1866 len = dev->mtu + dev->hard_header_len + VLAN_HLEN; 1867 if (skb->len <= len) 1868 return true; 1869 1870 /* if TSO is enabled, we don't care about the length as the packet 1871 * could be forwarded without being segmented before 1872 */ 1873 if (skb_is_gso(skb)) 1874 return true; 1875 1876 return false; 1877 } 1878 
EXPORT_SYMBOL_GPL(is_skb_forwardable); 1879 1880 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1881 { 1882 int ret = ____dev_forward_skb(dev, skb); 1883 1884 if (likely(!ret)) { 1885 skb->protocol = eth_type_trans(skb, dev); 1886 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1887 } 1888 1889 return ret; 1890 } 1891 EXPORT_SYMBOL_GPL(__dev_forward_skb); 1892 1893 /** 1894 * dev_forward_skb - loopback an skb to another netif 1895 * 1896 * @dev: destination network device 1897 * @skb: buffer to forward 1898 * 1899 * return values: 1900 * NET_RX_SUCCESS (no congestion) 1901 * NET_RX_DROP (packet was dropped, but freed) 1902 * 1903 * dev_forward_skb can be used for injecting an skb from the 1904 * start_xmit function of one device into the receive queue 1905 * of another device. 1906 * 1907 * The receiving device may be in another namespace, so 1908 * we have to clear all information in the skb that could 1909 * impact namespace isolation. 1910 */ 1911 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1912 { 1913 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 1914 } 1915 EXPORT_SYMBOL_GPL(dev_forward_skb); 1916 1917 static inline int deliver_skb(struct sk_buff *skb, 1918 struct packet_type *pt_prev, 1919 struct net_device *orig_dev) 1920 { 1921 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 1922 return -ENOMEM; 1923 refcount_inc(&skb->users); 1924 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 1925 } 1926 1927 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 1928 struct packet_type **pt, 1929 struct net_device *orig_dev, 1930 __be16 type, 1931 struct list_head *ptype_list) 1932 { 1933 struct packet_type *ptype, *pt_prev = *pt; 1934 1935 list_for_each_entry_rcu(ptype, ptype_list, list) { 1936 if (ptype->type != type) 1937 continue; 1938 if (pt_prev) 1939 deliver_skb(skb, pt_prev, orig_dev); 1940 pt_prev = ptype; 1941 } 1942 *pt = pt_prev; 1943 } 1944 1945 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 1946 { 1947 if (!ptype->af_packet_priv || !skb->sk) 1948 return false; 1949 1950 if (ptype->id_match) 1951 return ptype->id_match(ptype, skb->sk); 1952 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 1953 return true; 1954 1955 return false; 1956 } 1957 1958 /* 1959 * Support routine. Sends outgoing frames to any network 1960 * taps currently in use. 1961 */ 1962 1963 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 1964 { 1965 struct packet_type *ptype; 1966 struct sk_buff *skb2 = NULL; 1967 struct packet_type *pt_prev = NULL; 1968 struct list_head *ptype_list = &ptype_all; 1969 1970 rcu_read_lock(); 1971 again: 1972 list_for_each_entry_rcu(ptype, ptype_list, list) { 1973 /* Never send packets back to the socket 1974 * they originated from - MvS (miquels@drinkel.ow.org) 1975 */ 1976 if (skb_loop_sk(ptype, skb)) 1977 continue; 1978 1979 if (pt_prev) { 1980 deliver_skb(skb2, pt_prev, skb->dev); 1981 pt_prev = ptype; 1982 continue; 1983 } 1984 1985 /* need to clone skb, done only once */ 1986 skb2 = skb_clone(skb, GFP_ATOMIC); 1987 if (!skb2) 1988 goto out_unlock; 1989 1990 net_timestamp_set(skb2); 1991 1992 /* skb->nh should be correctly 1993 * set by sender, so that the second statement is 1994 * just protection against buggy protocols. 
1995 */ 1996 skb_reset_mac_header(skb2); 1997 1998 if (skb_network_header(skb2) < skb2->data || 1999 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2000 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2001 ntohs(skb2->protocol), 2002 dev->name); 2003 skb_reset_network_header(skb2); 2004 } 2005 2006 skb2->transport_header = skb2->network_header; 2007 skb2->pkt_type = PACKET_OUTGOING; 2008 pt_prev = ptype; 2009 } 2010 2011 if (ptype_list == &ptype_all) { 2012 ptype_list = &dev->ptype_all; 2013 goto again; 2014 } 2015 out_unlock: 2016 if (pt_prev) { 2017 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2018 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2019 else 2020 kfree_skb(skb2); 2021 } 2022 rcu_read_unlock(); 2023 } 2024 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2025 2026 /** 2027 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2028 * @dev: Network device 2029 * @txq: number of queues available 2030 * 2031 * If real_num_tx_queues is changed the tc mappings may no longer be 2032 * valid. To resolve this, verify that each tc mapping remains valid and, 2033 * if not, reset the mapping to TC0. With no priorities mapping to this 2034 * offset/count pair it will no longer be used. In the worst case, if TC0 2035 * itself is invalid, nothing can be done, so priority mappings are 2036 * disabled entirely. It is expected that drivers will fix this mapping, 2037 * if they can, before calling netif_set_real_num_tx_queues. 2038 */ 2039 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2040 { 2041 int i; 2042 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2043 2044 /* If TC0 is invalidated disable TC mapping */ 2045 if (tc->offset + tc->count > txq) { 2046 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2047 dev->num_tc = 0; 2048 return; 2049 } 2050 2051 /* Invalidated prio to tc mappings set to TC0 */ 2052 for (i = 1; i < TC_BITMASK + 1; i++) { 2053 int q = netdev_get_prio_tc_map(dev, i); 2054 2055 tc = &dev->tc_to_txq[q]; 2056 if (tc->offset + tc->count > txq) { 2057 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2058 i, q); 2059 netdev_set_prio_tc_map(dev, i, 0); 2060 } 2061 } 2062 } 2063 2064 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2065 { 2066 if (dev->num_tc) { 2067 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2068 int i; 2069 2070 /* walk through the TCs and see if it falls into any of them */ 2071 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2072 if ((txq - tc->offset) < tc->count) 2073 return i; 2074 } 2075 2076 /* didn't find it, just return -1 to indicate no match */ 2077 return -1; 2078 } 2079 2080 return 0; 2081 } 2082 EXPORT_SYMBOL(netdev_txq_to_tc); 2083 2084 #ifdef CONFIG_XPS 2085 struct static_key xps_needed __read_mostly; 2086 EXPORT_SYMBOL(xps_needed); 2087 struct static_key xps_rxqs_needed __read_mostly; 2088 EXPORT_SYMBOL(xps_rxqs_needed); 2089 static DEFINE_MUTEX(xps_map_mutex); 2090 #define xmap_dereference(P) \ 2091 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2092 2093 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2094 int tci, u16 index) 2095 { 2096 struct xps_map *map = NULL; 2097 int pos; 2098 2099 if (dev_maps) 2100 map = xmap_dereference(dev_maps->attr_map[tci]); 2101 if (!map) 2102 return false; 2103 2104 for (pos = map->len; pos--;) { 2105 if (map->queues[pos] != index) 2106 continue; 2107 2108 if (map->len > 1) { 2109 map->queues[pos] = map->queues[--map->len]; 2110 break; 2111 } 2112 2113 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2114 kfree_rcu(map, rcu); 2115 return false; 2116 } 2117 2118 return true; 2119 } 2120 2121 static bool remove_xps_queue_cpu(struct net_device *dev, 2122 struct xps_dev_maps *dev_maps, 2123 int cpu, u16 offset, u16 count) 2124 { 2125 int num_tc = dev->num_tc ? : 1; 2126 bool active = false; 2127 int tci; 2128 2129 for (tci = cpu * num_tc; num_tc--; tci++) { 2130 int i, j; 2131 2132 for (i = count, j = offset; i--; j++) { 2133 if (!remove_xps_queue(dev_maps, tci, j)) 2134 break; 2135 } 2136 2137 active |= i < 0; 2138 } 2139 2140 return active; 2141 } 2142 2143 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, 2144 struct xps_dev_maps *dev_maps, unsigned int nr_ids, 2145 u16 offset, u16 count, bool is_rxqs_map) 2146 { 2147 bool active = false; 2148 int i, j; 2149 2150 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), 2151 j < nr_ids;) 2152 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, 2153 count); 2154 if (!active) { 2155 if (is_rxqs_map) { 2156 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); 2157 } else { 2158 RCU_INIT_POINTER(dev->xps_cpus_map, NULL); 2159 2160 for (i = offset + (count - 1); count--; i--) 2161 netdev_queue_numa_node_write( 2162 netdev_get_tx_queue(dev, i), 2163 NUMA_NO_NODE); 2164 } 2165 kfree_rcu(dev_maps, rcu); 2166 } 2167 } 2168 2169 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2170 u16 count) 2171 { 2172 const unsigned long *possible_mask = NULL; 2173 struct xps_dev_maps *dev_maps; 2174 unsigned int nr_ids; 2175 2176 if (!static_key_false(&xps_needed)) 2177 return; 2178 2179 mutex_lock(&xps_map_mutex); 2180 2181 if (static_key_false(&xps_rxqs_needed)) { 2182 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2183 if (dev_maps) { 2184 nr_ids = dev->num_rx_queues; 2185 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, 2186 offset, count, true); 2187 } 2188 } 2189 2190 dev_maps = xmap_dereference(dev->xps_cpus_map); 2191 if (!dev_maps) 2192 goto out_no_maps; 2193 2194 if (num_possible_cpus() > 1) 2195 possible_mask = cpumask_bits(cpu_possible_mask); 2196 nr_ids = nr_cpu_ids; 
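/* Now tear down the CPU-indexed map.  A NULL possible_mask is treated by
 * the attrmask walk as "all ids set", which is why it is only populated
 * when more than one CPU is possible.
 */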
2197 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, 2198 false); 2199 2200 out_no_maps: 2201 if (static_key_enabled(&xps_rxqs_needed)) 2202 static_key_slow_dec(&xps_rxqs_needed); 2203 2204 static_key_slow_dec(&xps_needed); 2205 mutex_unlock(&xps_map_mutex); 2206 } 2207 2208 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2209 { 2210 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2211 } 2212 2213 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2214 u16 index, bool is_rxqs_map) 2215 { 2216 struct xps_map *new_map; 2217 int alloc_len = XPS_MIN_MAP_ALLOC; 2218 int i, pos; 2219 2220 for (pos = 0; map && pos < map->len; pos++) { 2221 if (map->queues[pos] != index) 2222 continue; 2223 return map; 2224 } 2225 2226 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2227 if (map) { 2228 if (pos < map->alloc_len) 2229 return map; 2230 2231 alloc_len = map->alloc_len * 2; 2232 } 2233 2234 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2235 * map 2236 */ 2237 if (is_rxqs_map) 2238 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2239 else 2240 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2241 cpu_to_node(attr_index)); 2242 if (!new_map) 2243 return NULL; 2244 2245 for (i = 0; i < pos; i++) 2246 new_map->queues[i] = map->queues[i]; 2247 new_map->alloc_len = alloc_len; 2248 new_map->len = pos; 2249 2250 return new_map; 2251 } 2252 2253 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2254 u16 index, bool is_rxqs_map) 2255 { 2256 const unsigned long *online_mask = NULL, *possible_mask = NULL; 2257 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 2258 int i, j, tci, numa_node_id = -2; 2259 int maps_sz, num_tc = 1, tc = 0; 2260 struct xps_map *map, *new_map; 2261 bool active = false; 2262 unsigned int nr_ids; 2263 2264 if (dev->num_tc) { 2265 /* Do not allow XPS on subordinate device directly */ 2266 num_tc = dev->num_tc; 2267 if (num_tc < 0) 2268 return -EINVAL; 2269 2270 /* If queue belongs to subordinate dev use its map */ 2271 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2272 2273 tc = netdev_txq_to_tc(dev, index); 2274 if (tc < 0) 2275 return -EINVAL; 2276 } 2277 2278 mutex_lock(&xps_map_mutex); 2279 if (is_rxqs_map) { 2280 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2281 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2282 nr_ids = dev->num_rx_queues; 2283 } else { 2284 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2285 if (num_possible_cpus() > 1) { 2286 online_mask = cpumask_bits(cpu_online_mask); 2287 possible_mask = cpumask_bits(cpu_possible_mask); 2288 } 2289 dev_maps = xmap_dereference(dev->xps_cpus_map); 2290 nr_ids = nr_cpu_ids; 2291 } 2292 2293 if (maps_sz < L1_CACHE_BYTES) 2294 maps_sz = L1_CACHE_BYTES; 2295 2296 /* allocate memory for queue storage */ 2297 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2298 j < nr_ids;) { 2299 if (!new_dev_maps) 2300 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2301 if (!new_dev_maps) { 2302 mutex_unlock(&xps_map_mutex); 2303 return -ENOMEM; 2304 } 2305 2306 tci = j * num_tc + tc; 2307 map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : 2308 NULL; 2309 2310 map = expand_xps_map(map, j, index, is_rxqs_map); 2311 if (!map) 2312 goto error; 2313 2314 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2315 } 2316 2317 if (!new_dev_maps) 2318 goto out_no_new_maps; 2319 2320 static_key_slow_inc(&xps_needed); 2321 if (is_rxqs_map) 2322 static_key_slow_inc(&xps_rxqs_needed); 2323 2324 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2325 j < nr_ids;) { 2326 /* copy maps belonging to foreign traffic classes */ 2327 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { 2328 /* fill in the new device map from the old device map */ 2329 map = xmap_dereference(dev_maps->attr_map[tci]); 2330 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2331 } 2332 2333 /* We need to explicitly update tci as prevous loop 2334 * could break out early if dev_maps is NULL. 2335 */ 2336 tci = j * num_tc + tc; 2337 2338 if (netif_attr_test_mask(j, mask, nr_ids) && 2339 netif_attr_test_online(j, online_mask, nr_ids)) { 2340 /* add tx-queue to CPU/rx-queue maps */ 2341 int pos = 0; 2342 2343 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2344 while ((pos < map->len) && (map->queues[pos] != index)) 2345 pos++; 2346 2347 if (pos == map->len) 2348 map->queues[map->len++] = index; 2349 #ifdef CONFIG_NUMA 2350 if (!is_rxqs_map) { 2351 if (numa_node_id == -2) 2352 numa_node_id = cpu_to_node(j); 2353 else if (numa_node_id != cpu_to_node(j)) 2354 numa_node_id = -1; 2355 } 2356 #endif 2357 } else if (dev_maps) { 2358 /* fill in the new device map from the old device map */ 2359 map = xmap_dereference(dev_maps->attr_map[tci]); 2360 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2361 } 2362 2363 /* copy maps belonging to foreign traffic classes */ 2364 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { 2365 /* fill in the new device map from the old device map */ 2366 map = xmap_dereference(dev_maps->attr_map[tci]); 2367 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2368 } 2369 } 2370 2371 if (is_rxqs_map) 2372 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); 2373 else 2374 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); 2375 2376 /* Cleanup old maps */ 2377 if (!dev_maps) 2378 goto out_no_old_maps; 2379 2380 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2381 j < nr_ids;) { 2382 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2383 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2384 map = xmap_dereference(dev_maps->attr_map[tci]); 2385 if (map && map != new_map) 2386 kfree_rcu(map, rcu); 2387 } 2388 } 2389 2390 kfree_rcu(dev_maps, rcu); 2391 2392 out_no_old_maps: 2393 dev_maps = new_dev_maps; 2394 active = true; 2395 2396 out_no_new_maps: 2397 if (!is_rxqs_map) { 2398 /* update Tx queue numa node */ 2399 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2400 (numa_node_id >= 0) ? 
2401 numa_node_id : NUMA_NO_NODE); 2402 } 2403 2404 if (!dev_maps) 2405 goto out_no_maps; 2406 2407 /* removes tx-queue from unused CPUs/rx-queues */ 2408 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2409 j < nr_ids;) { 2410 for (i = tc, tci = j * num_tc; i--; tci++) 2411 active |= remove_xps_queue(dev_maps, tci, index); 2412 if (!netif_attr_test_mask(j, mask, nr_ids) || 2413 !netif_attr_test_online(j, online_mask, nr_ids)) 2414 active |= remove_xps_queue(dev_maps, tci, index); 2415 for (i = num_tc - tc, tci++; --i; tci++) 2416 active |= remove_xps_queue(dev_maps, tci, index); 2417 } 2418 2419 /* free map if not active */ 2420 if (!active) { 2421 if (is_rxqs_map) 2422 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); 2423 else 2424 RCU_INIT_POINTER(dev->xps_cpus_map, NULL); 2425 kfree_rcu(dev_maps, rcu); 2426 } 2427 2428 out_no_maps: 2429 mutex_unlock(&xps_map_mutex); 2430 2431 return 0; 2432 error: 2433 /* remove any maps that we added */ 2434 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2435 j < nr_ids;) { 2436 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2437 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2438 map = dev_maps ? 2439 xmap_dereference(dev_maps->attr_map[tci]) : 2440 NULL; 2441 if (new_map && new_map != map) 2442 kfree(new_map); 2443 } 2444 } 2445 2446 mutex_unlock(&xps_map_mutex); 2447 2448 kfree(new_dev_maps); 2449 return -ENOMEM; 2450 } 2451 2452 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2453 u16 index) 2454 { 2455 return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); 2456 } 2457 EXPORT_SYMBOL(netif_set_xps_queue); 2458 2459 #endif 2460 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2461 { 2462 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2463 2464 /* Unbind any subordinate channels */ 2465 while (txq-- != &dev->_tx[0]) { 2466 if (txq->sb_dev) 2467 netdev_unbind_sb_channel(dev, txq->sb_dev); 2468 } 2469 } 2470 2471 void netdev_reset_tc(struct net_device *dev) 2472 { 2473 #ifdef CONFIG_XPS 2474 netif_reset_xps_queues_gt(dev, 0); 2475 #endif 2476 netdev_unbind_all_sb_channels(dev); 2477 2478 /* Reset TC configuration of device */ 2479 dev->num_tc = 0; 2480 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2481 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2482 } 2483 EXPORT_SYMBOL(netdev_reset_tc); 2484 2485 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2486 { 2487 if (tc >= dev->num_tc) 2488 return -EINVAL; 2489 2490 #ifdef CONFIG_XPS 2491 netif_reset_xps_queues(dev, offset, count); 2492 #endif 2493 dev->tc_to_txq[tc].count = count; 2494 dev->tc_to_txq[tc].offset = offset; 2495 return 0; 2496 } 2497 EXPORT_SYMBOL(netdev_set_tc_queue); 2498 2499 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2500 { 2501 if (num_tc > TC_MAX_QUEUE) 2502 return -EINVAL; 2503 2504 #ifdef CONFIG_XPS 2505 netif_reset_xps_queues_gt(dev, 0); 2506 #endif 2507 netdev_unbind_all_sb_channels(dev); 2508 2509 dev->num_tc = num_tc; 2510 return 0; 2511 } 2512 EXPORT_SYMBOL(netdev_set_num_tc); 2513 2514 void netdev_unbind_sb_channel(struct net_device *dev, 2515 struct net_device *sb_dev) 2516 { 2517 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2518 2519 #ifdef CONFIG_XPS 2520 netif_reset_xps_queues_gt(sb_dev, 0); 2521 #endif 2522 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2523 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2524 2525 while (txq-- != &dev->_tx[0]) { 2526 if (txq->sb_dev == 
sb_dev) 2527 txq->sb_dev = NULL; 2528 } 2529 } 2530 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2531 2532 int netdev_bind_sb_channel_queue(struct net_device *dev, 2533 struct net_device *sb_dev, 2534 u8 tc, u16 count, u16 offset) 2535 { 2536 /* Make certain the sb_dev and dev are already configured */ 2537 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2538 return -EINVAL; 2539 2540 /* We cannot hand out queues we don't have */ 2541 if ((offset + count) > dev->real_num_tx_queues) 2542 return -EINVAL; 2543 2544 /* Record the mapping */ 2545 sb_dev->tc_to_txq[tc].count = count; 2546 sb_dev->tc_to_txq[tc].offset = offset; 2547 2548 /* Provide a way for Tx queue to find the tc_to_txq map or 2549 * XPS map for itself. 2550 */ 2551 while (count--) 2552 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2553 2554 return 0; 2555 } 2556 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2557 2558 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2559 { 2560 /* Do not use a multiqueue device to represent a subordinate channel */ 2561 if (netif_is_multiqueue(dev)) 2562 return -ENODEV; 2563 2564 /* We allow channels 1 - 32767 to be used for subordinate channels. 2565 * Channel 0 is meant to be "native" mode and used only to represent 2566 * the main root device. We allow writing 0 to reset the device back 2567 * to normal mode after being used as a subordinate channel. 2568 */ 2569 if (channel > S16_MAX) 2570 return -EINVAL; 2571 2572 dev->num_tc = -channel; 2573 2574 return 0; 2575 } 2576 EXPORT_SYMBOL(netdev_set_sb_channel); 2577 2578 /* 2579 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2580 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2581 */ 2582 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2583 { 2584 bool disabling; 2585 int rc; 2586 2587 disabling = txq < dev->real_num_tx_queues; 2588 2589 if (txq < 1 || txq > dev->num_tx_queues) 2590 return -EINVAL; 2591 2592 if (dev->reg_state == NETREG_REGISTERED || 2593 dev->reg_state == NETREG_UNREGISTERING) { 2594 ASSERT_RTNL(); 2595 2596 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2597 txq); 2598 if (rc) 2599 return rc; 2600 2601 if (dev->num_tc) 2602 netif_setup_tc(dev, txq); 2603 2604 dev->real_num_tx_queues = txq; 2605 2606 if (disabling) { 2607 synchronize_net(); 2608 qdisc_reset_all_tx_gt(dev, txq); 2609 #ifdef CONFIG_XPS 2610 netif_reset_xps_queues_gt(dev, txq); 2611 #endif 2612 } 2613 } else { 2614 dev->real_num_tx_queues = txq; 2615 } 2616 2617 return 0; 2618 } 2619 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2620 2621 #ifdef CONFIG_SYSFS 2622 /** 2623 * netif_set_real_num_rx_queues - set actual number of RX queues used 2624 * @dev: Network device 2625 * @rxq: Actual number of RX queues 2626 * 2627 * This must be called either with the rtnl_lock held or before 2628 * registration of the net device. Returns 0 on success, or a 2629 * negative error code. If called before registration, it always 2630 * succeeds. 
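 *
 * A minimal driver-side sketch (hypothetical names, not from this file):
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_rx_queues(dev, new_rx);
 *	if (!err)
 *		err = netif_set_real_num_tx_queues(dev, new_tx);
 *	rtnl_unlock();
 *
 * new_rx may not exceed the num_rx_queues the device was allocated with.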
2631 */ 2632 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2633 { 2634 int rc; 2635 2636 if (rxq < 1 || rxq > dev->num_rx_queues) 2637 return -EINVAL; 2638 2639 if (dev->reg_state == NETREG_REGISTERED) { 2640 ASSERT_RTNL(); 2641 2642 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2643 rxq); 2644 if (rc) 2645 return rc; 2646 } 2647 2648 dev->real_num_rx_queues = rxq; 2649 return 0; 2650 } 2651 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2652 #endif 2653 2654 /** 2655 * netif_get_num_default_rss_queues - default number of RSS queues 2656 * 2657 * This routine should set an upper limit on the number of RSS queues 2658 * used by default by multiqueue devices. 2659 */ 2660 int netif_get_num_default_rss_queues(void) 2661 { 2662 return is_kdump_kernel() ? 2663 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2664 } 2665 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2666 2667 static void __netif_reschedule(struct Qdisc *q) 2668 { 2669 struct softnet_data *sd; 2670 unsigned long flags; 2671 2672 local_irq_save(flags); 2673 sd = this_cpu_ptr(&softnet_data); 2674 q->next_sched = NULL; 2675 *sd->output_queue_tailp = q; 2676 sd->output_queue_tailp = &q->next_sched; 2677 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2678 local_irq_restore(flags); 2679 } 2680 2681 void __netif_schedule(struct Qdisc *q) 2682 { 2683 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2684 __netif_reschedule(q); 2685 } 2686 EXPORT_SYMBOL(__netif_schedule); 2687 2688 struct dev_kfree_skb_cb { 2689 enum skb_free_reason reason; 2690 }; 2691 2692 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 2693 { 2694 return (struct dev_kfree_skb_cb *)skb->cb; 2695 } 2696 2697 void netif_schedule_queue(struct netdev_queue *txq) 2698 { 2699 rcu_read_lock(); 2700 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { 2701 struct Qdisc *q = rcu_dereference(txq->qdisc); 2702 2703 __netif_schedule(q); 2704 } 2705 rcu_read_unlock(); 2706 } 2707 EXPORT_SYMBOL(netif_schedule_queue); 2708 2709 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 2710 { 2711 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 2712 struct Qdisc *q; 2713 2714 rcu_read_lock(); 2715 q = rcu_dereference(dev_queue->qdisc); 2716 __netif_schedule(q); 2717 rcu_read_unlock(); 2718 } 2719 } 2720 EXPORT_SYMBOL(netif_tx_wake_queue); 2721 2722 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 2723 { 2724 unsigned long flags; 2725 2726 if (unlikely(!skb)) 2727 return; 2728 2729 if (likely(refcount_read(&skb->users) == 1)) { 2730 smp_rmb(); 2731 refcount_set(&skb->users, 0); 2732 } else if (likely(!refcount_dec_and_test(&skb->users))) { 2733 return; 2734 } 2735 get_kfree_skb_cb(skb)->reason = reason; 2736 local_irq_save(flags); 2737 skb->next = __this_cpu_read(softnet_data.completion_queue); 2738 __this_cpu_write(softnet_data.completion_queue, skb); 2739 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2740 local_irq_restore(flags); 2741 } 2742 EXPORT_SYMBOL(__dev_kfree_skb_irq); 2743 2744 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 2745 { 2746 if (in_irq() || irqs_disabled()) 2747 __dev_kfree_skb_irq(skb, reason); 2748 else 2749 dev_kfree_skb(skb); 2750 } 2751 EXPORT_SYMBOL(__dev_kfree_skb_any); 2752 2753 2754 /** 2755 * netif_device_detach - mark device as removed 2756 * @dev: network device 2757 * 2758 * Mark device as removed from system and therefore no longer available. 
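 *
 * A hypothetical driver suspend/resume pair (sketch, names assumed, not
 * taken from this file) would use it together with netif_device_attach():
 *
 *	// suspend
 *	netif_device_detach(dev);
 *	... quiesce DMA and save state ...
 *
 *	// resume
 *	... restore state ...
 *	netif_device_attach(dev);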
2759 */ 2760 void netif_device_detach(struct net_device *dev) 2761 { 2762 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 2763 netif_running(dev)) { 2764 netif_tx_stop_all_queues(dev); 2765 } 2766 } 2767 EXPORT_SYMBOL(netif_device_detach); 2768 2769 /** 2770 * netif_device_attach - mark device as attached 2771 * @dev: network device 2772 * 2773 * Mark device as attached to the system and restart it if needed. 2774 */ 2775 void netif_device_attach(struct net_device *dev) 2776 { 2777 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 2778 netif_running(dev)) { 2779 netif_tx_wake_all_queues(dev); 2780 __netdev_watchdog_up(dev); 2781 } 2782 } 2783 EXPORT_SYMBOL(netif_device_attach); 2784 2785 /* 2786 * Returns a Tx hash based on the given packet descriptor and the number of 2787 * Tx queues to be used as a distribution range. 2788 */ 2789 static u16 skb_tx_hash(const struct net_device *dev, 2790 const struct net_device *sb_dev, 2791 struct sk_buff *skb) 2792 { 2793 u32 hash; 2794 u16 qoffset = 0; 2795 u16 qcount = dev->real_num_tx_queues; 2796 2797 if (dev->num_tc) { 2798 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 2799 2800 qoffset = sb_dev->tc_to_txq[tc].offset; 2801 qcount = sb_dev->tc_to_txq[tc].count; 2802 } 2803 2804 if (skb_rx_queue_recorded(skb)) { 2805 hash = skb_get_rx_queue(skb); 2806 while (unlikely(hash >= qcount)) 2807 hash -= qcount; 2808 return hash + qoffset; 2809 } 2810 2811 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 2812 } 2813 2814 static void skb_warn_bad_offload(const struct sk_buff *skb) 2815 { 2816 static const netdev_features_t null_features; 2817 struct net_device *dev = skb->dev; 2818 const char *name = ""; 2819 2820 if (!net_ratelimit()) 2821 return; 2822 2823 if (dev) { 2824 if (dev->dev.parent) 2825 name = dev_driver_string(dev->dev.parent); 2826 else 2827 name = netdev_name(dev); 2828 } 2829 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2830 "gso_type=%d ip_summed=%d\n", 2831 name, dev ? &dev->features : &null_features, 2832 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2833 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2834 skb_shinfo(skb)->gso_type, skb->ip_summed); 2835 } 2836 2837 /* 2838 * Invalidate hardware checksum when packet is to be mangled, and 2839 * complete checksum manually on outgoing path. 2840 */ 2841 int skb_checksum_help(struct sk_buff *skb) 2842 { 2843 __wsum csum; 2844 int ret = 0, offset; 2845 2846 if (skb->ip_summed == CHECKSUM_COMPLETE) 2847 goto out_set_summed; 2848 2849 if (unlikely(skb_shinfo(skb)->gso_size)) { 2850 skb_warn_bad_offload(skb); 2851 return -EINVAL; 2852 } 2853 2854 /* Before computing a checksum, we should make sure no frag could 2855 * be modified by an external entity: the checksum could be wrong otherwise. 
2856 */ 2857 if (skb_has_shared_frag(skb)) { 2858 ret = __skb_linearize(skb); 2859 if (ret) 2860 goto out; 2861 } 2862 2863 offset = skb_checksum_start_offset(skb); 2864 BUG_ON(offset >= skb_headlen(skb)); 2865 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2866 2867 offset += skb->csum_offset; 2868 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2869 2870 if (skb_cloned(skb) && 2871 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 2872 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2873 if (ret) 2874 goto out; 2875 } 2876 2877 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 2878 out_set_summed: 2879 skb->ip_summed = CHECKSUM_NONE; 2880 out: 2881 return ret; 2882 } 2883 EXPORT_SYMBOL(skb_checksum_help); 2884 2885 int skb_crc32c_csum_help(struct sk_buff *skb) 2886 { 2887 __le32 crc32c_csum; 2888 int ret = 0, offset, start; 2889 2890 if (skb->ip_summed != CHECKSUM_PARTIAL) 2891 goto out; 2892 2893 if (unlikely(skb_is_gso(skb))) 2894 goto out; 2895 2896 /* Before computing a checksum, we should make sure no frag could 2897 * be modified by an external entity : checksum could be wrong. 2898 */ 2899 if (unlikely(skb_has_shared_frag(skb))) { 2900 ret = __skb_linearize(skb); 2901 if (ret) 2902 goto out; 2903 } 2904 start = skb_checksum_start_offset(skb); 2905 offset = start + offsetof(struct sctphdr, checksum); 2906 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 2907 ret = -EINVAL; 2908 goto out; 2909 } 2910 if (skb_cloned(skb) && 2911 !skb_clone_writable(skb, offset + sizeof(__le32))) { 2912 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2913 if (ret) 2914 goto out; 2915 } 2916 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 2917 skb->len - start, ~(__u32)0, 2918 crc32c_csum_stub)); 2919 *(__le32 *)(skb->data + offset) = crc32c_csum; 2920 skb->ip_summed = CHECKSUM_NONE; 2921 skb->csum_not_inet = 0; 2922 out: 2923 return ret; 2924 } 2925 2926 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2927 { 2928 __be16 type = skb->protocol; 2929 2930 /* Tunnel gso handlers can set protocol to ethernet. */ 2931 if (type == htons(ETH_P_TEB)) { 2932 struct ethhdr *eth; 2933 2934 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 2935 return 0; 2936 2937 eth = (struct ethhdr *)skb->data; 2938 type = eth->h_proto; 2939 } 2940 2941 return __vlan_get_protocol(skb, type, depth); 2942 } 2943 2944 /** 2945 * skb_mac_gso_segment - mac layer segmentation handler. 2946 * @skb: buffer to segment 2947 * @features: features for the output path (see dev->features) 2948 */ 2949 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 2950 netdev_features_t features) 2951 { 2952 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2953 struct packet_offload *ptype; 2954 int vlan_depth = skb->mac_len; 2955 __be16 type = skb_network_protocol(skb, &vlan_depth); 2956 2957 if (unlikely(!type)) 2958 return ERR_PTR(-EINVAL); 2959 2960 __skb_pull(skb, vlan_depth); 2961 2962 rcu_read_lock(); 2963 list_for_each_entry_rcu(ptype, &offload_base, list) { 2964 if (ptype->type == type && ptype->callbacks.gso_segment) { 2965 segs = ptype->callbacks.gso_segment(skb, features); 2966 break; 2967 } 2968 } 2969 rcu_read_unlock(); 2970 2971 __skb_push(skb, skb->data - skb_mac_header(skb)); 2972 2973 return segs; 2974 } 2975 EXPORT_SYMBOL(skb_mac_gso_segment); 2976 2977 2978 /* openvswitch calls this on rx path, so we need a different check. 
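 * On the Tx path anything that is neither CHECKSUM_PARTIAL nor
 * CHECKSUM_UNNECESSARY still needs checksum help; on the Rx path only
 * CHECKSUM_NONE does.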
2979 */ 2980 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 2981 { 2982 if (tx_path) 2983 return skb->ip_summed != CHECKSUM_PARTIAL && 2984 skb->ip_summed != CHECKSUM_UNNECESSARY; 2985 2986 return skb->ip_summed == CHECKSUM_NONE; 2987 } 2988 2989 /** 2990 * __skb_gso_segment - Perform segmentation on skb. 2991 * @skb: buffer to segment 2992 * @features: features for the output path (see dev->features) 2993 * @tx_path: whether it is called in TX path 2994 * 2995 * This function segments the given skb and returns a list of segments. 2996 * 2997 * It may return NULL if the skb requires no segmentation. This is 2998 * only possible when GSO is used for verifying header integrity. 2999 * 3000 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. 3001 */ 3002 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3003 netdev_features_t features, bool tx_path) 3004 { 3005 struct sk_buff *segs; 3006 3007 if (unlikely(skb_needs_check(skb, tx_path))) { 3008 int err; 3009 3010 /* We're going to init ->check field in TCP or UDP header */ 3011 err = skb_cow_head(skb, 0); 3012 if (err < 0) 3013 return ERR_PTR(err); 3014 } 3015 3016 /* Only report GSO partial support if it will enable us to 3017 * support segmentation on this frame without needing additional 3018 * work. 3019 */ 3020 if (features & NETIF_F_GSO_PARTIAL) { 3021 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3022 struct net_device *dev = skb->dev; 3023 3024 partial_features |= dev->features & dev->gso_partial_features; 3025 if (!skb_gso_ok(skb, features | partial_features)) 3026 features &= ~NETIF_F_GSO_PARTIAL; 3027 } 3028 3029 BUILD_BUG_ON(SKB_SGO_CB_OFFSET + 3030 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3031 3032 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3033 SKB_GSO_CB(skb)->encap_level = 0; 3034 3035 skb_reset_mac_header(skb); 3036 skb_reset_mac_len(skb); 3037 3038 segs = skb_mac_gso_segment(skb, features); 3039 3040 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3041 skb_warn_bad_offload(skb); 3042 3043 return segs; 3044 } 3045 EXPORT_SYMBOL(__skb_gso_segment); 3046 3047 /* Take action when hardware reception checksum errors are detected. */ 3048 #ifdef CONFIG_BUG 3049 void netdev_rx_csum_fault(struct net_device *dev) 3050 { 3051 if (net_ratelimit()) { 3052 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 3053 dump_stack(); 3054 } 3055 } 3056 EXPORT_SYMBOL(netdev_rx_csum_fault); 3057 #endif 3058 3059 /* XXX: check that highmem exists at all on the given machine. */ 3060 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3061 { 3062 #ifdef CONFIG_HIGHMEM 3063 int i; 3064 3065 if (!(dev->features & NETIF_F_HIGHDMA)) { 3066 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3067 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3068 3069 if (PageHighMem(skb_frag_page(frag))) 3070 return 1; 3071 } 3072 } 3073 #endif 3074 return 0; 3075 } 3076 3077 /* If MPLS offload request, verify we are testing hardware MPLS features 3078 * instead of standard features for the netdev. 
3079 */ 3080 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3081 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3082 netdev_features_t features, 3083 __be16 type) 3084 { 3085 if (eth_p_mpls(type)) 3086 features &= skb->dev->mpls_features; 3087 3088 return features; 3089 } 3090 #else 3091 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3092 netdev_features_t features, 3093 __be16 type) 3094 { 3095 return features; 3096 } 3097 #endif 3098 3099 static netdev_features_t harmonize_features(struct sk_buff *skb, 3100 netdev_features_t features) 3101 { 3102 int tmp; 3103 __be16 type; 3104 3105 type = skb_network_protocol(skb, &tmp); 3106 features = net_mpls_features(skb, features, type); 3107 3108 if (skb->ip_summed != CHECKSUM_NONE && 3109 !can_checksum_protocol(features, type)) { 3110 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3111 } 3112 if (illegal_highdma(skb->dev, skb)) 3113 features &= ~NETIF_F_SG; 3114 3115 return features; 3116 } 3117 3118 netdev_features_t passthru_features_check(struct sk_buff *skb, 3119 struct net_device *dev, 3120 netdev_features_t features) 3121 { 3122 return features; 3123 } 3124 EXPORT_SYMBOL(passthru_features_check); 3125 3126 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3127 struct net_device *dev, 3128 netdev_features_t features) 3129 { 3130 return vlan_features_check(skb, features); 3131 } 3132 3133 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3134 struct net_device *dev, 3135 netdev_features_t features) 3136 { 3137 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3138 3139 if (gso_segs > dev->gso_max_segs) 3140 return features & ~NETIF_F_GSO_MASK; 3141 3142 /* Support for GSO partial features requires software 3143 * intervention before we can actually process the packets 3144 * so we need to strip support for any partial features now 3145 * and we can pull them back in after we have partially 3146 * segmented the frame. 3147 */ 3148 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3149 features &= ~dev->gso_partial_features; 3150 3151 /* Make sure to clear the IPv4 ID mangling feature if the 3152 * IPv4 header has the potential to be fragmented. 3153 */ 3154 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3155 struct iphdr *iph = skb->encapsulation ? 
3156 inner_ip_hdr(skb) : ip_hdr(skb); 3157 3158 if (!(iph->frag_off & htons(IP_DF))) 3159 features &= ~NETIF_F_TSO_MANGLEID; 3160 } 3161 3162 return features; 3163 } 3164 3165 netdev_features_t netif_skb_features(struct sk_buff *skb) 3166 { 3167 struct net_device *dev = skb->dev; 3168 netdev_features_t features = dev->features; 3169 3170 if (skb_is_gso(skb)) 3171 features = gso_features_check(skb, dev, features); 3172 3173 /* If encapsulation offload request, verify we are testing 3174 * hardware encapsulation features instead of standard 3175 * features for the netdev 3176 */ 3177 if (skb->encapsulation) 3178 features &= dev->hw_enc_features; 3179 3180 if (skb_vlan_tagged(skb)) 3181 features = netdev_intersect_features(features, 3182 dev->vlan_features | 3183 NETIF_F_HW_VLAN_CTAG_TX | 3184 NETIF_F_HW_VLAN_STAG_TX); 3185 3186 if (dev->netdev_ops->ndo_features_check) 3187 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3188 features); 3189 else 3190 features &= dflt_features_check(skb, dev, features); 3191 3192 return harmonize_features(skb, features); 3193 } 3194 EXPORT_SYMBOL(netif_skb_features); 3195 3196 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3197 struct netdev_queue *txq, bool more) 3198 { 3199 unsigned int len; 3200 int rc; 3201 3202 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) 3203 dev_queue_xmit_nit(skb, dev); 3204 3205 len = skb->len; 3206 trace_net_dev_start_xmit(skb, dev); 3207 rc = netdev_start_xmit(skb, dev, txq, more); 3208 trace_net_dev_xmit(skb, rc, dev, len); 3209 3210 return rc; 3211 } 3212 3213 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3214 struct netdev_queue *txq, int *ret) 3215 { 3216 struct sk_buff *skb = first; 3217 int rc = NETDEV_TX_OK; 3218 3219 while (skb) { 3220 struct sk_buff *next = skb->next; 3221 3222 skb->next = NULL; 3223 rc = xmit_one(skb, dev, txq, next != NULL); 3224 if (unlikely(!dev_xmit_complete(rc))) { 3225 skb->next = next; 3226 goto out; 3227 } 3228 3229 skb = next; 3230 if (netif_xmit_stopped(txq) && skb) { 3231 rc = NETDEV_TX_BUSY; 3232 break; 3233 } 3234 } 3235 3236 out: 3237 *ret = rc; 3238 return skb; 3239 } 3240 3241 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3242 netdev_features_t features) 3243 { 3244 if (skb_vlan_tag_present(skb) && 3245 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3246 skb = __vlan_hwaccel_push_inside(skb); 3247 return skb; 3248 } 3249 3250 int skb_csum_hwoffload_help(struct sk_buff *skb, 3251 const netdev_features_t features) 3252 { 3253 if (unlikely(skb->csum_not_inet)) 3254 return !!(features & NETIF_F_SCTP_CRC) ? 0 : 3255 skb_crc32c_csum_help(skb); 3256 3257 return !!(features & NETIF_F_CSUM_MASK) ? 
0 : skb_checksum_help(skb); 3258 } 3259 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3260 3261 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3262 { 3263 netdev_features_t features; 3264 3265 features = netif_skb_features(skb); 3266 skb = validate_xmit_vlan(skb, features); 3267 if (unlikely(!skb)) 3268 goto out_null; 3269 3270 skb = sk_validate_xmit_skb(skb, dev); 3271 if (unlikely(!skb)) 3272 goto out_null; 3273 3274 if (netif_needs_gso(skb, features)) { 3275 struct sk_buff *segs; 3276 3277 segs = skb_gso_segment(skb, features); 3278 if (IS_ERR(segs)) { 3279 goto out_kfree_skb; 3280 } else if (segs) { 3281 consume_skb(skb); 3282 skb = segs; 3283 } 3284 } else { 3285 if (skb_needs_linearize(skb, features) && 3286 __skb_linearize(skb)) 3287 goto out_kfree_skb; 3288 3289 /* If packet is not checksummed and device does not 3290 * support checksumming for this protocol, complete 3291 * checksumming here. 3292 */ 3293 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3294 if (skb->encapsulation) 3295 skb_set_inner_transport_header(skb, 3296 skb_checksum_start_offset(skb)); 3297 else 3298 skb_set_transport_header(skb, 3299 skb_checksum_start_offset(skb)); 3300 if (skb_csum_hwoffload_help(skb, features)) 3301 goto out_kfree_skb; 3302 } 3303 } 3304 3305 skb = validate_xmit_xfrm(skb, features, again); 3306 3307 return skb; 3308 3309 out_kfree_skb: 3310 kfree_skb(skb); 3311 out_null: 3312 atomic_long_inc(&dev->tx_dropped); 3313 return NULL; 3314 } 3315 3316 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3317 { 3318 struct sk_buff *next, *head = NULL, *tail; 3319 3320 for (; skb != NULL; skb = next) { 3321 next = skb->next; 3322 skb->next = NULL; 3323 3324 /* in case skb wont be segmented, point to itself */ 3325 skb->prev = skb; 3326 3327 skb = validate_xmit_skb(skb, dev, again); 3328 if (!skb) 3329 continue; 3330 3331 if (!head) 3332 head = skb; 3333 else 3334 tail->next = skb; 3335 /* If skb was segmented, skb->prev points to 3336 * the last segment. If not, it still contains skb. 
3337 */ 3338 tail = skb->prev; 3339 } 3340 return head; 3341 } 3342 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3343 3344 static void qdisc_pkt_len_init(struct sk_buff *skb) 3345 { 3346 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3347 3348 qdisc_skb_cb(skb)->pkt_len = skb->len; 3349 3350 /* To get more precise estimation of bytes sent on wire, 3351 * we add to pkt_len the headers size of all segments 3352 */ 3353 if (shinfo->gso_size) { 3354 unsigned int hdr_len; 3355 u16 gso_segs = shinfo->gso_segs; 3356 3357 /* mac layer + network layer */ 3358 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3359 3360 /* + transport layer */ 3361 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3362 const struct tcphdr *th; 3363 struct tcphdr _tcphdr; 3364 3365 th = skb_header_pointer(skb, skb_transport_offset(skb), 3366 sizeof(_tcphdr), &_tcphdr); 3367 if (likely(th)) 3368 hdr_len += __tcp_hdrlen(th); 3369 } else { 3370 struct udphdr _udphdr; 3371 3372 if (skb_header_pointer(skb, skb_transport_offset(skb), 3373 sizeof(_udphdr), &_udphdr)) 3374 hdr_len += sizeof(struct udphdr); 3375 } 3376 3377 if (shinfo->gso_type & SKB_GSO_DODGY) 3378 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3379 shinfo->gso_size); 3380 3381 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3382 } 3383 } 3384 3385 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3386 struct net_device *dev, 3387 struct netdev_queue *txq) 3388 { 3389 spinlock_t *root_lock = qdisc_lock(q); 3390 struct sk_buff *to_free = NULL; 3391 bool contended; 3392 int rc; 3393 3394 qdisc_calculate_pkt_len(skb, q); 3395 3396 if (q->flags & TCQ_F_NOLOCK) { 3397 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3398 __qdisc_drop(skb, &to_free); 3399 rc = NET_XMIT_DROP; 3400 } else { 3401 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3402 qdisc_run(q); 3403 } 3404 3405 if (unlikely(to_free)) 3406 kfree_skb_list(to_free); 3407 return rc; 3408 } 3409 3410 /* 3411 * Heuristic to force contended enqueues to serialize on a 3412 * separate lock before trying to get qdisc main lock. 3413 * This permits qdisc->running owner to get the lock more 3414 * often and dequeue packets faster. 3415 */ 3416 contended = qdisc_is_running(q); 3417 if (unlikely(contended)) 3418 spin_lock(&q->busylock); 3419 3420 spin_lock(root_lock); 3421 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3422 __qdisc_drop(skb, &to_free); 3423 rc = NET_XMIT_DROP; 3424 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3425 qdisc_run_begin(q)) { 3426 /* 3427 * This is a work-conserving queue; there are no old skbs 3428 * waiting to be sent out; and the qdisc is not running - 3429 * xmit the skb directly. 
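 * If the direct transmit goes through, __qdisc_run() below is used to
 * flush any packets that other CPUs may have enqueued meanwhile.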
3430 */ 3431 3432 qdisc_bstats_update(q, skb); 3433 3434 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3435 if (unlikely(contended)) { 3436 spin_unlock(&q->busylock); 3437 contended = false; 3438 } 3439 __qdisc_run(q); 3440 } 3441 3442 qdisc_run_end(q); 3443 rc = NET_XMIT_SUCCESS; 3444 } else { 3445 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3446 if (qdisc_run_begin(q)) { 3447 if (unlikely(contended)) { 3448 spin_unlock(&q->busylock); 3449 contended = false; 3450 } 3451 __qdisc_run(q); 3452 qdisc_run_end(q); 3453 } 3454 } 3455 spin_unlock(root_lock); 3456 if (unlikely(to_free)) 3457 kfree_skb_list(to_free); 3458 if (unlikely(contended)) 3459 spin_unlock(&q->busylock); 3460 return rc; 3461 } 3462 3463 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3464 static void skb_update_prio(struct sk_buff *skb) 3465 { 3466 const struct netprio_map *map; 3467 const struct sock *sk; 3468 unsigned int prioidx; 3469 3470 if (skb->priority) 3471 return; 3472 map = rcu_dereference_bh(skb->dev->priomap); 3473 if (!map) 3474 return; 3475 sk = skb_to_full_sk(skb); 3476 if (!sk) 3477 return; 3478 3479 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3480 3481 if (prioidx < map->priomap_len) 3482 skb->priority = map->priomap[prioidx]; 3483 } 3484 #else 3485 #define skb_update_prio(skb) 3486 #endif 3487 3488 DEFINE_PER_CPU(int, xmit_recursion); 3489 EXPORT_SYMBOL(xmit_recursion); 3490 3491 /** 3492 * dev_loopback_xmit - loop back @skb 3493 * @net: network namespace this loopback is happening in 3494 * @sk: sk needed to be a netfilter okfn 3495 * @skb: buffer to transmit 3496 */ 3497 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3498 { 3499 skb_reset_mac_header(skb); 3500 __skb_pull(skb, skb_network_offset(skb)); 3501 skb->pkt_type = PACKET_LOOPBACK; 3502 skb->ip_summed = CHECKSUM_UNNECESSARY; 3503 WARN_ON(!skb_dst(skb)); 3504 skb_dst_force(skb); 3505 netif_rx_ni(skb); 3506 return 0; 3507 } 3508 EXPORT_SYMBOL(dev_loopback_xmit); 3509 3510 #ifdef CONFIG_NET_EGRESS 3511 static struct sk_buff * 3512 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3513 { 3514 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3515 struct tcf_result cl_res; 3516 3517 if (!miniq) 3518 return skb; 3519 3520 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ 3521 mini_qdisc_bstats_cpu_update(miniq, skb); 3522 3523 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 3524 case TC_ACT_OK: 3525 case TC_ACT_RECLASSIFY: 3526 skb->tc_index = TC_H_MIN(cl_res.classid); 3527 break; 3528 case TC_ACT_SHOT: 3529 mini_qdisc_qstats_cpu_drop(miniq); 3530 *ret = NET_XMIT_DROP; 3531 kfree_skb(skb); 3532 return NULL; 3533 case TC_ACT_STOLEN: 3534 case TC_ACT_QUEUED: 3535 case TC_ACT_TRAP: 3536 *ret = NET_XMIT_SUCCESS; 3537 consume_skb(skb); 3538 return NULL; 3539 case TC_ACT_REDIRECT: 3540 /* No need to push/pop skb's mac_header here on egress! 
*/ 3541 skb_do_redirect(skb); 3542 *ret = NET_XMIT_SUCCESS; 3543 return NULL; 3544 default: 3545 break; 3546 } 3547 3548 return skb; 3549 } 3550 #endif /* CONFIG_NET_EGRESS */ 3551 3552 #ifdef CONFIG_XPS 3553 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3554 struct xps_dev_maps *dev_maps, unsigned int tci) 3555 { 3556 struct xps_map *map; 3557 int queue_index = -1; 3558 3559 if (dev->num_tc) { 3560 tci *= dev->num_tc; 3561 tci += netdev_get_prio_tc_map(dev, skb->priority); 3562 } 3563 3564 map = rcu_dereference(dev_maps->attr_map[tci]); 3565 if (map) { 3566 if (map->len == 1) 3567 queue_index = map->queues[0]; 3568 else 3569 queue_index = map->queues[reciprocal_scale( 3570 skb_get_hash(skb), map->len)]; 3571 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3572 queue_index = -1; 3573 } 3574 return queue_index; 3575 } 3576 #endif 3577 3578 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3579 struct sk_buff *skb) 3580 { 3581 #ifdef CONFIG_XPS 3582 struct xps_dev_maps *dev_maps; 3583 struct sock *sk = skb->sk; 3584 int queue_index = -1; 3585 3586 if (!static_key_false(&xps_needed)) 3587 return -1; 3588 3589 rcu_read_lock(); 3590 if (!static_key_false(&xps_rxqs_needed)) 3591 goto get_cpus_map; 3592 3593 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); 3594 if (dev_maps) { 3595 int tci = sk_rx_queue_get(sk); 3596 3597 if (tci >= 0 && tci < dev->num_rx_queues) 3598 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3599 tci); 3600 } 3601 3602 get_cpus_map: 3603 if (queue_index < 0) { 3604 dev_maps = rcu_dereference(sb_dev->xps_cpus_map); 3605 if (dev_maps) { 3606 unsigned int tci = skb->sender_cpu - 1; 3607 3608 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3609 tci); 3610 } 3611 } 3612 rcu_read_unlock(); 3613 3614 return queue_index; 3615 #else 3616 return -1; 3617 #endif 3618 } 3619 3620 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3621 struct net_device *sb_dev, 3622 select_queue_fallback_t fallback) 3623 { 3624 return 0; 3625 } 3626 EXPORT_SYMBOL(dev_pick_tx_zero); 3627 3628 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3629 struct net_device *sb_dev, 3630 select_queue_fallback_t fallback) 3631 { 3632 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 3633 } 3634 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 3635 3636 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 3637 struct net_device *sb_dev) 3638 { 3639 struct sock *sk = skb->sk; 3640 int queue_index = sk_tx_queue_get(sk); 3641 3642 sb_dev = sb_dev ? 
: dev; 3643 3644 if (queue_index < 0 || skb->ooo_okay || 3645 queue_index >= dev->real_num_tx_queues) { 3646 int new_index = get_xps_queue(dev, sb_dev, skb); 3647 3648 if (new_index < 0) 3649 new_index = skb_tx_hash(dev, sb_dev, skb); 3650 3651 if (queue_index != new_index && sk && 3652 sk_fullsock(sk) && 3653 rcu_access_pointer(sk->sk_dst_cache)) 3654 sk_tx_queue_set(sk, new_index); 3655 3656 queue_index = new_index; 3657 } 3658 3659 return queue_index; 3660 } 3661 3662 struct netdev_queue *netdev_pick_tx(struct net_device *dev, 3663 struct sk_buff *skb, 3664 struct net_device *sb_dev) 3665 { 3666 int queue_index = 0; 3667 3668 #ifdef CONFIG_XPS 3669 u32 sender_cpu = skb->sender_cpu - 1; 3670 3671 if (sender_cpu >= (u32)NR_CPUS) 3672 skb->sender_cpu = raw_smp_processor_id() + 1; 3673 #endif 3674 3675 if (dev->real_num_tx_queues != 1) { 3676 const struct net_device_ops *ops = dev->netdev_ops; 3677 3678 if (ops->ndo_select_queue) 3679 queue_index = ops->ndo_select_queue(dev, skb, sb_dev, 3680 __netdev_pick_tx); 3681 else 3682 queue_index = __netdev_pick_tx(dev, skb, sb_dev); 3683 3684 queue_index = netdev_cap_txqueue(dev, queue_index); 3685 } 3686 3687 skb_set_queue_mapping(skb, queue_index); 3688 return netdev_get_tx_queue(dev, queue_index); 3689 } 3690 3691 /** 3692 * __dev_queue_xmit - transmit a buffer 3693 * @skb: buffer to transmit 3694 * @sb_dev: suboordinate device used for L2 forwarding offload 3695 * 3696 * Queue a buffer for transmission to a network device. The caller must 3697 * have set the device and priority and built the buffer before calling 3698 * this function. The function can be called from an interrupt. 3699 * 3700 * A negative errno code is returned on a failure. A success does not 3701 * guarantee the frame will be transmitted as it may be dropped due 3702 * to congestion or traffic shaping. 3703 * 3704 * ----------------------------------------------------------------------------------- 3705 * I notice this method can also return errors from the queue disciplines, 3706 * including NET_XMIT_DROP, which is a positive value. So, errors can also 3707 * be positive. 3708 * 3709 * Regardless of the return value, the skb is consumed, so it is currently 3710 * difficult to retry a send to this method. (You can bump the ref count 3711 * before sending to hold a reference for retry if you are careful.) 3712 * 3713 * When calling this method, interrupts MUST be enabled. This is because 3714 * the BH enable code must have IRQs enabled so that it will not deadlock. 3715 * --BLG 3716 */ 3717 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 3718 { 3719 struct net_device *dev = skb->dev; 3720 struct netdev_queue *txq; 3721 struct Qdisc *q; 3722 int rc = -ENOMEM; 3723 bool again = false; 3724 3725 skb_reset_mac_header(skb); 3726 3727 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 3728 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); 3729 3730 /* Disable soft irqs for various locks below. Also 3731 * stops preemption for RCU. 3732 */ 3733 rcu_read_lock_bh(); 3734 3735 skb_update_prio(skb); 3736 3737 qdisc_pkt_len_init(skb); 3738 #ifdef CONFIG_NET_CLS_ACT 3739 skb->tc_at_ingress = 0; 3740 # ifdef CONFIG_NET_EGRESS 3741 if (static_branch_unlikely(&egress_needed_key)) { 3742 skb = sch_handle_egress(skb, &rc, dev); 3743 if (!skb) 3744 goto out; 3745 } 3746 # endif 3747 #endif 3748 /* If device/qdisc don't need skb->dst, release it right now while 3749 * its hot in this cpu cache. 
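 * (IFF_XMIT_DST_RELEASE is set by default at allocation time; devices
 * that do need the dst in their xmit path keep it via netif_keep_dst().)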
3750 */ 3751 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 3752 skb_dst_drop(skb); 3753 else 3754 skb_dst_force(skb); 3755 3756 txq = netdev_pick_tx(dev, skb, sb_dev); 3757 q = rcu_dereference_bh(txq->qdisc); 3758 3759 trace_net_dev_queue(skb); 3760 if (q->enqueue) { 3761 rc = __dev_xmit_skb(skb, q, dev, txq); 3762 goto out; 3763 } 3764 3765 /* The device has no queue. Common case for software devices: 3766 * loopback, all the sorts of tunnels... 3767 3768 * Really, it is unlikely that netif_tx_lock protection is necessary 3769 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 3770 * counters.) 3771 * However, it is possible, that they rely on protection 3772 * made by us here. 3773 3774 * Check this and shot the lock. It is not prone from deadlocks. 3775 *Either shot noqueue qdisc, it is even simpler 8) 3776 */ 3777 if (dev->flags & IFF_UP) { 3778 int cpu = smp_processor_id(); /* ok because BHs are off */ 3779 3780 if (txq->xmit_lock_owner != cpu) { 3781 if (unlikely(__this_cpu_read(xmit_recursion) > 3782 XMIT_RECURSION_LIMIT)) 3783 goto recursion_alert; 3784 3785 skb = validate_xmit_skb(skb, dev, &again); 3786 if (!skb) 3787 goto out; 3788 3789 HARD_TX_LOCK(dev, txq, cpu); 3790 3791 if (!netif_xmit_stopped(txq)) { 3792 __this_cpu_inc(xmit_recursion); 3793 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 3794 __this_cpu_dec(xmit_recursion); 3795 if (dev_xmit_complete(rc)) { 3796 HARD_TX_UNLOCK(dev, txq); 3797 goto out; 3798 } 3799 } 3800 HARD_TX_UNLOCK(dev, txq); 3801 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 3802 dev->name); 3803 } else { 3804 /* Recursion is detected! It is possible, 3805 * unfortunately 3806 */ 3807 recursion_alert: 3808 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 3809 dev->name); 3810 } 3811 } 3812 3813 rc = -ENETDOWN; 3814 rcu_read_unlock_bh(); 3815 3816 atomic_long_inc(&dev->tx_dropped); 3817 kfree_skb_list(skb); 3818 return rc; 3819 out: 3820 rcu_read_unlock_bh(); 3821 return rc; 3822 } 3823 3824 int dev_queue_xmit(struct sk_buff *skb) 3825 { 3826 return __dev_queue_xmit(skb, NULL); 3827 } 3828 EXPORT_SYMBOL(dev_queue_xmit); 3829 3830 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) 3831 { 3832 return __dev_queue_xmit(skb, sb_dev); 3833 } 3834 EXPORT_SYMBOL(dev_queue_xmit_accel); 3835 3836 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3837 { 3838 struct net_device *dev = skb->dev; 3839 struct sk_buff *orig_skb = skb; 3840 struct netdev_queue *txq; 3841 int ret = NETDEV_TX_BUSY; 3842 bool again = false; 3843 3844 if (unlikely(!netif_running(dev) || 3845 !netif_carrier_ok(dev))) 3846 goto drop; 3847 3848 skb = validate_xmit_skb_list(skb, dev, &again); 3849 if (skb != orig_skb) 3850 goto drop; 3851 3852 skb_set_queue_mapping(skb, queue_id); 3853 txq = skb_get_tx_queue(dev, skb); 3854 3855 local_bh_disable(); 3856 3857 HARD_TX_LOCK(dev, txq, smp_processor_id()); 3858 if (!netif_xmit_frozen_or_drv_stopped(txq)) 3859 ret = netdev_start_xmit(skb, dev, txq, false); 3860 HARD_TX_UNLOCK(dev, txq); 3861 3862 local_bh_enable(); 3863 3864 if (!dev_xmit_complete(ret)) 3865 kfree_skb(skb); 3866 3867 return ret; 3868 drop: 3869 atomic_long_inc(&dev->tx_dropped); 3870 kfree_skb_list(skb); 3871 return NET_XMIT_DROP; 3872 } 3873 EXPORT_SYMBOL(dev_direct_xmit); 3874 3875 /************************************************************************* 3876 * Receiver routines 3877 *************************************************************************/ 3878 3879 int netdev_max_backlog 
__read_mostly = 1000; 3880 EXPORT_SYMBOL(netdev_max_backlog); 3881 3882 int netdev_tstamp_prequeue __read_mostly = 1; 3883 int netdev_budget __read_mostly = 300; 3884 unsigned int __read_mostly netdev_budget_usecs = 2000; 3885 int weight_p __read_mostly = 64; /* old backlog weight */ 3886 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 3887 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 3888 int dev_rx_weight __read_mostly = 64; 3889 int dev_tx_weight __read_mostly = 64; 3890 3891 /* Called with irq disabled */ 3892 static inline void ____napi_schedule(struct softnet_data *sd, 3893 struct napi_struct *napi) 3894 { 3895 list_add_tail(&napi->poll_list, &sd->poll_list); 3896 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3897 } 3898 3899 #ifdef CONFIG_RPS 3900 3901 /* One global table that all flow-based protocols share. */ 3902 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 3903 EXPORT_SYMBOL(rps_sock_flow_table); 3904 u32 rps_cpu_mask __read_mostly; 3905 EXPORT_SYMBOL(rps_cpu_mask); 3906 3907 struct static_key rps_needed __read_mostly; 3908 EXPORT_SYMBOL(rps_needed); 3909 struct static_key rfs_needed __read_mostly; 3910 EXPORT_SYMBOL(rfs_needed); 3911 3912 static struct rps_dev_flow * 3913 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 3914 struct rps_dev_flow *rflow, u16 next_cpu) 3915 { 3916 if (next_cpu < nr_cpu_ids) { 3917 #ifdef CONFIG_RFS_ACCEL 3918 struct netdev_rx_queue *rxqueue; 3919 struct rps_dev_flow_table *flow_table; 3920 struct rps_dev_flow *old_rflow; 3921 u32 flow_id; 3922 u16 rxq_index; 3923 int rc; 3924 3925 /* Should we steer this flow to a different hardware queue? */ 3926 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 3927 !(dev->features & NETIF_F_NTUPLE)) 3928 goto out; 3929 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 3930 if (rxq_index == skb_get_rx_queue(skb)) 3931 goto out; 3932 3933 rxqueue = dev->_rx + rxq_index; 3934 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3935 if (!flow_table) 3936 goto out; 3937 flow_id = skb_get_hash(skb) & flow_table->mask; 3938 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 3939 rxq_index, flow_id); 3940 if (rc < 0) 3941 goto out; 3942 old_rflow = rflow; 3943 rflow = &flow_table->flows[flow_id]; 3944 rflow->filter = rc; 3945 if (old_rflow->filter == rflow->filter) 3946 old_rflow->filter = RPS_NO_FILTER; 3947 out: 3948 #endif 3949 rflow->last_qtail = 3950 per_cpu(softnet_data, next_cpu).input_queue_head; 3951 } 3952 3953 rflow->cpu = next_cpu; 3954 return rflow; 3955 } 3956 3957 /* 3958 * get_rps_cpu is called from netif_receive_skb and returns the target 3959 * CPU from the RPS map of the receiving queue for a given skb. 3960 * rcu_read_lock must be held on entry. 
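 *
 * Each rps_sock_flow_table entry packs the flow hash into its upper bits
 * and the desired CPU into the low rps_cpu_mask bits; that is why the
 * code below validates the hash with (ident ^ hash) & ~rps_cpu_mask and
 * extracts the CPU with ident & rps_cpu_mask.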
3961 */ 3962 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 3963 struct rps_dev_flow **rflowp) 3964 { 3965 const struct rps_sock_flow_table *sock_flow_table; 3966 struct netdev_rx_queue *rxqueue = dev->_rx; 3967 struct rps_dev_flow_table *flow_table; 3968 struct rps_map *map; 3969 int cpu = -1; 3970 u32 tcpu; 3971 u32 hash; 3972 3973 if (skb_rx_queue_recorded(skb)) { 3974 u16 index = skb_get_rx_queue(skb); 3975 3976 if (unlikely(index >= dev->real_num_rx_queues)) { 3977 WARN_ONCE(dev->real_num_rx_queues > 1, 3978 "%s received packet on queue %u, but number " 3979 "of RX queues is %u\n", 3980 dev->name, index, dev->real_num_rx_queues); 3981 goto done; 3982 } 3983 rxqueue += index; 3984 } 3985 3986 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 3987 3988 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3989 map = rcu_dereference(rxqueue->rps_map); 3990 if (!flow_table && !map) 3991 goto done; 3992 3993 skb_reset_network_header(skb); 3994 hash = skb_get_hash(skb); 3995 if (!hash) 3996 goto done; 3997 3998 sock_flow_table = rcu_dereference(rps_sock_flow_table); 3999 if (flow_table && sock_flow_table) { 4000 struct rps_dev_flow *rflow; 4001 u32 next_cpu; 4002 u32 ident; 4003 4004 /* First check into global flow table if there is a match */ 4005 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4006 if ((ident ^ hash) & ~rps_cpu_mask) 4007 goto try_rps; 4008 4009 next_cpu = ident & rps_cpu_mask; 4010 4011 /* OK, now we know there is a match, 4012 * we can look at the local (per receive queue) flow table 4013 */ 4014 rflow = &flow_table->flows[hash & flow_table->mask]; 4015 tcpu = rflow->cpu; 4016 4017 /* 4018 * If the desired CPU (where last recvmsg was done) is 4019 * different from current CPU (one in the rx-queue flow 4020 * table entry), switch if one of the following holds: 4021 * - Current CPU is unset (>= nr_cpu_ids). 4022 * - Current CPU is offline. 4023 * - The current CPU's queue tail has advanced beyond the 4024 * last packet that was enqueued using this table entry. 4025 * This guarantees that all previous packets for the flow 4026 * have been dequeued, thus preserving in order delivery. 4027 */ 4028 if (unlikely(tcpu != next_cpu) && 4029 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4030 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4031 rflow->last_qtail)) >= 0)) { 4032 tcpu = next_cpu; 4033 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4034 } 4035 4036 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4037 *rflowp = rflow; 4038 cpu = tcpu; 4039 goto done; 4040 } 4041 } 4042 4043 try_rps: 4044 4045 if (map) { 4046 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4047 if (cpu_online(tcpu)) { 4048 cpu = tcpu; 4049 goto done; 4050 } 4051 } 4052 4053 done: 4054 return cpu; 4055 } 4056 4057 #ifdef CONFIG_RFS_ACCEL 4058 4059 /** 4060 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4061 * @dev: Device on which the filter was set 4062 * @rxq_index: RX queue index 4063 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4064 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4065 * 4066 * Drivers that implement ndo_rx_flow_steer() should periodically call 4067 * this function for each installed filter and remove the filters for 4068 * which it returns %true. 
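 *
 * A sketch of such a periodic scan (priv and my_remove_hw_filter() are
 * assumed, hypothetical driver state):
 *
 *	for (i = 0; i < priv->n_rfs_filters; i++)
 *		if (rps_may_expire_flow(dev, priv->filters[i].rxq_index,
 *					priv->filters[i].flow_id, i))
 *			my_remove_hw_filter(priv, i);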
4069 */ 4070 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4071 u32 flow_id, u16 filter_id) 4072 { 4073 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4074 struct rps_dev_flow_table *flow_table; 4075 struct rps_dev_flow *rflow; 4076 bool expire = true; 4077 unsigned int cpu; 4078 4079 rcu_read_lock(); 4080 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4081 if (flow_table && flow_id <= flow_table->mask) { 4082 rflow = &flow_table->flows[flow_id]; 4083 cpu = READ_ONCE(rflow->cpu); 4084 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4085 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4086 rflow->last_qtail) < 4087 (int)(10 * flow_table->mask))) 4088 expire = false; 4089 } 4090 rcu_read_unlock(); 4091 return expire; 4092 } 4093 EXPORT_SYMBOL(rps_may_expire_flow); 4094 4095 #endif /* CONFIG_RFS_ACCEL */ 4096 4097 /* Called from hardirq (IPI) context */ 4098 static void rps_trigger_softirq(void *data) 4099 { 4100 struct softnet_data *sd = data; 4101 4102 ____napi_schedule(sd, &sd->backlog); 4103 sd->received_rps++; 4104 } 4105 4106 #endif /* CONFIG_RPS */ 4107 4108 /* 4109 * Check if this softnet_data structure is another cpu one 4110 * If yes, queue it to our IPI list and return 1 4111 * If no, return 0 4112 */ 4113 static int rps_ipi_queued(struct softnet_data *sd) 4114 { 4115 #ifdef CONFIG_RPS 4116 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4117 4118 if (sd != mysd) { 4119 sd->rps_ipi_next = mysd->rps_ipi_list; 4120 mysd->rps_ipi_list = sd; 4121 4122 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4123 return 1; 4124 } 4125 #endif /* CONFIG_RPS */ 4126 return 0; 4127 } 4128 4129 #ifdef CONFIG_NET_FLOW_LIMIT 4130 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4131 #endif 4132 4133 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4134 { 4135 #ifdef CONFIG_NET_FLOW_LIMIT 4136 struct sd_flow_limit *fl; 4137 struct softnet_data *sd; 4138 unsigned int old_flow, new_flow; 4139 4140 if (qlen < (netdev_max_backlog >> 1)) 4141 return false; 4142 4143 sd = this_cpu_ptr(&softnet_data); 4144 4145 rcu_read_lock(); 4146 fl = rcu_dereference(sd->flow_limit); 4147 if (fl) { 4148 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4149 old_flow = fl->history[fl->history_head]; 4150 fl->history[fl->history_head] = new_flow; 4151 4152 fl->history_head++; 4153 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4154 4155 if (likely(fl->buckets[old_flow])) 4156 fl->buckets[old_flow]--; 4157 4158 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4159 fl->count++; 4160 rcu_read_unlock(); 4161 return true; 4162 } 4163 } 4164 rcu_read_unlock(); 4165 #endif 4166 return false; 4167 } 4168 4169 /* 4170 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4171 * queue (may be a remote CPU queue). 
4172 */ 4173 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4174 unsigned int *qtail) 4175 { 4176 struct softnet_data *sd; 4177 unsigned long flags; 4178 unsigned int qlen; 4179 4180 sd = &per_cpu(softnet_data, cpu); 4181 4182 local_irq_save(flags); 4183 4184 rps_lock(sd); 4185 if (!netif_running(skb->dev)) 4186 goto drop; 4187 qlen = skb_queue_len(&sd->input_pkt_queue); 4188 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4189 if (qlen) { 4190 enqueue: 4191 __skb_queue_tail(&sd->input_pkt_queue, skb); 4192 input_queue_tail_incr_save(sd, qtail); 4193 rps_unlock(sd); 4194 local_irq_restore(flags); 4195 return NET_RX_SUCCESS; 4196 } 4197 4198 /* Schedule NAPI for backlog device 4199 * We can use non atomic operation since we own the queue lock 4200 */ 4201 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 4202 if (!rps_ipi_queued(sd)) 4203 ____napi_schedule(sd, &sd->backlog); 4204 } 4205 goto enqueue; 4206 } 4207 4208 drop: 4209 sd->dropped++; 4210 rps_unlock(sd); 4211 4212 local_irq_restore(flags); 4213 4214 atomic_long_inc(&skb->dev->rx_dropped); 4215 kfree_skb(skb); 4216 return NET_RX_DROP; 4217 } 4218 4219 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4220 { 4221 struct net_device *dev = skb->dev; 4222 struct netdev_rx_queue *rxqueue; 4223 4224 rxqueue = dev->_rx; 4225 4226 if (skb_rx_queue_recorded(skb)) { 4227 u16 index = skb_get_rx_queue(skb); 4228 4229 if (unlikely(index >= dev->real_num_rx_queues)) { 4230 WARN_ONCE(dev->real_num_rx_queues > 1, 4231 "%s received packet on queue %u, but number " 4232 "of RX queues is %u\n", 4233 dev->name, index, dev->real_num_rx_queues); 4234 4235 return rxqueue; /* Return first rxqueue */ 4236 } 4237 rxqueue += index; 4238 } 4239 return rxqueue; 4240 } 4241 4242 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4243 struct xdp_buff *xdp, 4244 struct bpf_prog *xdp_prog) 4245 { 4246 struct netdev_rx_queue *rxqueue; 4247 void *orig_data, *orig_data_end; 4248 u32 metalen, act = XDP_DROP; 4249 int hlen, off; 4250 u32 mac_len; 4251 4252 /* Reinjected packets coming from act_mirred or similar should 4253 * not get XDP generic processing. 4254 */ 4255 if (skb_cloned(skb)) 4256 return XDP_PASS; 4257 4258 /* XDP packets must be linear and must have sufficient headroom 4259 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4260 * native XDP provides, thus we need to do it here as well. 4261 */ 4262 if (skb_is_nonlinear(skb) || 4263 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4264 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4265 int troom = skb->tail + skb->data_len - skb->end; 4266 4267 /* In case we have to go down the path and also linearize, 4268 * then lets do the pskb_expand_head() work just once here. 4269 */ 4270 if (pskb_expand_head(skb, 4271 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4272 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4273 goto do_drop; 4274 if (skb_linearize(skb)) 4275 goto do_drop; 4276 } 4277 4278 /* The XDP program wants to see the packet starting at the MAC 4279 * header. 
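 * Roughly, the xdp_buff set up just below covers (a sketch):
 *
 *	data_hard_start                    data (MAC header)                data_end
 *	|<--- skb_headroom() - mac_len --->|<--- mac_len + skb_headlen() --->|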
4280 */ 4281 mac_len = skb->data - skb_mac_header(skb); 4282 hlen = skb_headlen(skb) + mac_len; 4283 xdp->data = skb->data - mac_len; 4284 xdp->data_meta = xdp->data; 4285 xdp->data_end = xdp->data + hlen; 4286 xdp->data_hard_start = skb->data - skb_headroom(skb); 4287 orig_data_end = xdp->data_end; 4288 orig_data = xdp->data; 4289 4290 rxqueue = netif_get_rxqueue(skb); 4291 xdp->rxq = &rxqueue->xdp_rxq; 4292 4293 act = bpf_prog_run_xdp(xdp_prog, xdp); 4294 4295 off = xdp->data - orig_data; 4296 if (off > 0) 4297 __skb_pull(skb, off); 4298 else if (off < 0) 4299 __skb_push(skb, -off); 4300 skb->mac_header += off; 4301 4302 /* Check if bpf_xdp_adjust_tail was used. It can only "shrink" the 4303 * packet. 4304 */ 4305 off = orig_data_end - xdp->data_end; 4306 if (off != 0) { 4307 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4308 skb->len -= off; 4309 4310 } 4311 4312 switch (act) { 4313 case XDP_REDIRECT: 4314 case XDP_TX: 4315 __skb_push(skb, mac_len); 4316 break; 4317 case XDP_PASS: 4318 metalen = xdp->data - xdp->data_meta; 4319 if (metalen) 4320 skb_metadata_set(skb, metalen); 4321 break; 4322 default: 4323 bpf_warn_invalid_xdp_action(act); 4324 /* fall through */ 4325 case XDP_ABORTED: 4326 trace_xdp_exception(skb->dev, xdp_prog, act); 4327 /* fall through */ 4328 case XDP_DROP: 4329 do_drop: 4330 kfree_skb(skb); 4331 break; 4332 } 4333 4334 return act; 4335 } 4336 4337 /* When doing generic XDP we have to bypass the qdisc layer and the 4338 * network taps in order to match in-driver-XDP behavior. 4339 */ 4340 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4341 { 4342 struct net_device *dev = skb->dev; 4343 struct netdev_queue *txq; 4344 bool free_skb = true; 4345 int cpu, rc; 4346 4347 txq = netdev_pick_tx(dev, skb, NULL); 4348 cpu = smp_processor_id(); 4349 HARD_TX_LOCK(dev, txq, cpu); 4350 if (!netif_xmit_stopped(txq)) { 4351 rc = netdev_start_xmit(skb, dev, txq, 0); 4352 if (dev_xmit_complete(rc)) 4353 free_skb = false; 4354 } 4355 HARD_TX_UNLOCK(dev, txq); 4356 if (free_skb) { 4357 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4358 kfree_skb(skb); 4359 } 4360 } 4361 EXPORT_SYMBOL_GPL(generic_xdp_tx); 4362 4363 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4364 4365 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4366 { 4367 if (xdp_prog) { 4368 struct xdp_buff xdp; 4369 u32 act; 4370 int err; 4371 4372 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4373 if (act != XDP_PASS) { 4374 switch (act) { 4375 case XDP_REDIRECT: 4376 err = xdp_do_generic_redirect(skb->dev, skb, 4377 &xdp, xdp_prog); 4378 if (err) 4379 goto out_redir; 4380 break; 4381 case XDP_TX: 4382 generic_xdp_tx(skb, xdp_prog); 4383 break; 4384 } 4385 return XDP_DROP; 4386 } 4387 } 4388 return XDP_PASS; 4389 out_redir: 4390 kfree_skb(skb); 4391 return XDP_DROP; 4392 } 4393 EXPORT_SYMBOL_GPL(do_xdp_generic); 4394 4395 static int netif_rx_internal(struct sk_buff *skb) 4396 { 4397 int ret; 4398 4399 net_timestamp_check(netdev_tstamp_prequeue, skb); 4400 4401 trace_netif_rx(skb); 4402 4403 if (static_branch_unlikely(&generic_xdp_needed_key)) { 4404 int ret; 4405 4406 preempt_disable(); 4407 rcu_read_lock(); 4408 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 4409 rcu_read_unlock(); 4410 preempt_enable(); 4411 4412 /* Consider XDP consuming the packet a success from 4413 * the netdev point of view; we do not want to count 4414 * this as an error.
4415 */ 4416 if (ret != XDP_PASS) 4417 return NET_RX_SUCCESS; 4418 } 4419 4420 #ifdef CONFIG_RPS 4421 if (static_key_false(&rps_needed)) { 4422 struct rps_dev_flow voidflow, *rflow = &voidflow; 4423 int cpu; 4424 4425 preempt_disable(); 4426 rcu_read_lock(); 4427 4428 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4429 if (cpu < 0) 4430 cpu = smp_processor_id(); 4431 4432 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4433 4434 rcu_read_unlock(); 4435 preempt_enable(); 4436 } else 4437 #endif 4438 { 4439 unsigned int qtail; 4440 4441 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 4442 put_cpu(); 4443 } 4444 return ret; 4445 } 4446 4447 /** 4448 * netif_rx - post buffer to the network code 4449 * @skb: buffer to post 4450 * 4451 * This function receives a packet from a device driver and queues it for 4452 * the upper (protocol) levels to process. It always succeeds. The buffer 4453 * may be dropped during processing for congestion control or by the 4454 * protocol layers. 4455 * 4456 * return values: 4457 * NET_RX_SUCCESS (no congestion) 4458 * NET_RX_DROP (packet was dropped) 4459 * 4460 */ 4461 4462 int netif_rx(struct sk_buff *skb) 4463 { 4464 trace_netif_rx_entry(skb); 4465 4466 return netif_rx_internal(skb); 4467 } 4468 EXPORT_SYMBOL(netif_rx); 4469 4470 int netif_rx_ni(struct sk_buff *skb) 4471 { 4472 int err; 4473 4474 trace_netif_rx_ni_entry(skb); 4475 4476 preempt_disable(); 4477 err = netif_rx_internal(skb); 4478 if (local_softirq_pending()) 4479 do_softirq(); 4480 preempt_enable(); 4481 4482 return err; 4483 } 4484 EXPORT_SYMBOL(netif_rx_ni); 4485 4486 static __latent_entropy void net_tx_action(struct softirq_action *h) 4487 { 4488 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4489 4490 if (sd->completion_queue) { 4491 struct sk_buff *clist; 4492 4493 local_irq_disable(); 4494 clist = sd->completion_queue; 4495 sd->completion_queue = NULL; 4496 local_irq_enable(); 4497 4498 while (clist) { 4499 struct sk_buff *skb = clist; 4500 4501 clist = clist->next; 4502 4503 WARN_ON(refcount_read(&skb->users)); 4504 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4505 trace_consume_skb(skb); 4506 else 4507 trace_kfree_skb(skb, net_tx_action); 4508 4509 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4510 __kfree_skb(skb); 4511 else 4512 __kfree_skb_defer(skb); 4513 } 4514 4515 __kfree_skb_flush(); 4516 } 4517 4518 if (sd->output_queue) { 4519 struct Qdisc *head; 4520 4521 local_irq_disable(); 4522 head = sd->output_queue; 4523 sd->output_queue = NULL; 4524 sd->output_queue_tailp = &sd->output_queue; 4525 local_irq_enable(); 4526 4527 while (head) { 4528 struct Qdisc *q = head; 4529 spinlock_t *root_lock = NULL; 4530 4531 head = head->next_sched; 4532 4533 if (!(q->flags & TCQ_F_NOLOCK)) { 4534 root_lock = qdisc_lock(q); 4535 spin_lock(root_lock); 4536 } 4537 /* We need to make sure head->next_sched is read 4538 * before clearing __QDISC_STATE_SCHED 4539 */ 4540 smp_mb__before_atomic(); 4541 clear_bit(__QDISC_STATE_SCHED, &q->state); 4542 qdisc_run(q); 4543 if (root_lock) 4544 spin_unlock(root_lock); 4545 } 4546 } 4547 4548 xfrm_dev_backlog(sd); 4549 } 4550 4551 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 4552 /* This hook is defined here for ATM LANE */ 4553 int (*br_fdb_test_addr_hook)(struct net_device *dev, 4554 unsigned char *addr) __read_mostly; 4555 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 4556 #endif 4557 4558 static inline struct sk_buff * 4559 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4560 struct 
net_device *orig_dev) 4561 { 4562 #ifdef CONFIG_NET_CLS_ACT 4563 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 4564 struct tcf_result cl_res; 4565 4566 /* If there's at least one ingress present somewhere (so 4567 * we get here via enabled static key), remaining devices 4568 * that are not configured with an ingress qdisc will bail 4569 * out here. 4570 */ 4571 if (!miniq) 4572 return skb; 4573 4574 if (*pt_prev) { 4575 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4576 *pt_prev = NULL; 4577 } 4578 4579 qdisc_skb_cb(skb)->pkt_len = skb->len; 4580 skb->tc_at_ingress = 1; 4581 mini_qdisc_bstats_cpu_update(miniq, skb); 4582 4583 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 4584 case TC_ACT_OK: 4585 case TC_ACT_RECLASSIFY: 4586 skb->tc_index = TC_H_MIN(cl_res.classid); 4587 break; 4588 case TC_ACT_SHOT: 4589 mini_qdisc_qstats_cpu_drop(miniq); 4590 kfree_skb(skb); 4591 return NULL; 4592 case TC_ACT_STOLEN: 4593 case TC_ACT_QUEUED: 4594 case TC_ACT_TRAP: 4595 consume_skb(skb); 4596 return NULL; 4597 case TC_ACT_REDIRECT: 4598 /* skb_mac_header check was done by cls/act_bpf, so 4599 * we can safely push the L2 header back before 4600 * redirecting to another netdev 4601 */ 4602 __skb_push(skb, skb->mac_len); 4603 skb_do_redirect(skb); 4604 return NULL; 4605 default: 4606 break; 4607 } 4608 #endif /* CONFIG_NET_CLS_ACT */ 4609 return skb; 4610 } 4611 4612 /** 4613 * netdev_is_rx_handler_busy - check if receive handler is registered 4614 * @dev: device to check 4615 * 4616 * Check if a receive handler is already registered for a given device. 4617 * Return true if there is one. 4618 * 4619 * The caller must hold the rtnl_mutex. 4620 */ 4621 bool netdev_is_rx_handler_busy(struct net_device *dev) 4622 { 4623 ASSERT_RTNL(); 4624 return dev && rtnl_dereference(dev->rx_handler); 4625 } 4626 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 4627 4628 /** 4629 * netdev_rx_handler_register - register receive handler 4630 * @dev: device to register a handler for 4631 * @rx_handler: receive handler to register 4632 * @rx_handler_data: data pointer that is used by rx handler 4633 * 4634 * Register a receive handler for a device. This handler will then be 4635 * called from __netif_receive_skb. A negative errno code is returned 4636 * on a failure. 4637 * 4638 * The caller must hold the rtnl_mutex. 4639 * 4640 * For a general description of rx_handler, see enum rx_handler_result. 4641 */ 4642 int netdev_rx_handler_register(struct net_device *dev, 4643 rx_handler_func_t *rx_handler, 4644 void *rx_handler_data) 4645 { 4646 if (netdev_is_rx_handler_busy(dev)) 4647 return -EBUSY; 4648 4649 if (dev->priv_flags & IFF_NO_RX_HANDLER) 4650 return -EINVAL; 4651 4652 /* Note: rx_handler_data must be set before rx_handler */ 4653 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 4654 rcu_assign_pointer(dev->rx_handler, rx_handler); 4655 4656 return 0; 4657 } 4658 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 4659 4660 /** 4661 * netdev_rx_handler_unregister - unregister receive handler 4662 * @dev: device to unregister a handler from 4663 * 4664 * Unregister a receive handler from a device. 4665 * 4666 * The caller must hold the rtnl_mutex. 4667 */ 4668 void netdev_rx_handler_unregister(struct net_device *dev) 4669 { 4670 4671 ASSERT_RTNL(); 4672 RCU_INIT_POINTER(dev->rx_handler, NULL); 4673 /* a reader seeing a non NULL rx_handler in an rcu_read_lock() 4674 * section is guaranteed to see a non NULL rx_handler_data 4675 * as well.
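 * A reader on the receive path effectively does (a sketch):
 *
 *	rx_handler = rcu_dereference(skb->dev->rx_handler);
 *	if (rx_handler)
 *		...;	// handler may then use rcu_dereference(dev->rx_handler_data)
 *
 * so rx_handler_data is only cleared after the synchronize_net() below,
 * once no reader can still observe the old rx_handler.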
4676 */ 4677 synchronize_net(); 4678 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 4679 } 4680 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 4681 4682 /* 4683 * Limit the use of PFMEMALLOC reserves to those protocols that implement 4684 * the special handling of PFMEMALLOC skbs. 4685 */ 4686 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 4687 { 4688 switch (skb->protocol) { 4689 case htons(ETH_P_ARP): 4690 case htons(ETH_P_IP): 4691 case htons(ETH_P_IPV6): 4692 case htons(ETH_P_8021Q): 4693 case htons(ETH_P_8021AD): 4694 return true; 4695 default: 4696 return false; 4697 } 4698 } 4699 4700 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 4701 int *ret, struct net_device *orig_dev) 4702 { 4703 #ifdef CONFIG_NETFILTER_INGRESS 4704 if (nf_hook_ingress_active(skb)) { 4705 int ingress_retval; 4706 4707 if (*pt_prev) { 4708 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4709 *pt_prev = NULL; 4710 } 4711 4712 rcu_read_lock(); 4713 ingress_retval = nf_hook_ingress(skb); 4714 rcu_read_unlock(); 4715 return ingress_retval; 4716 } 4717 #endif /* CONFIG_NETFILTER_INGRESS */ 4718 return 0; 4719 } 4720 4721 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, 4722 struct packet_type **ppt_prev) 4723 { 4724 struct packet_type *ptype, *pt_prev; 4725 rx_handler_func_t *rx_handler; 4726 struct net_device *orig_dev; 4727 bool deliver_exact = false; 4728 int ret = NET_RX_DROP; 4729 __be16 type; 4730 4731 net_timestamp_check(!netdev_tstamp_prequeue, skb); 4732 4733 trace_netif_receive_skb(skb); 4734 4735 orig_dev = skb->dev; 4736 4737 skb_reset_network_header(skb); 4738 if (!skb_transport_header_was_set(skb)) 4739 skb_reset_transport_header(skb); 4740 skb_reset_mac_len(skb); 4741 4742 pt_prev = NULL; 4743 4744 another_round: 4745 skb->skb_iif = skb->dev->ifindex; 4746 4747 __this_cpu_inc(softnet_data.processed); 4748 4749 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 4750 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 4751 skb = skb_vlan_untag(skb); 4752 if (unlikely(!skb)) 4753 goto out; 4754 } 4755 4756 if (skb_skip_tc_classify(skb)) 4757 goto skip_classify; 4758 4759 if (pfmemalloc) 4760 goto skip_taps; 4761 4762 list_for_each_entry_rcu(ptype, &ptype_all, list) { 4763 if (pt_prev) 4764 ret = deliver_skb(skb, pt_prev, orig_dev); 4765 pt_prev = ptype; 4766 } 4767 4768 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 4769 if (pt_prev) 4770 ret = deliver_skb(skb, pt_prev, orig_dev); 4771 pt_prev = ptype; 4772 } 4773 4774 skip_taps: 4775 #ifdef CONFIG_NET_INGRESS 4776 if (static_branch_unlikely(&ingress_needed_key)) { 4777 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); 4778 if (!skb) 4779 goto out; 4780 4781 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 4782 goto out; 4783 } 4784 #endif 4785 skb_reset_tc(skb); 4786 skip_classify: 4787 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 4788 goto drop; 4789 4790 if (skb_vlan_tag_present(skb)) { 4791 if (pt_prev) { 4792 ret = deliver_skb(skb, pt_prev, orig_dev); 4793 pt_prev = NULL; 4794 } 4795 if (vlan_do_receive(&skb)) 4796 goto another_round; 4797 else if (unlikely(!skb)) 4798 goto out; 4799 } 4800 4801 rx_handler = rcu_dereference(skb->dev->rx_handler); 4802 if (rx_handler) { 4803 if (pt_prev) { 4804 ret = deliver_skb(skb, pt_prev, orig_dev); 4805 pt_prev = NULL; 4806 } 4807 switch (rx_handler(&skb)) { 4808 case RX_HANDLER_CONSUMED: 4809 ret = NET_RX_SUCCESS; 4810 goto out; 4811 case RX_HANDLER_ANOTHER: 4812 goto another_round; 4813 case RX_HANDLER_EXACT: 4814 
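/* set deliver_exact, then fall through to RX_HANDLER_PASS */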
deliver_exact = true; 4815 case RX_HANDLER_PASS: 4816 break; 4817 default: 4818 BUG(); 4819 } 4820 } 4821 4822 if (unlikely(skb_vlan_tag_present(skb))) { 4823 if (skb_vlan_tag_get_id(skb)) 4824 skb->pkt_type = PACKET_OTHERHOST; 4825 /* Note: we might in the future use prio bits 4826 * and set skb->priority like in vlan_do_receive() 4827 * For the time being, just ignore Priority Code Point 4828 */ 4829 skb->vlan_tci = 0; 4830 } 4831 4832 type = skb->protocol; 4833 4834 /* deliver only exact match when indicated */ 4835 if (likely(!deliver_exact)) { 4836 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4837 &ptype_base[ntohs(type) & 4838 PTYPE_HASH_MASK]); 4839 } 4840 4841 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4842 &orig_dev->ptype_specific); 4843 4844 if (unlikely(skb->dev != orig_dev)) { 4845 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4846 &skb->dev->ptype_specific); 4847 } 4848 4849 if (pt_prev) { 4850 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 4851 goto drop; 4852 *ppt_prev = pt_prev; 4853 } else { 4854 drop: 4855 if (!deliver_exact) 4856 atomic_long_inc(&skb->dev->rx_dropped); 4857 else 4858 atomic_long_inc(&skb->dev->rx_nohandler); 4859 kfree_skb(skb); 4860 /* Jamal, now you will not able to escape explaining 4861 * me how you were going to use this. :-) 4862 */ 4863 ret = NET_RX_DROP; 4864 } 4865 4866 out: 4867 return ret; 4868 } 4869 4870 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 4871 { 4872 struct net_device *orig_dev = skb->dev; 4873 struct packet_type *pt_prev = NULL; 4874 int ret; 4875 4876 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 4877 if (pt_prev) 4878 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 4879 return ret; 4880 } 4881 4882 /** 4883 * netif_receive_skb_core - special purpose version of netif_receive_skb 4884 * @skb: buffer to process 4885 * 4886 * More direct receive version of netif_receive_skb(). It should 4887 * only be used by callers that have a need to skip RPS and Generic XDP. 4888 * Caller must also take care of handling if (page_is_)pfmemalloc. 4889 * 4890 * This function may only be called from softirq context and interrupts 4891 * should be enabled. 4892 * 4893 * Return values (usually ignored): 4894 * NET_RX_SUCCESS: no congestion 4895 * NET_RX_DROP: packet was dropped 4896 */ 4897 int netif_receive_skb_core(struct sk_buff *skb) 4898 { 4899 int ret; 4900 4901 rcu_read_lock(); 4902 ret = __netif_receive_skb_one_core(skb, false); 4903 rcu_read_unlock(); 4904 4905 return ret; 4906 } 4907 EXPORT_SYMBOL(netif_receive_skb_core); 4908 4909 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 4910 struct packet_type *pt_prev, 4911 struct net_device *orig_dev) 4912 { 4913 struct sk_buff *skb, *next; 4914 4915 if (!pt_prev) 4916 return; 4917 if (list_empty(head)) 4918 return; 4919 if (pt_prev->list_func != NULL) 4920 pt_prev->list_func(head, pt_prev, orig_dev); 4921 else 4922 list_for_each_entry_safe(skb, next, head, list) 4923 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 4924 } 4925 4926 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 4927 { 4928 /* Fast-path assumptions: 4929 * - There is no RX handler. 4930 * - Only one packet_type matches. 4931 * If either of these fails, we will end up doing some per-packet 4932 * processing in-line, then handling the 'last ptype' for the whole 4933 * sublist. 
This can't cause out-of-order delivery to any single ptype, 4934 * because the 'last ptype' must be constant across the sublist, and all 4935 * other ptypes are handled per-packet. 4936 */ 4937 /* Current (common) ptype of sublist */ 4938 struct packet_type *pt_curr = NULL; 4939 /* Current (common) orig_dev of sublist */ 4940 struct net_device *od_curr = NULL; 4941 struct list_head sublist; 4942 struct sk_buff *skb, *next; 4943 4944 INIT_LIST_HEAD(&sublist); 4945 list_for_each_entry_safe(skb, next, head, list) { 4946 struct net_device *orig_dev = skb->dev; 4947 struct packet_type *pt_prev = NULL; 4948 4949 list_del(&skb->list); 4950 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 4951 if (!pt_prev) 4952 continue; 4953 if (pt_curr != pt_prev || od_curr != orig_dev) { 4954 /* dispatch old sublist */ 4955 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 4956 /* start new sublist */ 4957 INIT_LIST_HEAD(&sublist); 4958 pt_curr = pt_prev; 4959 od_curr = orig_dev; 4960 } 4961 list_add_tail(&skb->list, &sublist); 4962 } 4963 4964 /* dispatch final sublist */ 4965 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 4966 } 4967 4968 static int __netif_receive_skb(struct sk_buff *skb) 4969 { 4970 int ret; 4971 4972 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 4973 unsigned int noreclaim_flag; 4974 4975 /* 4976 * PFMEMALLOC skbs are special, they should 4977 * - be delivered to SOCK_MEMALLOC sockets only 4978 * - stay away from userspace 4979 * - have bounded memory usage 4980 * 4981 * Use PF_MEMALLOC as this saves us from propagating the allocation 4982 * context down to all allocation sites. 4983 */ 4984 noreclaim_flag = memalloc_noreclaim_save(); 4985 ret = __netif_receive_skb_one_core(skb, true); 4986 memalloc_noreclaim_restore(noreclaim_flag); 4987 } else 4988 ret = __netif_receive_skb_one_core(skb, false); 4989 4990 return ret; 4991 } 4992 4993 static void __netif_receive_skb_list(struct list_head *head) 4994 { 4995 unsigned long noreclaim_flag = 0; 4996 struct sk_buff *skb, *next; 4997 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */ 4998 4999 list_for_each_entry_safe(skb, next, head, list) { 5000 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5001 struct list_head sublist; 5002 5003 /* Handle the previous sublist */ 5004 list_cut_before(&sublist, head, &skb->list); 5005 if (!list_empty(&sublist)) 5006 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5007 pfmemalloc = !pfmemalloc; 5008 /* See comments in __netif_receive_skb */ 5009 if (pfmemalloc) 5010 noreclaim_flag = memalloc_noreclaim_save(); 5011 else 5012 memalloc_noreclaim_restore(noreclaim_flag); 5013 } 5014 } 5015 /* Handle the remaining sublist */ 5016 if (!list_empty(head)) 5017 __netif_receive_skb_list_core(head, pfmemalloc); 5018 /* Restore pflags */ 5019 if (pfmemalloc) 5020 memalloc_noreclaim_restore(noreclaim_flag); 5021 } 5022 5023 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5024 { 5025 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5026 struct bpf_prog *new = xdp->prog; 5027 int ret = 0; 5028 5029 switch (xdp->command) { 5030 case XDP_SETUP_PROG: 5031 rcu_assign_pointer(dev->xdp_prog, new); 5032 if (old) 5033 bpf_prog_put(old); 5034 5035 if (old && !new) { 5036 static_branch_dec(&generic_xdp_needed_key); 5037 } else if (new && !old) { 5038 static_branch_inc(&generic_xdp_needed_key); 5039 dev_disable_lro(dev); 5040 dev_disable_gro_hw(dev); 5041 } 5042 break; 5043 5044 case XDP_QUERY_PROG: 5045 xdp->prog_id = old ? 
old->aux->id : 0; 5046 break; 5047 5048 default: 5049 ret = -EINVAL; 5050 break; 5051 } 5052 5053 return ret; 5054 } 5055 5056 static int netif_receive_skb_internal(struct sk_buff *skb) 5057 { 5058 int ret; 5059 5060 net_timestamp_check(netdev_tstamp_prequeue, skb); 5061 5062 if (skb_defer_rx_timestamp(skb)) 5063 return NET_RX_SUCCESS; 5064 5065 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5066 int ret; 5067 5068 preempt_disable(); 5069 rcu_read_lock(); 5070 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5071 rcu_read_unlock(); 5072 preempt_enable(); 5073 5074 if (ret != XDP_PASS) 5075 return NET_RX_DROP; 5076 } 5077 5078 rcu_read_lock(); 5079 #ifdef CONFIG_RPS 5080 if (static_key_false(&rps_needed)) { 5081 struct rps_dev_flow voidflow, *rflow = &voidflow; 5082 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5083 5084 if (cpu >= 0) { 5085 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5086 rcu_read_unlock(); 5087 return ret; 5088 } 5089 } 5090 #endif 5091 ret = __netif_receive_skb(skb); 5092 rcu_read_unlock(); 5093 return ret; 5094 } 5095 5096 static void netif_receive_skb_list_internal(struct list_head *head) 5097 { 5098 struct bpf_prog *xdp_prog = NULL; 5099 struct sk_buff *skb, *next; 5100 struct list_head sublist; 5101 5102 INIT_LIST_HEAD(&sublist); 5103 list_for_each_entry_safe(skb, next, head, list) { 5104 net_timestamp_check(netdev_tstamp_prequeue, skb); 5105 list_del(&skb->list); 5106 if (!skb_defer_rx_timestamp(skb)) 5107 list_add_tail(&skb->list, &sublist); 5108 } 5109 list_splice_init(&sublist, head); 5110 5111 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5112 preempt_disable(); 5113 rcu_read_lock(); 5114 list_for_each_entry_safe(skb, next, head, list) { 5115 xdp_prog = rcu_dereference(skb->dev->xdp_prog); 5116 list_del(&skb->list); 5117 if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) 5118 list_add_tail(&skb->list, &sublist); 5119 } 5120 rcu_read_unlock(); 5121 preempt_enable(); 5122 /* Put passed packets back on main list */ 5123 list_splice_init(&sublist, head); 5124 } 5125 5126 rcu_read_lock(); 5127 #ifdef CONFIG_RPS 5128 if (static_key_false(&rps_needed)) { 5129 list_for_each_entry_safe(skb, next, head, list) { 5130 struct rps_dev_flow voidflow, *rflow = &voidflow; 5131 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5132 5133 if (cpu >= 0) { 5134 /* Will be handled, remove from list */ 5135 list_del(&skb->list); 5136 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5137 } 5138 } 5139 } 5140 #endif 5141 __netif_receive_skb_list(head); 5142 rcu_read_unlock(); 5143 } 5144 5145 /** 5146 * netif_receive_skb - process receive buffer from network 5147 * @skb: buffer to process 5148 * 5149 * netif_receive_skb() is the main receive data processing function. 5150 * It always succeeds. The buffer may be dropped during processing 5151 * for congestion control or by the protocol layers. 5152 * 5153 * This function may only be called from softirq context and interrupts 5154 * should be enabled. 5155 * 5156 * Return values (usually ignored): 5157 * NET_RX_SUCCESS: no congestion 5158 * NET_RX_DROP: packet was dropped 5159 */ 5160 int netif_receive_skb(struct sk_buff *skb) 5161 { 5162 trace_netif_receive_skb_entry(skb); 5163 5164 return netif_receive_skb_internal(skb); 5165 } 5166 EXPORT_SYMBOL(netif_receive_skb); 5167 5168 /** 5169 * netif_receive_skb_list - process many receive buffers from network 5170 * @head: list of skbs to process. 
5171 * 5172 * Since return value of netif_receive_skb() is normally ignored, and 5173 * wouldn't be meaningful for a list, this function returns void. 5174 * 5175 * This function may only be called from softirq context and interrupts 5176 * should be enabled. 5177 */ 5178 void netif_receive_skb_list(struct list_head *head) 5179 { 5180 struct sk_buff *skb; 5181 5182 if (list_empty(head)) 5183 return; 5184 list_for_each_entry(skb, head, list) 5185 trace_netif_receive_skb_list_entry(skb); 5186 netif_receive_skb_list_internal(head); 5187 } 5188 EXPORT_SYMBOL(netif_receive_skb_list); 5189 5190 DEFINE_PER_CPU(struct work_struct, flush_works); 5191 5192 /* Network device is going away, flush any packets still pending */ 5193 static void flush_backlog(struct work_struct *work) 5194 { 5195 struct sk_buff *skb, *tmp; 5196 struct softnet_data *sd; 5197 5198 local_bh_disable(); 5199 sd = this_cpu_ptr(&softnet_data); 5200 5201 local_irq_disable(); 5202 rps_lock(sd); 5203 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5204 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5205 __skb_unlink(skb, &sd->input_pkt_queue); 5206 kfree_skb(skb); 5207 input_queue_head_incr(sd); 5208 } 5209 } 5210 rps_unlock(sd); 5211 local_irq_enable(); 5212 5213 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5214 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5215 __skb_unlink(skb, &sd->process_queue); 5216 kfree_skb(skb); 5217 input_queue_head_incr(sd); 5218 } 5219 } 5220 local_bh_enable(); 5221 } 5222 5223 static void flush_all_backlogs(void) 5224 { 5225 unsigned int cpu; 5226 5227 get_online_cpus(); 5228 5229 for_each_online_cpu(cpu) 5230 queue_work_on(cpu, system_highpri_wq, 5231 per_cpu_ptr(&flush_works, cpu)); 5232 5233 for_each_online_cpu(cpu) 5234 flush_work(per_cpu_ptr(&flush_works, cpu)); 5235 5236 put_online_cpus(); 5237 } 5238 5239 static int napi_gro_complete(struct sk_buff *skb) 5240 { 5241 struct packet_offload *ptype; 5242 __be16 type = skb->protocol; 5243 struct list_head *head = &offload_base; 5244 int err = -ENOENT; 5245 5246 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 5247 5248 if (NAPI_GRO_CB(skb)->count == 1) { 5249 skb_shinfo(skb)->gso_size = 0; 5250 goto out; 5251 } 5252 5253 rcu_read_lock(); 5254 list_for_each_entry_rcu(ptype, head, list) { 5255 if (ptype->type != type || !ptype->callbacks.gro_complete) 5256 continue; 5257 5258 err = ptype->callbacks.gro_complete(skb, 0); 5259 break; 5260 } 5261 rcu_read_unlock(); 5262 5263 if (err) { 5264 WARN_ON(&ptype->list == head); 5265 kfree_skb(skb); 5266 return NET_RX_SUCCESS; 5267 } 5268 5269 out: 5270 return netif_receive_skb_internal(skb); 5271 } 5272 5273 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, 5274 bool flush_old) 5275 { 5276 struct list_head *head = &napi->gro_hash[index].list; 5277 struct sk_buff *skb, *p; 5278 5279 list_for_each_entry_safe_reverse(skb, p, head, list) { 5280 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 5281 return; 5282 list_del(&skb->list); 5283 skb->next = NULL; 5284 napi_gro_complete(skb); 5285 napi->gro_count--; 5286 napi->gro_hash[index].count--; 5287 } 5288 } 5289 5290 /* napi->gro_hash[].list contains packets ordered by age. 5291 * youngest packets at the head of it. 5292 * Complete skbs in reverse order to reduce latencies. 
5293 */ 5294 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 5295 { 5296 u32 i; 5297 5298 for (i = 0; i < GRO_HASH_BUCKETS; i++) 5299 __napi_gro_flush_chain(napi, i, flush_old); 5300 } 5301 EXPORT_SYMBOL(napi_gro_flush); 5302 5303 static struct list_head *gro_list_prepare(struct napi_struct *napi, 5304 struct sk_buff *skb) 5305 { 5306 unsigned int maclen = skb->dev->hard_header_len; 5307 u32 hash = skb_get_hash_raw(skb); 5308 struct list_head *head; 5309 struct sk_buff *p; 5310 5311 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; 5312 list_for_each_entry(p, head, list) { 5313 unsigned long diffs; 5314 5315 NAPI_GRO_CB(p)->flush = 0; 5316 5317 if (hash != skb_get_hash_raw(p)) { 5318 NAPI_GRO_CB(p)->same_flow = 0; 5319 continue; 5320 } 5321 5322 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 5323 diffs |= p->vlan_tci ^ skb->vlan_tci; 5324 diffs |= skb_metadata_dst_cmp(p, skb); 5325 diffs |= skb_metadata_differs(p, skb); 5326 if (maclen == ETH_HLEN) 5327 diffs |= compare_ether_header(skb_mac_header(p), 5328 skb_mac_header(skb)); 5329 else if (!diffs) 5330 diffs = memcmp(skb_mac_header(p), 5331 skb_mac_header(skb), 5332 maclen); 5333 NAPI_GRO_CB(p)->same_flow = !diffs; 5334 } 5335 5336 return head; 5337 } 5338 5339 static void skb_gro_reset_offset(struct sk_buff *skb) 5340 { 5341 const struct skb_shared_info *pinfo = skb_shinfo(skb); 5342 const skb_frag_t *frag0 = &pinfo->frags[0]; 5343 5344 NAPI_GRO_CB(skb)->data_offset = 0; 5345 NAPI_GRO_CB(skb)->frag0 = NULL; 5346 NAPI_GRO_CB(skb)->frag0_len = 0; 5347 5348 if (skb_mac_header(skb) == skb_tail_pointer(skb) && 5349 pinfo->nr_frags && 5350 !PageHighMem(skb_frag_page(frag0))) { 5351 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 5352 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, 5353 skb_frag_size(frag0), 5354 skb->end - skb->tail); 5355 } 5356 } 5357 5358 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 5359 { 5360 struct skb_shared_info *pinfo = skb_shinfo(skb); 5361 5362 BUG_ON(skb->end - skb->tail < grow); 5363 5364 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 5365 5366 skb->data_len -= grow; 5367 skb->tail += grow; 5368 5369 pinfo->frags[0].page_offset += grow; 5370 skb_frag_size_sub(&pinfo->frags[0], grow); 5371 5372 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 5373 skb_frag_unref(skb, 0); 5374 memmove(pinfo->frags, pinfo->frags + 1, 5375 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 5376 } 5377 } 5378 5379 static void gro_flush_oldest(struct list_head *head) 5380 { 5381 struct sk_buff *oldest; 5382 5383 oldest = list_last_entry(head, struct sk_buff, list); 5384 5385 /* We are called with head length >= MAX_GRO_SKBS, so this is 5386 * impossible. 5387 */ 5388 if (WARN_ON_ONCE(!oldest)) 5389 return; 5390 5391 /* Do not adjust napi->gro_count, caller is adding a new SKB to 5392 * the chain. 
5393 */ 5394 list_del(&oldest->list); 5395 napi_gro_complete(oldest); 5396 } 5397 5398 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5399 { 5400 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 5401 struct list_head *head = &offload_base; 5402 struct packet_offload *ptype; 5403 __be16 type = skb->protocol; 5404 struct list_head *gro_head; 5405 struct sk_buff *pp = NULL; 5406 enum gro_result ret; 5407 int same_flow; 5408 int grow; 5409 5410 if (netif_elide_gro(skb->dev)) 5411 goto normal; 5412 5413 gro_head = gro_list_prepare(napi, skb); 5414 5415 rcu_read_lock(); 5416 list_for_each_entry_rcu(ptype, head, list) { 5417 if (ptype->type != type || !ptype->callbacks.gro_receive) 5418 continue; 5419 5420 skb_set_network_header(skb, skb_gro_offset(skb)); 5421 skb_reset_mac_len(skb); 5422 NAPI_GRO_CB(skb)->same_flow = 0; 5423 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); 5424 NAPI_GRO_CB(skb)->free = 0; 5425 NAPI_GRO_CB(skb)->encap_mark = 0; 5426 NAPI_GRO_CB(skb)->recursion_counter = 0; 5427 NAPI_GRO_CB(skb)->is_fou = 0; 5428 NAPI_GRO_CB(skb)->is_atomic = 1; 5429 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 5430 5431 /* Setup for GRO checksum validation */ 5432 switch (skb->ip_summed) { 5433 case CHECKSUM_COMPLETE: 5434 NAPI_GRO_CB(skb)->csum = skb->csum; 5435 NAPI_GRO_CB(skb)->csum_valid = 1; 5436 NAPI_GRO_CB(skb)->csum_cnt = 0; 5437 break; 5438 case CHECKSUM_UNNECESSARY: 5439 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; 5440 NAPI_GRO_CB(skb)->csum_valid = 0; 5441 break; 5442 default: 5443 NAPI_GRO_CB(skb)->csum_cnt = 0; 5444 NAPI_GRO_CB(skb)->csum_valid = 0; 5445 } 5446 5447 pp = ptype->callbacks.gro_receive(gro_head, skb); 5448 break; 5449 } 5450 rcu_read_unlock(); 5451 5452 if (&ptype->list == head) 5453 goto normal; 5454 5455 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) { 5456 ret = GRO_CONSUMED; 5457 goto ok; 5458 } 5459 5460 same_flow = NAPI_GRO_CB(skb)->same_flow; 5461 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 5462 5463 if (pp) { 5464 list_del(&pp->list); 5465 pp->next = NULL; 5466 napi_gro_complete(pp); 5467 napi->gro_count--; 5468 napi->gro_hash[hash].count--; 5469 } 5470 5471 if (same_flow) 5472 goto ok; 5473 5474 if (NAPI_GRO_CB(skb)->flush) 5475 goto normal; 5476 5477 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { 5478 gro_flush_oldest(gro_head); 5479 } else { 5480 napi->gro_count++; 5481 napi->gro_hash[hash].count++; 5482 } 5483 NAPI_GRO_CB(skb)->count = 1; 5484 NAPI_GRO_CB(skb)->age = jiffies; 5485 NAPI_GRO_CB(skb)->last = skb; 5486 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 5487 list_add(&skb->list, gro_head); 5488 ret = GRO_HELD; 5489 5490 pull: 5491 grow = skb_gro_offset(skb) - skb_headlen(skb); 5492 if (grow > 0) 5493 gro_pull_from_frag0(skb, grow); 5494 ok: 5495 return ret; 5496 5497 normal: 5498 ret = GRO_NORMAL; 5499 goto pull; 5500 } 5501 5502 struct packet_offload *gro_find_receive_by_type(__be16 type) 5503 { 5504 struct list_head *offload_head = &offload_base; 5505 struct packet_offload *ptype; 5506 5507 list_for_each_entry_rcu(ptype, offload_head, list) { 5508 if (ptype->type != type || !ptype->callbacks.gro_receive) 5509 continue; 5510 return ptype; 5511 } 5512 return NULL; 5513 } 5514 EXPORT_SYMBOL(gro_find_receive_by_type); 5515 5516 struct packet_offload *gro_find_complete_by_type(__be16 type) 5517 { 5518 struct list_head *offload_head = &offload_base; 5519 struct packet_offload *ptype; 5520 5521 list_for_each_entry_rcu(ptype, offload_head, list) { 5522 if (ptype->type != type || !ptype->callbacks.gro_complete) 5523 continue; 5524 return ptype; 5525 } 5526 return NULL; 5527 } 5528 EXPORT_SYMBOL(gro_find_complete_by_type); 5529 5530 static void napi_skb_free_stolen_head(struct sk_buff *skb) 5531 { 5532 skb_dst_drop(skb); 5533 secpath_reset(skb); 5534 kmem_cache_free(skbuff_head_cache, skb); 5535 } 5536 5537 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 5538 { 5539 switch (ret) { 5540 case GRO_NORMAL: 5541 if (netif_receive_skb_internal(skb)) 5542 ret = GRO_DROP; 5543 break; 5544 5545 case GRO_DROP: 5546 kfree_skb(skb); 5547 break; 5548 5549 case GRO_MERGED_FREE: 5550 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5551 napi_skb_free_stolen_head(skb); 5552 else 5553 __kfree_skb(skb); 5554 break; 5555 5556 case GRO_HELD: 5557 case GRO_MERGED: 5558 case GRO_CONSUMED: 5559 break; 5560 } 5561 5562 return ret; 5563 } 5564 5565 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5566 { 5567 skb_mark_napi_id(skb, napi); 5568 trace_napi_gro_receive_entry(skb); 5569 5570 skb_gro_reset_offset(skb); 5571 5572 return napi_skb_finish(dev_gro_receive(napi, skb), skb); 5573 } 5574 EXPORT_SYMBOL(napi_gro_receive); 5575 5576 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 5577 { 5578 if (unlikely(skb->pfmemalloc)) { 5579 consume_skb(skb); 5580 return; 5581 } 5582 __skb_pull(skb, skb_headlen(skb)); 5583 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 5584 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 5585 skb->vlan_tci = 0; 5586 skb->dev = napi->dev; 5587 skb->skb_iif = 0; 5588 skb->encapsulation = 0; 5589 skb_shinfo(skb)->gso_type = 0; 5590 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5591 secpath_reset(skb); 5592 5593 napi->skb = skb; 5594 } 5595 5596 struct sk_buff *napi_get_frags(struct napi_struct *napi) 5597 { 5598 struct sk_buff *skb = napi->skb; 5599 5600 if (!skb) { 5601 skb = napi_alloc_skb(napi, 
GRO_MAX_HEAD); 5602 if (skb) { 5603 napi->skb = skb; 5604 skb_mark_napi_id(skb, napi); 5605 } 5606 } 5607 return skb; 5608 } 5609 EXPORT_SYMBOL(napi_get_frags); 5610 5611 static gro_result_t napi_frags_finish(struct napi_struct *napi, 5612 struct sk_buff *skb, 5613 gro_result_t ret) 5614 { 5615 switch (ret) { 5616 case GRO_NORMAL: 5617 case GRO_HELD: 5618 __skb_push(skb, ETH_HLEN); 5619 skb->protocol = eth_type_trans(skb, skb->dev); 5620 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) 5621 ret = GRO_DROP; 5622 break; 5623 5624 case GRO_DROP: 5625 napi_reuse_skb(napi, skb); 5626 break; 5627 5628 case GRO_MERGED_FREE: 5629 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5630 napi_skb_free_stolen_head(skb); 5631 else 5632 napi_reuse_skb(napi, skb); 5633 break; 5634 5635 case GRO_MERGED: 5636 case GRO_CONSUMED: 5637 break; 5638 } 5639 5640 return ret; 5641 } 5642 5643 /* Upper GRO stack assumes network header starts at gro_offset=0 5644 * Drivers could call both napi_gro_frags() and napi_gro_receive() 5645 * We copy ethernet header into skb->data to have a common layout. 5646 */ 5647 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 5648 { 5649 struct sk_buff *skb = napi->skb; 5650 const struct ethhdr *eth; 5651 unsigned int hlen = sizeof(*eth); 5652 5653 napi->skb = NULL; 5654 5655 skb_reset_mac_header(skb); 5656 skb_gro_reset_offset(skb); 5657 5658 eth = skb_gro_header_fast(skb, 0); 5659 if (unlikely(skb_gro_header_hard(skb, hlen))) { 5660 eth = skb_gro_header_slow(skb, hlen, 0); 5661 if (unlikely(!eth)) { 5662 net_warn_ratelimited("%s: dropping impossible skb from %s\n", 5663 __func__, napi->dev->name); 5664 napi_reuse_skb(napi, skb); 5665 return NULL; 5666 } 5667 } else { 5668 gro_pull_from_frag0(skb, hlen); 5669 NAPI_GRO_CB(skb)->frag0 += hlen; 5670 NAPI_GRO_CB(skb)->frag0_len -= hlen; 5671 } 5672 __skb_pull(skb, hlen); 5673 5674 /* 5675 * This works because the only protocols we care about don't require 5676 * special handling. 5677 * We'll fix it up properly in napi_frags_finish() 5678 */ 5679 skb->protocol = eth->h_proto; 5680 5681 return skb; 5682 } 5683 5684 gro_result_t napi_gro_frags(struct napi_struct *napi) 5685 { 5686 struct sk_buff *skb = napi_frags_skb(napi); 5687 5688 if (!skb) 5689 return GRO_DROP; 5690 5691 trace_napi_gro_frags_entry(skb); 5692 5693 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 5694 } 5695 EXPORT_SYMBOL(napi_gro_frags); 5696 5697 /* Compute the checksum from gro_offset and return the folded value 5698 * after adding in any pseudo checksum. 
5699 */ 5700 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) 5701 { 5702 __wsum wsum; 5703 __sum16 sum; 5704 5705 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); 5706 5707 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ 5708 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); 5709 if (likely(!sum)) { 5710 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 5711 !skb->csum_complete_sw) 5712 netdev_rx_csum_fault(skb->dev); 5713 } 5714 5715 NAPI_GRO_CB(skb)->csum = wsum; 5716 NAPI_GRO_CB(skb)->csum_valid = 1; 5717 5718 return sum; 5719 } 5720 EXPORT_SYMBOL(__skb_gro_checksum_complete); 5721 5722 static void net_rps_send_ipi(struct softnet_data *remsd) 5723 { 5724 #ifdef CONFIG_RPS 5725 while (remsd) { 5726 struct softnet_data *next = remsd->rps_ipi_next; 5727 5728 if (cpu_online(remsd->cpu)) 5729 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5730 remsd = next; 5731 } 5732 #endif 5733 } 5734 5735 /* 5736 * net_rps_action_and_irq_enable sends any pending IPIs for RPS. 5737 * Note: called with local irq disabled, but exits with local irq enabled. 5738 */ 5739 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5740 { 5741 #ifdef CONFIG_RPS 5742 struct softnet_data *remsd = sd->rps_ipi_list; 5743 5744 if (remsd) { 5745 sd->rps_ipi_list = NULL; 5746 5747 local_irq_enable(); 5748 5749 /* Send pending IPIs to kick RPS processing on remote cpus. */ 5750 net_rps_send_ipi(remsd); 5751 } else 5752 #endif 5753 local_irq_enable(); 5754 } 5755 5756 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5757 { 5758 #ifdef CONFIG_RPS 5759 return sd->rps_ipi_list != NULL; 5760 #else 5761 return false; 5762 #endif 5763 } 5764 5765 static int process_backlog(struct napi_struct *napi, int quota) 5766 { 5767 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5768 bool again = true; 5769 int work = 0; 5770 5771 /* Check if we have pending IPIs; it's better to send them now 5772 * rather than waiting for net_rx_action() to end. 5773 */ 5774 if (sd_has_rps_ipi_waiting(sd)) { 5775 local_irq_disable(); 5776 net_rps_action_and_irq_enable(sd); 5777 } 5778 5779 napi->weight = dev_rx_weight; 5780 while (again) { 5781 struct sk_buff *skb; 5782 5783 while ((skb = __skb_dequeue(&sd->process_queue))) { 5784 rcu_read_lock(); 5785 __netif_receive_skb(skb); 5786 rcu_read_unlock(); 5787 input_queue_head_incr(sd); 5788 if (++work >= quota) 5789 return work; 5790 5791 } 5792 5793 local_irq_disable(); 5794 rps_lock(sd); 5795 if (skb_queue_empty(&sd->input_pkt_queue)) { 5796 /* 5797 * Inline a custom version of __napi_complete(). 5798 * Only the current cpu owns and manipulates this napi, 5799 * and NAPI_STATE_SCHED is the only possible flag set 5800 * on backlog. 5801 * We can use a plain write instead of clear_bit(), 5802 * and we don't need an smp_mb() memory barrier. 5803 */ 5804 napi->state = 0; 5805 again = false; 5806 } else { 5807 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5808 &sd->process_queue); 5809 } 5810 rps_unlock(sd); 5811 local_irq_enable(); 5812 } 5813 5814 return work; 5815 } 5816 5817 /** 5818 * __napi_schedule - schedule for receive 5819 * @n: entry to schedule 5820 * 5821 * The entry's receive function will be scheduled to run. 5822 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
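 *
 * A typical driver pattern, as a sketch (struct foo_priv, foo_irq() and
 * foo_disable_rx_irq() are hypothetical; napi_schedule() is the usual
 * wrapper that lands here via napi_schedule_prep()):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}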
5823 */ 5824 void __napi_schedule(struct napi_struct *n) 5825 { 5826 unsigned long flags; 5827 5828 local_irq_save(flags); 5829 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5830 local_irq_restore(flags); 5831 } 5832 EXPORT_SYMBOL(__napi_schedule); 5833 5834 /** 5835 * napi_schedule_prep - check if napi can be scheduled 5836 * @n: napi context 5837 * 5838 * Test if NAPI routine is already running, and if not mark 5839 * it as running. This is used as a condition variable to 5840 * ensure only one NAPI poll instance runs. We also make 5841 * sure there is no pending NAPI disable. 5842 */ 5843 bool napi_schedule_prep(struct napi_struct *n) 5844 { 5845 unsigned long val, new; 5846 5847 do { 5848 val = READ_ONCE(n->state); 5849 if (unlikely(val & NAPIF_STATE_DISABLE)) 5850 return false; 5851 new = val | NAPIF_STATE_SCHED; 5852 5853 /* Sets the STATE_MISSED bit if STATE_SCHED was already set. 5854 * This was suggested by Alexander Duyck, as the compiler 5855 * emits better code than: 5856 * if (val & NAPIF_STATE_SCHED) 5857 * new |= NAPIF_STATE_MISSED; 5858 */ 5859 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 5860 NAPIF_STATE_MISSED; 5861 } while (cmpxchg(&n->state, val, new) != val); 5862 5863 return !(val & NAPIF_STATE_SCHED); 5864 } 5865 EXPORT_SYMBOL(napi_schedule_prep); 5866 5867 /** 5868 * __napi_schedule_irqoff - schedule for receive 5869 * @n: entry to schedule 5870 * 5871 * Variant of __napi_schedule() assuming hard irqs are masked 5872 */ 5873 void __napi_schedule_irqoff(struct napi_struct *n) 5874 { 5875 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5876 } 5877 EXPORT_SYMBOL(__napi_schedule_irqoff); 5878 5879 bool napi_complete_done(struct napi_struct *n, int work_done) 5880 { 5881 unsigned long flags, val, new; 5882 5883 /* 5884 * 1) Don't let napi dequeue from the cpu poll list 5885 * just in case it's running on a different cpu. 5886 * 2) If we are busy polling, do nothing here, we have 5887 * the guarantee we will be called later. 5888 */ 5889 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 5890 NAPIF_STATE_IN_BUSY_POLL))) 5891 return false; 5892 5893 if (n->gro_count) { 5894 unsigned long timeout = 0; 5895 5896 if (work_done) 5897 timeout = n->dev->gro_flush_timeout; 5898 5899 if (timeout) 5900 hrtimer_start(&n->timer, ns_to_ktime(timeout), 5901 HRTIMER_MODE_REL_PINNED); 5902 else 5903 napi_gro_flush(n, false); 5904 } 5905 if (unlikely(!list_empty(&n->poll_list))) { 5906 /* If n->poll_list is not empty, we need to mask irqs */ 5907 local_irq_save(flags); 5908 list_del_init(&n->poll_list); 5909 local_irq_restore(flags); 5910 } 5911 5912 do { 5913 val = READ_ONCE(n->state); 5914 5915 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 5916 5917 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); 5918 5919 /* If STATE_MISSED was set, leave STATE_SCHED set, 5920 * because we will call napi->poll() one more time. 5921 * This C code was suggested by Alexander Duyck to help gcc.
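 * It is equivalent to:
 *	if (val & NAPIF_STATE_MISSED)
 *		new |= NAPIF_STATE_SCHED;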
5922 */ 5923 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 5924 NAPIF_STATE_SCHED; 5925 } while (cmpxchg(&n->state, val, new) != val); 5926 5927 if (unlikely(val & NAPIF_STATE_MISSED)) { 5928 __napi_schedule(n); 5929 return false; 5930 } 5931 5932 return true; 5933 } 5934 EXPORT_SYMBOL(napi_complete_done); 5935 5936 /* must be called under rcu_read_lock(), as we dont take a reference */ 5937 static struct napi_struct *napi_by_id(unsigned int napi_id) 5938 { 5939 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 5940 struct napi_struct *napi; 5941 5942 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 5943 if (napi->napi_id == napi_id) 5944 return napi; 5945 5946 return NULL; 5947 } 5948 5949 #if defined(CONFIG_NET_RX_BUSY_POLL) 5950 5951 #define BUSY_POLL_BUDGET 8 5952 5953 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) 5954 { 5955 int rc; 5956 5957 /* Busy polling means there is a high chance device driver hard irq 5958 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 5959 * set in napi_schedule_prep(). 5960 * Since we are about to call napi->poll() once more, we can safely 5961 * clear NAPI_STATE_MISSED. 5962 * 5963 * Note: x86 could use a single "lock and ..." instruction 5964 * to perform these two clear_bit() 5965 */ 5966 clear_bit(NAPI_STATE_MISSED, &napi->state); 5967 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 5968 5969 local_bh_disable(); 5970 5971 /* All we really want here is to re-enable device interrupts. 5972 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 5973 */ 5974 rc = napi->poll(napi, BUSY_POLL_BUDGET); 5975 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); 5976 netpoll_poll_unlock(have_poll_lock); 5977 if (rc == BUSY_POLL_BUDGET) 5978 __napi_schedule(napi); 5979 local_bh_enable(); 5980 } 5981 5982 void napi_busy_loop(unsigned int napi_id, 5983 bool (*loop_end)(void *, unsigned long), 5984 void *loop_end_arg) 5985 { 5986 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 5987 int (*napi_poll)(struct napi_struct *napi, int budget); 5988 void *have_poll_lock = NULL; 5989 struct napi_struct *napi; 5990 5991 restart: 5992 napi_poll = NULL; 5993 5994 rcu_read_lock(); 5995 5996 napi = napi_by_id(napi_id); 5997 if (!napi) 5998 goto out; 5999 6000 preempt_disable(); 6001 for (;;) { 6002 int work = 0; 6003 6004 local_bh_disable(); 6005 if (!napi_poll) { 6006 unsigned long val = READ_ONCE(napi->state); 6007 6008 /* If multiple threads are competing for this napi, 6009 * we avoid dirtying napi->state as much as we can. 
6010 */ 6011 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6012 NAPIF_STATE_IN_BUSY_POLL)) 6013 goto count; 6014 if (cmpxchg(&napi->state, val, 6015 val | NAPIF_STATE_IN_BUSY_POLL | 6016 NAPIF_STATE_SCHED) != val) 6017 goto count; 6018 have_poll_lock = netpoll_poll_lock(napi); 6019 napi_poll = napi->poll; 6020 } 6021 work = napi_poll(napi, BUSY_POLL_BUDGET); 6022 trace_napi_poll(napi, work, BUSY_POLL_BUDGET); 6023 count: 6024 if (work > 0) 6025 __NET_ADD_STATS(dev_net(napi->dev), 6026 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6027 local_bh_enable(); 6028 6029 if (!loop_end || loop_end(loop_end_arg, start_time)) 6030 break; 6031 6032 if (unlikely(need_resched())) { 6033 if (napi_poll) 6034 busy_poll_stop(napi, have_poll_lock); 6035 preempt_enable(); 6036 rcu_read_unlock(); 6037 cond_resched(); 6038 if (loop_end(loop_end_arg, start_time)) 6039 return; 6040 goto restart; 6041 } 6042 cpu_relax(); 6043 } 6044 if (napi_poll) 6045 busy_poll_stop(napi, have_poll_lock); 6046 preempt_enable(); 6047 out: 6048 rcu_read_unlock(); 6049 } 6050 EXPORT_SYMBOL(napi_busy_loop); 6051 6052 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6053 6054 static void napi_hash_add(struct napi_struct *napi) 6055 { 6056 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) || 6057 test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) 6058 return; 6059 6060 spin_lock(&napi_hash_lock); 6061 6062 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6063 do { 6064 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6065 napi_gen_id = MIN_NAPI_ID; 6066 } while (napi_by_id(napi_gen_id)); 6067 napi->napi_id = napi_gen_id; 6068 6069 hlist_add_head_rcu(&napi->napi_hash_node, 6070 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6071 6072 spin_unlock(&napi_hash_lock); 6073 } 6074 6075 /* Warning : caller is responsible to make sure rcu grace period 6076 * is respected before freeing memory containing @napi 6077 */ 6078 bool napi_hash_del(struct napi_struct *napi) 6079 { 6080 bool rcu_sync_needed = false; 6081 6082 spin_lock(&napi_hash_lock); 6083 6084 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) { 6085 rcu_sync_needed = true; 6086 hlist_del_rcu(&napi->napi_hash_node); 6087 } 6088 spin_unlock(&napi_hash_lock); 6089 return rcu_sync_needed; 6090 } 6091 EXPORT_SYMBOL_GPL(napi_hash_del); 6092 6093 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6094 { 6095 struct napi_struct *napi; 6096 6097 napi = container_of(timer, struct napi_struct, timer); 6098 6099 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6100 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
6101 */ 6102 if (napi->gro_count && !napi_disable_pending(napi) && 6103 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) 6104 __napi_schedule_irqoff(napi); 6105 6106 return HRTIMER_NORESTART; 6107 } 6108 6109 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6110 int (*poll)(struct napi_struct *, int), int weight) 6111 { 6112 int i; 6113 6114 INIT_LIST_HEAD(&napi->poll_list); 6115 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6116 napi->timer.function = napi_watchdog; 6117 napi->gro_count = 0; 6118 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6119 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6120 napi->gro_hash[i].count = 0; 6121 } 6122 napi->skb = NULL; 6123 napi->poll = poll; 6124 if (weight > NAPI_POLL_WEIGHT) 6125 pr_err_once("netif_napi_add() called with weight %d on device %s\n", 6126 weight, dev->name); 6127 napi->weight = weight; 6128 list_add(&napi->dev_list, &dev->napi_list); 6129 napi->dev = dev; 6130 #ifdef CONFIG_NETPOLL 6131 napi->poll_owner = -1; 6132 #endif 6133 set_bit(NAPI_STATE_SCHED, &napi->state); 6134 napi_hash_add(napi); 6135 } 6136 EXPORT_SYMBOL(netif_napi_add); 6137 6138 void napi_disable(struct napi_struct *n) 6139 { 6140 might_sleep(); 6141 set_bit(NAPI_STATE_DISABLE, &n->state); 6142 6143 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 6144 msleep(1); 6145 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) 6146 msleep(1); 6147 6148 hrtimer_cancel(&n->timer); 6149 6150 clear_bit(NAPI_STATE_DISABLE, &n->state); 6151 } 6152 EXPORT_SYMBOL(napi_disable); 6153 6154 static void flush_gro_hash(struct napi_struct *napi) 6155 { 6156 int i; 6157 6158 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6159 struct sk_buff *skb, *n; 6160 6161 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6162 kfree_skb(skb); 6163 napi->gro_hash[i].count = 0; 6164 } 6165 } 6166 6167 /* Must be called in process context */ 6168 void netif_napi_del(struct napi_struct *napi) 6169 { 6170 might_sleep(); 6171 if (napi_hash_del(napi)) 6172 synchronize_net(); 6173 list_del_init(&napi->dev_list); 6174 napi_free_frags(napi); 6175 6176 flush_gro_hash(napi); 6177 napi->gro_count = 0; 6178 } 6179 EXPORT_SYMBOL(netif_napi_del); 6180 6181 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6182 { 6183 void *have; 6184 int work, weight; 6185 6186 list_del_init(&n->poll_list); 6187 6188 have = netpoll_poll_lock(n); 6189 6190 weight = n->weight; 6191 6192 /* This NAPI_STATE_SCHED test is for avoiding a race 6193 * with netpoll's poll_napi(). Only the entity which 6194 * obtains the lock and sees NAPI_STATE_SCHED set will 6195 * actually make the ->poll() call. Therefore we avoid 6196 * accidentally calling ->poll() when NAPI is not scheduled. 6197 */ 6198 work = 0; 6199 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6200 work = n->poll(n, weight); 6201 trace_napi_poll(n, work, weight); 6202 } 6203 6204 WARN_ON_ONCE(work > weight); 6205 6206 if (likely(work < weight)) 6207 goto out_unlock; 6208 6209 /* Drivers must not modify the NAPI state if they 6210 * consume the entire weight. In such cases this code 6211 * still "owns" the NAPI instance and therefore can 6212 * move the instance around on the list at-will. 6213 */ 6214 if (unlikely(napi_disable_pending(n))) { 6215 napi_complete(n); 6216 goto out_unlock; 6217 } 6218 6219 if (n->gro_count) { 6220 /* flush too old packets 6221 * If HZ < 1000, flush all packets. 
6222 */ 6223 napi_gro_flush(n, HZ >= 1000); 6224 } 6225 6226 /* Some drivers may have called napi_schedule 6227 * prior to exhausting their budget. 6228 */ 6229 if (unlikely(!list_empty(&n->poll_list))) { 6230 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6231 n->dev ? n->dev->name : "backlog"); 6232 goto out_unlock; 6233 } 6234 6235 list_add_tail(&n->poll_list, repoll); 6236 6237 out_unlock: 6238 netpoll_poll_unlock(have); 6239 6240 return work; 6241 } 6242 6243 static __latent_entropy void net_rx_action(struct softirq_action *h) 6244 { 6245 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6246 unsigned long time_limit = jiffies + 6247 usecs_to_jiffies(netdev_budget_usecs); 6248 int budget = netdev_budget; 6249 LIST_HEAD(list); 6250 LIST_HEAD(repoll); 6251 6252 local_irq_disable(); 6253 list_splice_init(&sd->poll_list, &list); 6254 local_irq_enable(); 6255 6256 for (;;) { 6257 struct napi_struct *n; 6258 6259 if (list_empty(&list)) { 6260 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 6261 goto out; 6262 break; 6263 } 6264 6265 n = list_first_entry(&list, struct napi_struct, poll_list); 6266 budget -= napi_poll(n, &repoll); 6267 6268 /* If softirq window is exhausted then punt. 6269 * Allow this to run for 2 jiffies since which will allow 6270 * an average latency of 1.5/HZ. 6271 */ 6272 if (unlikely(budget <= 0 || 6273 time_after_eq(jiffies, time_limit))) { 6274 sd->time_squeeze++; 6275 break; 6276 } 6277 } 6278 6279 local_irq_disable(); 6280 6281 list_splice_tail_init(&sd->poll_list, &list); 6282 list_splice_tail(&repoll, &list); 6283 list_splice(&list, &sd->poll_list); 6284 if (!list_empty(&sd->poll_list)) 6285 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6286 6287 net_rps_action_and_irq_enable(sd); 6288 out: 6289 __kfree_skb_flush(); 6290 } 6291 6292 struct netdev_adjacent { 6293 struct net_device *dev; 6294 6295 /* upper master flag, there can only be one master device per list */ 6296 bool master; 6297 6298 /* counter for the number of times this device was added to us */ 6299 u16 ref_nr; 6300 6301 /* private field for the users */ 6302 void *private; 6303 6304 struct list_head list; 6305 struct rcu_head rcu; 6306 }; 6307 6308 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6309 struct list_head *adj_list) 6310 { 6311 struct netdev_adjacent *adj; 6312 6313 list_for_each_entry(adj, adj_list, list) { 6314 if (adj->dev == adj_dev) 6315 return adj; 6316 } 6317 return NULL; 6318 } 6319 6320 static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data) 6321 { 6322 struct net_device *dev = data; 6323 6324 return upper_dev == dev; 6325 } 6326 6327 /** 6328 * netdev_has_upper_dev - Check if device is linked to an upper device 6329 * @dev: device 6330 * @upper_dev: upper device to check 6331 * 6332 * Find out if a device is linked to specified upper device and return true 6333 * in case it is. Note that this checks only immediate upper device, 6334 * not through a complete stack of devices. The caller must hold the RTNL lock. 
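 *
 * A minimal usage sketch (hypothetical caller; port_dev and team_dev are
 * assumed names, and the RTNL lock is already held as required):
 *
 *	if (netdev_has_upper_dev(port_dev, team_dev))
 *		pr_info("%s is already a lower device of %s\n",
 *			port_dev->name, team_dev->name);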
6335 */ 6336 bool netdev_has_upper_dev(struct net_device *dev, 6337 struct net_device *upper_dev) 6338 { 6339 ASSERT_RTNL(); 6340 6341 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev, 6342 upper_dev); 6343 } 6344 EXPORT_SYMBOL(netdev_has_upper_dev); 6345 6346 /** 6347 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 6348 * @dev: device 6349 * @upper_dev: upper device to check 6350 * 6351 * Find out if a device is linked to specified upper device and return true 6352 * in case it is. Note that this checks the entire upper device chain. 6353 * The caller must hold the RCU read lock. 6354 */ 6355 6356 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6357 struct net_device *upper_dev) 6358 { 6359 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev, 6360 upper_dev); 6361 } 6362 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6363 6364 /** 6365 * netdev_has_any_upper_dev - Check if device is linked to some device 6366 * @dev: device 6367 * 6368 * Find out if a device is linked to an upper device and return true in case 6369 * it is. The caller must hold the RTNL lock. 6370 */ 6371 bool netdev_has_any_upper_dev(struct net_device *dev) 6372 { 6373 ASSERT_RTNL(); 6374 6375 return !list_empty(&dev->adj_list.upper); 6376 } 6377 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6378 6379 /** 6380 * netdev_master_upper_dev_get - Get master upper device 6381 * @dev: device 6382 * 6383 * Find a master upper device and return pointer to it or NULL in case 6384 * it's not there. The caller must hold the RTNL lock. 6385 */ 6386 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6387 { 6388 struct netdev_adjacent *upper; 6389 6390 ASSERT_RTNL(); 6391 6392 if (list_empty(&dev->adj_list.upper)) 6393 return NULL; 6394 6395 upper = list_first_entry(&dev->adj_list.upper, 6396 struct netdev_adjacent, list); 6397 if (likely(upper->master)) 6398 return upper->dev; 6399 return NULL; 6400 } 6401 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6402 6403 /** 6404 * netdev_has_any_lower_dev - Check if device is linked to some device 6405 * @dev: device 6406 * 6407 * Find out if a device is linked to a lower device and return true in case 6408 * it is. The caller must hold the RTNL lock. 6409 */ 6410 static bool netdev_has_any_lower_dev(struct net_device *dev) 6411 { 6412 ASSERT_RTNL(); 6413 6414 return !list_empty(&dev->adj_list.lower); 6415 } 6416 6417 void *netdev_adjacent_get_private(struct list_head *adj_list) 6418 { 6419 struct netdev_adjacent *adj; 6420 6421 adj = list_entry(adj_list, struct netdev_adjacent, list); 6422 6423 return adj->private; 6424 } 6425 EXPORT_SYMBOL(netdev_adjacent_get_private); 6426 6427 /** 6428 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6429 * @dev: device 6430 * @iter: list_head ** of the current position 6431 * 6432 * Gets the next device from the dev's upper list, starting from iter 6433 * position. The caller must hold the RCU read lock.
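 *
 * A minimal iteration sketch (hypothetical caller, under rcu_read_lock()):
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_info("upper of %s: %s\n", dev->name, upper->name);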
6434 */ 6435 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6436 struct list_head **iter) 6437 { 6438 struct netdev_adjacent *upper; 6439 6440 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6441 6442 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6443 6444 if (&upper->list == &dev->adj_list.upper) 6445 return NULL; 6446 6447 *iter = &upper->list; 6448 6449 return upper->dev; 6450 } 6451 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6452 6453 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6454 struct list_head **iter) 6455 { 6456 struct netdev_adjacent *upper; 6457 6458 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6459 6460 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6461 6462 if (&upper->list == &dev->adj_list.upper) 6463 return NULL; 6464 6465 *iter = &upper->list; 6466 6467 return upper->dev; 6468 } 6469 6470 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6471 int (*fn)(struct net_device *dev, 6472 void *data), 6473 void *data) 6474 { 6475 struct net_device *udev; 6476 struct list_head *iter; 6477 int ret; 6478 6479 for (iter = &dev->adj_list.upper, 6480 udev = netdev_next_upper_dev_rcu(dev, &iter); 6481 udev; 6482 udev = netdev_next_upper_dev_rcu(dev, &iter)) { 6483 /* first is the upper device itself */ 6484 ret = fn(udev, data); 6485 if (ret) 6486 return ret; 6487 6488 /* then look at all of its upper devices */ 6489 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data); 6490 if (ret) 6491 return ret; 6492 } 6493 6494 return 0; 6495 } 6496 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 6497 6498 /** 6499 * netdev_lower_get_next_private - Get the next ->private from the 6500 * lower neighbour list 6501 * @dev: device 6502 * @iter: list_head ** of the current position 6503 * 6504 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6505 * list, starting from iter position. The caller must either hold the 6506 * RTNL lock or its own locking that guarantees that the neighbour lower 6507 * list will remain unchanged. 6508 */ 6509 void *netdev_lower_get_next_private(struct net_device *dev, 6510 struct list_head **iter) 6511 { 6512 struct netdev_adjacent *lower; 6513 6514 lower = list_entry(*iter, struct netdev_adjacent, list); 6515 6516 if (&lower->list == &dev->adj_list.lower) 6517 return NULL; 6518 6519 *iter = lower->list.next; 6520 6521 return lower->private; 6522 } 6523 EXPORT_SYMBOL(netdev_lower_get_next_private); 6524 6525 /** 6526 * netdev_lower_get_next_private_rcu - Get the next ->private from the 6527 * lower neighbour list, RCU 6528 * variant 6529 * @dev: device 6530 * @iter: list_head ** of the current position 6531 * 6532 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6533 * list, starting from iter position. The caller must hold the RCU read lock.
6534 */ 6535 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 6536 struct list_head **iter) 6537 { 6538 struct netdev_adjacent *lower; 6539 6540 WARN_ON_ONCE(!rcu_read_lock_held()); 6541 6542 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6543 6544 if (&lower->list == &dev->adj_list.lower) 6545 return NULL; 6546 6547 *iter = &lower->list; 6548 6549 return lower->private; 6550 } 6551 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 6552 6553 /** 6554 * netdev_lower_get_next - Get the next device from the lower neighbour 6555 * list 6556 * @dev: device 6557 * @iter: list_head ** of the current position 6558 * 6559 * Gets the next netdev_adjacent from the dev's lower neighbour 6560 * list, starting from iter position. The caller must hold RTNL lock or 6561 * its own locking that guarantees that the neighbour lower 6562 * list will remain unchanged. 6563 */ 6564 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 6565 { 6566 struct netdev_adjacent *lower; 6567 6568 lower = list_entry(*iter, struct netdev_adjacent, list); 6569 6570 if (&lower->list == &dev->adj_list.lower) 6571 return NULL; 6572 6573 *iter = lower->list.next; 6574 6575 return lower->dev; 6576 } 6577 EXPORT_SYMBOL(netdev_lower_get_next); 6578 6579 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 6580 struct list_head **iter) 6581 { 6582 struct netdev_adjacent *lower; 6583 6584 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 6585 6586 if (&lower->list == &dev->adj_list.lower) 6587 return NULL; 6588 6589 *iter = &lower->list; 6590 6591 return lower->dev; 6592 } 6593 6594 int netdev_walk_all_lower_dev(struct net_device *dev, 6595 int (*fn)(struct net_device *dev, 6596 void *data), 6597 void *data) 6598 { 6599 struct net_device *ldev; 6600 struct list_head *iter; 6601 int ret; 6602 6603 for (iter = &dev->adj_list.lower, 6604 ldev = netdev_next_lower_dev(dev, &iter); 6605 ldev; 6606 ldev = netdev_next_lower_dev(dev, &iter)) { 6607 /* first is the lower device itself */ 6608 ret = fn(ldev, data); 6609 if (ret) 6610 return ret; 6611 6612 /* then look at all of its lower devices */ 6613 ret = netdev_walk_all_lower_dev(ldev, fn, data); 6614 if (ret) 6615 return ret; 6616 } 6617 6618 return 0; 6619 } 6620 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 6621 6622 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 6623 struct list_head **iter) 6624 { 6625 struct netdev_adjacent *lower; 6626 6627 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6628 if (&lower->list == &dev->adj_list.lower) 6629 return NULL; 6630 6631 *iter = &lower->list; 6632 6633 return lower->dev; 6634 } 6635 6636 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 6637 int (*fn)(struct net_device *dev, 6638 void *data), 6639 void *data) 6640 { 6641 struct net_device *ldev; 6642 struct list_head *iter; 6643 int ret; 6644 6645 for (iter = &dev->adj_list.lower, 6646 ldev = netdev_next_lower_dev_rcu(dev, &iter); 6647 ldev; 6648 ldev = netdev_next_lower_dev_rcu(dev, &iter)) { 6649 /* first is the lower device itself */ 6650 ret = fn(ldev, data); 6651 if (ret) 6652 return ret; 6653 6654 /* then look at all of its lower devices */ 6655 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data); 6656 if (ret) 6657 return ret; 6658 } 6659 6660 return 0; 6661 } 6662 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 6663 6664 /** 6665 * netdev_lower_get_first_private_rcu - Get the first ->private from the 6666 * lower neighbour list, RCU 
6667 * variant 6668 * @dev: device 6669 * 6670 * Gets the first netdev_adjacent->private from the dev's lower neighbour 6671 * list. The caller must hold RCU read lock. 6672 */ 6673 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 6674 { 6675 struct netdev_adjacent *lower; 6676 6677 lower = list_first_or_null_rcu(&dev->adj_list.lower, 6678 struct netdev_adjacent, list); 6679 if (lower) 6680 return lower->private; 6681 return NULL; 6682 } 6683 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 6684 6685 /** 6686 * netdev_master_upper_dev_get_rcu - Get master upper device 6687 * @dev: device 6688 * 6689 * Find a master upper device and return pointer to it or NULL in case 6690 * it's not there. The caller must hold the RCU read lock. 6691 */ 6692 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 6693 { 6694 struct netdev_adjacent *upper; 6695 6696 upper = list_first_or_null_rcu(&dev->adj_list.upper, 6697 struct netdev_adjacent, list); 6698 if (upper && likely(upper->master)) 6699 return upper->dev; 6700 return NULL; 6701 } 6702 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 6703 6704 static int netdev_adjacent_sysfs_add(struct net_device *dev, 6705 struct net_device *adj_dev, 6706 struct list_head *dev_list) 6707 { 6708 char linkname[IFNAMSIZ+7]; 6709 6710 sprintf(linkname, dev_list == &dev->adj_list.upper ? 6711 "upper_%s" : "lower_%s", adj_dev->name); 6712 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 6713 linkname); 6714 } 6715 static void netdev_adjacent_sysfs_del(struct net_device *dev, 6716 char *name, 6717 struct list_head *dev_list) 6718 { 6719 char linkname[IFNAMSIZ+7]; 6720 6721 sprintf(linkname, dev_list == &dev->adj_list.upper ? 6722 "upper_%s" : "lower_%s", name); 6723 sysfs_remove_link(&(dev->dev.kobj), linkname); 6724 } 6725 6726 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 6727 struct net_device *adj_dev, 6728 struct list_head *dev_list) 6729 { 6730 return (dev_list == &dev->adj_list.upper || 6731 dev_list == &dev->adj_list.lower) && 6732 net_eq(dev_net(dev), dev_net(adj_dev)); 6733 } 6734 6735 static int __netdev_adjacent_dev_insert(struct net_device *dev, 6736 struct net_device *adj_dev, 6737 struct list_head *dev_list, 6738 void *private, bool master) 6739 { 6740 struct netdev_adjacent *adj; 6741 int ret; 6742 6743 adj = __netdev_find_adj(adj_dev, dev_list); 6744 6745 if (adj) { 6746 adj->ref_nr += 1; 6747 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 6748 dev->name, adj_dev->name, adj->ref_nr); 6749 6750 return 0; 6751 } 6752 6753 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 6754 if (!adj) 6755 return -ENOMEM; 6756 6757 adj->dev = adj_dev; 6758 adj->master = master; 6759 adj->ref_nr = 1; 6760 adj->private = private; 6761 dev_hold(adj_dev); 6762 6763 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 6764 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 6765 6766 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 6767 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 6768 if (ret) 6769 goto free_adj; 6770 } 6771 6772 /* Ensure that master link is always the first item in list. 
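 * That way netdev_master_upper_dev_get() and netdev_master_upper_dev_get_rcu()
 * can return it with a simple first-entry lookup.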
*/ 6773 if (master) { 6774 ret = sysfs_create_link(&(dev->dev.kobj), 6775 &(adj_dev->dev.kobj), "master"); 6776 if (ret) 6777 goto remove_symlinks; 6778 6779 list_add_rcu(&adj->list, dev_list); 6780 } else { 6781 list_add_tail_rcu(&adj->list, dev_list); 6782 } 6783 6784 return 0; 6785 6786 remove_symlinks: 6787 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 6788 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 6789 free_adj: 6790 kfree(adj); 6791 dev_put(adj_dev); 6792 6793 return ret; 6794 } 6795 6796 static void __netdev_adjacent_dev_remove(struct net_device *dev, 6797 struct net_device *adj_dev, 6798 u16 ref_nr, 6799 struct list_head *dev_list) 6800 { 6801 struct netdev_adjacent *adj; 6802 6803 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 6804 dev->name, adj_dev->name, ref_nr); 6805 6806 adj = __netdev_find_adj(adj_dev, dev_list); 6807 6808 if (!adj) { 6809 pr_err("Adjacency does not exist for device %s from %s\n", 6810 dev->name, adj_dev->name); 6811 WARN_ON(1); 6812 return; 6813 } 6814 6815 if (adj->ref_nr > ref_nr) { 6816 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 6817 dev->name, adj_dev->name, ref_nr, 6818 adj->ref_nr - ref_nr); 6819 adj->ref_nr -= ref_nr; 6820 return; 6821 } 6822 6823 if (adj->master) 6824 sysfs_remove_link(&(dev->dev.kobj), "master"); 6825 6826 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 6827 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 6828 6829 list_del_rcu(&adj->list); 6830 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 6831 adj_dev->name, dev->name, adj_dev->name); 6832 dev_put(adj_dev); 6833 kfree_rcu(adj, rcu); 6834 } 6835 6836 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 6837 struct net_device *upper_dev, 6838 struct list_head *up_list, 6839 struct list_head *down_list, 6840 void *private, bool master) 6841 { 6842 int ret; 6843 6844 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 6845 private, master); 6846 if (ret) 6847 return ret; 6848 6849 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 6850 private, false); 6851 if (ret) { 6852 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 6853 return ret; 6854 } 6855 6856 return 0; 6857 } 6858 6859 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 6860 struct net_device *upper_dev, 6861 u16 ref_nr, 6862 struct list_head *up_list, 6863 struct list_head *down_list) 6864 { 6865 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 6866 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 6867 } 6868 6869 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 6870 struct net_device *upper_dev, 6871 void *private, bool master) 6872 { 6873 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 6874 &dev->adj_list.upper, 6875 &upper_dev->adj_list.lower, 6876 private, master); 6877 } 6878 6879 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 6880 struct net_device *upper_dev) 6881 { 6882 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 6883 &dev->adj_list.upper, 6884 &upper_dev->adj_list.lower); 6885 } 6886 6887 static int __netdev_upper_dev_link(struct net_device *dev, 6888 struct net_device *upper_dev, bool master, 6889 void *upper_priv, void *upper_info, 6890 struct netlink_ext_ack *extack) 6891 { 6892 struct netdev_notifier_changeupper_info changeupper_info = { 6893 .info = { 6894 .dev = dev, 6895 .extack = extack, 6896 }, 6897 .upper_dev = upper_dev, 6898 .master = master, 
6899 .linking = true, 6900 .upper_info = upper_info, 6901 }; 6902 struct net_device *master_dev; 6903 int ret = 0; 6904 6905 ASSERT_RTNL(); 6906 6907 if (dev == upper_dev) 6908 return -EBUSY; 6909 6910 /* To prevent loops, check if dev is not upper device to upper_dev. */ 6911 if (netdev_has_upper_dev(upper_dev, dev)) 6912 return -EBUSY; 6913 6914 if (!master) { 6915 if (netdev_has_upper_dev(dev, upper_dev)) 6916 return -EEXIST; 6917 } else { 6918 master_dev = netdev_master_upper_dev_get(dev); 6919 if (master_dev) 6920 return master_dev == upper_dev ? -EEXIST : -EBUSY; 6921 } 6922 6923 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 6924 &changeupper_info.info); 6925 ret = notifier_to_errno(ret); 6926 if (ret) 6927 return ret; 6928 6929 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 6930 master); 6931 if (ret) 6932 return ret; 6933 6934 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 6935 &changeupper_info.info); 6936 ret = notifier_to_errno(ret); 6937 if (ret) 6938 goto rollback; 6939 6940 return 0; 6941 6942 rollback: 6943 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 6944 6945 return ret; 6946 } 6947 6948 /** 6949 * netdev_upper_dev_link - Add a link to the upper device 6950 * @dev: device 6951 * @upper_dev: new upper device 6952 * @extack: netlink extended ack 6953 * 6954 * Adds a link to device which is upper to this one. The caller must hold 6955 * the RTNL lock. On a failure a negative errno code is returned. 6956 * On success the reference counts are adjusted and the function 6957 * returns zero. 6958 */ 6959 int netdev_upper_dev_link(struct net_device *dev, 6960 struct net_device *upper_dev, 6961 struct netlink_ext_ack *extack) 6962 { 6963 return __netdev_upper_dev_link(dev, upper_dev, false, 6964 NULL, NULL, extack); 6965 } 6966 EXPORT_SYMBOL(netdev_upper_dev_link); 6967 6968 /** 6969 * netdev_master_upper_dev_link - Add a master link to the upper device 6970 * @dev: device 6971 * @upper_dev: new upper device 6972 * @upper_priv: upper device private 6973 * @upper_info: upper info to be passed down via notifier 6974 * @extack: netlink extended ack 6975 * 6976 * Adds a link to device which is upper to this one. In this case, only 6977 * one master upper device can be linked, although other non-master devices 6978 * might be linked as well. The caller must hold the RTNL lock. 6979 * On a failure a negative errno code is returned. On success the reference 6980 * counts are adjusted and the function returns zero. 6981 */ 6982 int netdev_master_upper_dev_link(struct net_device *dev, 6983 struct net_device *upper_dev, 6984 void *upper_priv, void *upper_info, 6985 struct netlink_ext_ack *extack) 6986 { 6987 return __netdev_upper_dev_link(dev, upper_dev, true, 6988 upper_priv, upper_info, extack); 6989 } 6990 EXPORT_SYMBOL(netdev_master_upper_dev_link); 6991 6992 /** 6993 * netdev_upper_dev_unlink - Removes a link to upper device 6994 * @dev: device 6995 * @upper_dev: new upper device 6996 * 6997 * Removes a link to device which is upper to this one. The caller must hold 6998 * the RTNL lock. 
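 *
 * A minimal sketch of the usual pairing (hypothetical caller holding the
 * RTNL lock; port_dev, master_dev and extack are assumed names):
 *
 *	err = netdev_master_upper_dev_link(port_dev, master_dev,
 *					   NULL, NULL, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(port_dev, master_dev);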
6999 */ 7000 void netdev_upper_dev_unlink(struct net_device *dev, 7001 struct net_device *upper_dev) 7002 { 7003 struct netdev_notifier_changeupper_info changeupper_info = { 7004 .info = { 7005 .dev = dev, 7006 }, 7007 .upper_dev = upper_dev, 7008 .linking = false, 7009 }; 7010 7011 ASSERT_RTNL(); 7012 7013 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7014 7015 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7016 &changeupper_info.info); 7017 7018 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7019 7020 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7021 &changeupper_info.info); 7022 } 7023 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7024 7025 /** 7026 * netdev_bonding_info_change - Dispatch event about slave change 7027 * @dev: device 7028 * @bonding_info: info to dispatch 7029 * 7030 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7031 * The caller must hold the RTNL lock. 7032 */ 7033 void netdev_bonding_info_change(struct net_device *dev, 7034 struct netdev_bonding_info *bonding_info) 7035 { 7036 struct netdev_notifier_bonding_info info = { 7037 .info.dev = dev, 7038 }; 7039 7040 memcpy(&info.bonding_info, bonding_info, 7041 sizeof(struct netdev_bonding_info)); 7042 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7043 &info.info); 7044 } 7045 EXPORT_SYMBOL(netdev_bonding_info_change); 7046 7047 static void netdev_adjacent_add_links(struct net_device *dev) 7048 { 7049 struct netdev_adjacent *iter; 7050 7051 struct net *net = dev_net(dev); 7052 7053 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7054 if (!net_eq(net, dev_net(iter->dev))) 7055 continue; 7056 netdev_adjacent_sysfs_add(iter->dev, dev, 7057 &iter->dev->adj_list.lower); 7058 netdev_adjacent_sysfs_add(dev, iter->dev, 7059 &dev->adj_list.upper); 7060 } 7061 7062 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7063 if (!net_eq(net, dev_net(iter->dev))) 7064 continue; 7065 netdev_adjacent_sysfs_add(iter->dev, dev, 7066 &iter->dev->adj_list.upper); 7067 netdev_adjacent_sysfs_add(dev, iter->dev, 7068 &dev->adj_list.lower); 7069 } 7070 } 7071 7072 static void netdev_adjacent_del_links(struct net_device *dev) 7073 { 7074 struct netdev_adjacent *iter; 7075 7076 struct net *net = dev_net(dev); 7077 7078 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7079 if (!net_eq(net, dev_net(iter->dev))) 7080 continue; 7081 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7082 &iter->dev->adj_list.lower); 7083 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7084 &dev->adj_list.upper); 7085 } 7086 7087 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7088 if (!net_eq(net, dev_net(iter->dev))) 7089 continue; 7090 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7091 &iter->dev->adj_list.upper); 7092 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7093 &dev->adj_list.lower); 7094 } 7095 } 7096 7097 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 7098 { 7099 struct netdev_adjacent *iter; 7100 7101 struct net *net = dev_net(dev); 7102 7103 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7104 if (!net_eq(net, dev_net(iter->dev))) 7105 continue; 7106 netdev_adjacent_sysfs_del(iter->dev, oldname, 7107 &iter->dev->adj_list.lower); 7108 netdev_adjacent_sysfs_add(iter->dev, dev, 7109 &iter->dev->adj_list.lower); 7110 } 7111 7112 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7113 if (!net_eq(net, dev_net(iter->dev))) 7114 continue; 7115 netdev_adjacent_sysfs_del(iter->dev, oldname, 7116 &iter->dev->adj_list.upper); 7117 
netdev_adjacent_sysfs_add(iter->dev, dev, 7118 &iter->dev->adj_list.upper); 7119 } 7120 } 7121 7122 void *netdev_lower_dev_get_private(struct net_device *dev, 7123 struct net_device *lower_dev) 7124 { 7125 struct netdev_adjacent *lower; 7126 7127 if (!lower_dev) 7128 return NULL; 7129 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 7130 if (!lower) 7131 return NULL; 7132 7133 return lower->private; 7134 } 7135 EXPORT_SYMBOL(netdev_lower_dev_get_private); 7136 7137 7138 int dev_get_nest_level(struct net_device *dev) 7139 { 7140 struct net_device *lower = NULL; 7141 struct list_head *iter; 7142 int max_nest = -1; 7143 int nest; 7144 7145 ASSERT_RTNL(); 7146 7147 netdev_for_each_lower_dev(dev, lower, iter) { 7148 nest = dev_get_nest_level(lower); 7149 if (max_nest < nest) 7150 max_nest = nest; 7151 } 7152 7153 return max_nest + 1; 7154 } 7155 EXPORT_SYMBOL(dev_get_nest_level); 7156 7157 /** 7158 * netdev_lower_change - Dispatch event about lower device state change 7159 * @lower_dev: device 7160 * @lower_state_info: state to dispatch 7161 * 7162 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 7163 * The caller must hold the RTNL lock. 7164 */ 7165 void netdev_lower_state_changed(struct net_device *lower_dev, 7166 void *lower_state_info) 7167 { 7168 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 7169 .info.dev = lower_dev, 7170 }; 7171 7172 ASSERT_RTNL(); 7173 changelowerstate_info.lower_state_info = lower_state_info; 7174 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 7175 &changelowerstate_info.info); 7176 } 7177 EXPORT_SYMBOL(netdev_lower_state_changed); 7178 7179 static void dev_change_rx_flags(struct net_device *dev, int flags) 7180 { 7181 const struct net_device_ops *ops = dev->netdev_ops; 7182 7183 if (ops->ndo_change_rx_flags) 7184 ops->ndo_change_rx_flags(dev, flags); 7185 } 7186 7187 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 7188 { 7189 unsigned int old_flags = dev->flags; 7190 kuid_t uid; 7191 kgid_t gid; 7192 7193 ASSERT_RTNL(); 7194 7195 dev->flags |= IFF_PROMISC; 7196 dev->promiscuity += inc; 7197 if (dev->promiscuity == 0) { 7198 /* 7199 * Avoid overflow. 7200 * If inc causes overflow, untouch promisc and return error. 7201 */ 7202 if (inc < 0) 7203 dev->flags &= ~IFF_PROMISC; 7204 else { 7205 dev->promiscuity -= inc; 7206 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 7207 dev->name); 7208 return -EOVERFLOW; 7209 } 7210 } 7211 if (dev->flags != old_flags) { 7212 pr_info("device %s %s promiscuous mode\n", 7213 dev->name, 7214 dev->flags & IFF_PROMISC ? "entered" : "left"); 7215 if (audit_enabled) { 7216 current_uid_gid(&uid, &gid); 7217 audit_log(audit_context(), GFP_ATOMIC, 7218 AUDIT_ANOM_PROMISCUOUS, 7219 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 7220 dev->name, (dev->flags & IFF_PROMISC), 7221 (old_flags & IFF_PROMISC), 7222 from_kuid(&init_user_ns, audit_get_loginuid(current)), 7223 from_kuid(&init_user_ns, uid), 7224 from_kgid(&init_user_ns, gid), 7225 audit_get_sessionid(current)); 7226 } 7227 7228 dev_change_rx_flags(dev, IFF_PROMISC); 7229 } 7230 if (notify) 7231 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 7232 return 0; 7233 } 7234 7235 /** 7236 * dev_set_promiscuity - update promiscuity count on a device 7237 * @dev: device 7238 * @inc: modifier 7239 * 7240 * Add or remove promiscuity from a device. 
While the count in the device 7241 * remains above zero the interface remains promiscuous. Once it hits zero 7242 * the device reverts back to normal filtering operation. A negative inc 7243 * value is used to drop promiscuity on the device. 7244 * Return 0 if successful or a negative errno code on error. 7245 */ 7246 int dev_set_promiscuity(struct net_device *dev, int inc) 7247 { 7248 unsigned int old_flags = dev->flags; 7249 int err; 7250 7251 err = __dev_set_promiscuity(dev, inc, true); 7252 if (err < 0) 7253 return err; 7254 if (dev->flags != old_flags) 7255 dev_set_rx_mode(dev); 7256 return err; 7257 } 7258 EXPORT_SYMBOL(dev_set_promiscuity); 7259 7260 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 7261 { 7262 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 7263 7264 ASSERT_RTNL(); 7265 7266 dev->flags |= IFF_ALLMULTI; 7267 dev->allmulti += inc; 7268 if (dev->allmulti == 0) { 7269 /* 7270 * Avoid overflow. 7271 * If inc causes overflow, untouch allmulti and return error. 7272 */ 7273 if (inc < 0) 7274 dev->flags &= ~IFF_ALLMULTI; 7275 else { 7276 dev->allmulti -= inc; 7277 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 7278 dev->name); 7279 return -EOVERFLOW; 7280 } 7281 } 7282 if (dev->flags ^ old_flags) { 7283 dev_change_rx_flags(dev, IFF_ALLMULTI); 7284 dev_set_rx_mode(dev); 7285 if (notify) 7286 __dev_notify_flags(dev, old_flags, 7287 dev->gflags ^ old_gflags); 7288 } 7289 return 0; 7290 } 7291 7292 /** 7293 * dev_set_allmulti - update allmulti count on a device 7294 * @dev: device 7295 * @inc: modifier 7296 * 7297 * Add or remove reception of all multicast frames to a device. While the 7298 * count in the device remains above zero the interface remains listening 7299 * to all interfaces. Once it hits zero the device reverts back to normal 7300 * filtering operation. A negative @inc value is used to drop the counter 7301 * when releasing a resource needing all multicasts. 7302 * Return 0 if successful or a negative errno code on error. 7303 */ 7304 7305 int dev_set_allmulti(struct net_device *dev, int inc) 7306 { 7307 return __dev_set_allmulti(dev, inc, true); 7308 } 7309 EXPORT_SYMBOL(dev_set_allmulti); 7310 7311 /* 7312 * Upload unicast and multicast address lists to device and 7313 * configure RX filtering. When the device doesn't support unicast 7314 * filtering it is put in promiscuous mode while unicast addresses 7315 * are present. 7316 */ 7317 void __dev_set_rx_mode(struct net_device *dev) 7318 { 7319 const struct net_device_ops *ops = dev->netdev_ops; 7320 7321 /* dev_open will call this function so the list will stay sane. */ 7322 if (!(dev->flags&IFF_UP)) 7323 return; 7324 7325 if (!netif_device_present(dev)) 7326 return; 7327 7328 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 7329 /* Unicast addresses changes may only happen under the rtnl, 7330 * therefore calling __dev_set_promiscuity here is safe. 
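 * The non-notifying variant is used for this internal bookkeeping, so
 * __dev_notify_flags() is not called from here.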
7331 */ 7332 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 7333 __dev_set_promiscuity(dev, 1, false); 7334 dev->uc_promisc = true; 7335 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 7336 __dev_set_promiscuity(dev, -1, false); 7337 dev->uc_promisc = false; 7338 } 7339 } 7340 7341 if (ops->ndo_set_rx_mode) 7342 ops->ndo_set_rx_mode(dev); 7343 } 7344 7345 void dev_set_rx_mode(struct net_device *dev) 7346 { 7347 netif_addr_lock_bh(dev); 7348 __dev_set_rx_mode(dev); 7349 netif_addr_unlock_bh(dev); 7350 } 7351 7352 /** 7353 * dev_get_flags - get flags reported to userspace 7354 * @dev: device 7355 * 7356 * Get the combination of flag bits exported through APIs to userspace. 7357 */ 7358 unsigned int dev_get_flags(const struct net_device *dev) 7359 { 7360 unsigned int flags; 7361 7362 flags = (dev->flags & ~(IFF_PROMISC | 7363 IFF_ALLMULTI | 7364 IFF_RUNNING | 7365 IFF_LOWER_UP | 7366 IFF_DORMANT)) | 7367 (dev->gflags & (IFF_PROMISC | 7368 IFF_ALLMULTI)); 7369 7370 if (netif_running(dev)) { 7371 if (netif_oper_up(dev)) 7372 flags |= IFF_RUNNING; 7373 if (netif_carrier_ok(dev)) 7374 flags |= IFF_LOWER_UP; 7375 if (netif_dormant(dev)) 7376 flags |= IFF_DORMANT; 7377 } 7378 7379 return flags; 7380 } 7381 EXPORT_SYMBOL(dev_get_flags); 7382 7383 int __dev_change_flags(struct net_device *dev, unsigned int flags) 7384 { 7385 unsigned int old_flags = dev->flags; 7386 int ret; 7387 7388 ASSERT_RTNL(); 7389 7390 /* 7391 * Set the flags on our device. 7392 */ 7393 7394 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 7395 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 7396 IFF_AUTOMEDIA)) | 7397 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 7398 IFF_ALLMULTI)); 7399 7400 /* 7401 * Load in the correct multicast list now the flags have changed. 7402 */ 7403 7404 if ((old_flags ^ flags) & IFF_MULTICAST) 7405 dev_change_rx_flags(dev, IFF_MULTICAST); 7406 7407 dev_set_rx_mode(dev); 7408 7409 /* 7410 * Have we downed the interface. We handle IFF_UP ourselves 7411 * according to user attempts to set it, rather than blindly 7412 * setting it. 7413 */ 7414 7415 ret = 0; 7416 if ((old_flags ^ flags) & IFF_UP) { 7417 if (old_flags & IFF_UP) 7418 __dev_close(dev); 7419 else 7420 ret = __dev_open(dev); 7421 } 7422 7423 if ((flags ^ dev->gflags) & IFF_PROMISC) { 7424 int inc = (flags & IFF_PROMISC) ? 1 : -1; 7425 unsigned int old_flags = dev->flags; 7426 7427 dev->gflags ^= IFF_PROMISC; 7428 7429 if (__dev_set_promiscuity(dev, inc, false) >= 0) 7430 if (dev->flags != old_flags) 7431 dev_set_rx_mode(dev); 7432 } 7433 7434 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 7435 * is important. Some (broken) drivers set IFF_PROMISC, when 7436 * IFF_ALLMULTI is requested not asking us and not reporting. 7437 */ 7438 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 7439 int inc = (flags & IFF_ALLMULTI) ? 
1 : -1; 7440 7441 dev->gflags ^= IFF_ALLMULTI; 7442 __dev_set_allmulti(dev, inc, false); 7443 } 7444 7445 return ret; 7446 } 7447 7448 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 7449 unsigned int gchanges) 7450 { 7451 unsigned int changes = dev->flags ^ old_flags; 7452 7453 if (gchanges) 7454 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 7455 7456 if (changes & IFF_UP) { 7457 if (dev->flags & IFF_UP) 7458 call_netdevice_notifiers(NETDEV_UP, dev); 7459 else 7460 call_netdevice_notifiers(NETDEV_DOWN, dev); 7461 } 7462 7463 if (dev->flags & IFF_UP && 7464 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 7465 struct netdev_notifier_change_info change_info = { 7466 .info = { 7467 .dev = dev, 7468 }, 7469 .flags_changed = changes, 7470 }; 7471 7472 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 7473 } 7474 } 7475 7476 /** 7477 * dev_change_flags - change device settings 7478 * @dev: device 7479 * @flags: device state flags 7480 * 7481 * Change settings on device based state flags. The flags are 7482 * in the userspace exported format. 7483 */ 7484 int dev_change_flags(struct net_device *dev, unsigned int flags) 7485 { 7486 int ret; 7487 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 7488 7489 ret = __dev_change_flags(dev, flags); 7490 if (ret < 0) 7491 return ret; 7492 7493 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 7494 __dev_notify_flags(dev, old_flags, changes); 7495 return ret; 7496 } 7497 EXPORT_SYMBOL(dev_change_flags); 7498 7499 int __dev_set_mtu(struct net_device *dev, int new_mtu) 7500 { 7501 const struct net_device_ops *ops = dev->netdev_ops; 7502 7503 if (ops->ndo_change_mtu) 7504 return ops->ndo_change_mtu(dev, new_mtu); 7505 7506 dev->mtu = new_mtu; 7507 return 0; 7508 } 7509 EXPORT_SYMBOL(__dev_set_mtu); 7510 7511 /** 7512 * dev_set_mtu - Change maximum transfer unit 7513 * @dev: device 7514 * @new_mtu: new transfer unit 7515 * 7516 * Change the maximum transfer size of the network device. 7517 */ 7518 int dev_set_mtu(struct net_device *dev, int new_mtu) 7519 { 7520 int err, orig_mtu; 7521 7522 if (new_mtu == dev->mtu) 7523 return 0; 7524 7525 /* MTU must be positive, and in range */ 7526 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 7527 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n", 7528 dev->name, new_mtu, dev->min_mtu); 7529 return -EINVAL; 7530 } 7531 7532 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 7533 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n", 7534 dev->name, new_mtu, dev->max_mtu); 7535 return -EINVAL; 7536 } 7537 7538 if (!netif_device_present(dev)) 7539 return -ENODEV; 7540 7541 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 7542 err = notifier_to_errno(err); 7543 if (err) 7544 return err; 7545 7546 orig_mtu = dev->mtu; 7547 err = __dev_set_mtu(dev, new_mtu); 7548 7549 if (!err) { 7550 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 7551 err = notifier_to_errno(err); 7552 if (err) { 7553 /* setting mtu back and notifying everyone again, 7554 * so that they have a chance to revert changes. 
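 * Listeners that already adapted to the new value therefore see a second
 * NETDEV_CHANGEMTU with the original MTU restored.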
7555 */ 7556 __dev_set_mtu(dev, orig_mtu); 7557 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 7558 } 7559 } 7560 return err; 7561 } 7562 EXPORT_SYMBOL(dev_set_mtu); 7563 7564 /** 7565 * dev_change_tx_queue_len - Change TX queue length of a netdevice 7566 * @dev: device 7567 * @new_len: new tx queue length 7568 */ 7569 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 7570 { 7571 unsigned int orig_len = dev->tx_queue_len; 7572 int res; 7573 7574 if (new_len != (unsigned int)new_len) 7575 return -ERANGE; 7576 7577 if (new_len != orig_len) { 7578 dev->tx_queue_len = new_len; 7579 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 7580 res = notifier_to_errno(res); 7581 if (res) { 7582 netdev_err(dev, 7583 "refused to change device tx_queue_len\n"); 7584 dev->tx_queue_len = orig_len; 7585 return res; 7586 } 7587 return dev_qdisc_change_tx_queue_len(dev); 7588 } 7589 7590 return 0; 7591 } 7592 7593 /** 7594 * dev_set_group - Change group this device belongs to 7595 * @dev: device 7596 * @new_group: group this device should belong to 7597 */ 7598 void dev_set_group(struct net_device *dev, int new_group) 7599 { 7600 dev->group = new_group; 7601 } 7602 EXPORT_SYMBOL(dev_set_group); 7603 7604 /** 7605 * dev_set_mac_address - Change Media Access Control Address 7606 * @dev: device 7607 * @sa: new address 7608 * 7609 * Change the hardware (MAC) address of the device 7610 */ 7611 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 7612 { 7613 const struct net_device_ops *ops = dev->netdev_ops; 7614 int err; 7615 7616 if (!ops->ndo_set_mac_address) 7617 return -EOPNOTSUPP; 7618 if (sa->sa_family != dev->type) 7619 return -EINVAL; 7620 if (!netif_device_present(dev)) 7621 return -ENODEV; 7622 err = ops->ndo_set_mac_address(dev, sa); 7623 if (err) 7624 return err; 7625 dev->addr_assign_type = NET_ADDR_SET; 7626 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 7627 add_device_randomness(dev->dev_addr, dev->addr_len); 7628 return 0; 7629 } 7630 EXPORT_SYMBOL(dev_set_mac_address); 7631 7632 /** 7633 * dev_change_carrier - Change device carrier 7634 * @dev: device 7635 * @new_carrier: new value 7636 * 7637 * Change device carrier 7638 */ 7639 int dev_change_carrier(struct net_device *dev, bool new_carrier) 7640 { 7641 const struct net_device_ops *ops = dev->netdev_ops; 7642 7643 if (!ops->ndo_change_carrier) 7644 return -EOPNOTSUPP; 7645 if (!netif_device_present(dev)) 7646 return -ENODEV; 7647 return ops->ndo_change_carrier(dev, new_carrier); 7648 } 7649 EXPORT_SYMBOL(dev_change_carrier); 7650 7651 /** 7652 * dev_get_phys_port_id - Get device physical port ID 7653 * @dev: device 7654 * @ppid: port ID 7655 * 7656 * Get device physical port ID 7657 */ 7658 int dev_get_phys_port_id(struct net_device *dev, 7659 struct netdev_phys_item_id *ppid) 7660 { 7661 const struct net_device_ops *ops = dev->netdev_ops; 7662 7663 if (!ops->ndo_get_phys_port_id) 7664 return -EOPNOTSUPP; 7665 return ops->ndo_get_phys_port_id(dev, ppid); 7666 } 7667 EXPORT_SYMBOL(dev_get_phys_port_id); 7668 7669 /** 7670 * dev_get_phys_port_name - Get device physical port name 7671 * @dev: device 7672 * @name: port name 7673 * @len: limit of bytes to copy to name 7674 * 7675 * Get device physical port name 7676 */ 7677 int dev_get_phys_port_name(struct net_device *dev, 7678 char *name, size_t len) 7679 { 7680 const struct net_device_ops *ops = dev->netdev_ops; 7681 7682 if (!ops->ndo_get_phys_port_name) 7683 return -EOPNOTSUPP; 7684 return ops->ndo_get_phys_port_name(dev, name, len); 
7685 } 7686 EXPORT_SYMBOL(dev_get_phys_port_name); 7687 7688 /** 7689 * dev_change_proto_down - update protocol port state information 7690 * @dev: device 7691 * @proto_down: new value 7692 * 7693 * This info can be used by switch drivers to set the phys state of the 7694 * port. 7695 */ 7696 int dev_change_proto_down(struct net_device *dev, bool proto_down) 7697 { 7698 const struct net_device_ops *ops = dev->netdev_ops; 7699 7700 if (!ops->ndo_change_proto_down) 7701 return -EOPNOTSUPP; 7702 if (!netif_device_present(dev)) 7703 return -ENODEV; 7704 return ops->ndo_change_proto_down(dev, proto_down); 7705 } 7706 EXPORT_SYMBOL(dev_change_proto_down); 7707 7708 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, 7709 enum bpf_netdev_command cmd) 7710 { 7711 struct netdev_bpf xdp; 7712 7713 if (!bpf_op) 7714 return 0; 7715 7716 memset(&xdp, 0, sizeof(xdp)); 7717 xdp.command = cmd; 7718 7719 /* Query must always succeed. */ 7720 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); 7721 7722 return xdp.prog_id; 7723 } 7724 7725 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, 7726 struct netlink_ext_ack *extack, u32 flags, 7727 struct bpf_prog *prog) 7728 { 7729 struct netdev_bpf xdp; 7730 7731 memset(&xdp, 0, sizeof(xdp)); 7732 if (flags & XDP_FLAGS_HW_MODE) 7733 xdp.command = XDP_SETUP_PROG_HW; 7734 else 7735 xdp.command = XDP_SETUP_PROG; 7736 xdp.extack = extack; 7737 xdp.flags = flags; 7738 xdp.prog = prog; 7739 7740 return bpf_op(dev, &xdp); 7741 } 7742 7743 static void dev_xdp_uninstall(struct net_device *dev) 7744 { 7745 struct netdev_bpf xdp; 7746 bpf_op_t ndo_bpf; 7747 7748 /* Remove generic XDP */ 7749 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL)); 7750 7751 /* Remove from the driver */ 7752 ndo_bpf = dev->netdev_ops->ndo_bpf; 7753 if (!ndo_bpf) 7754 return; 7755 7756 memset(&xdp, 0, sizeof(xdp)); 7757 xdp.command = XDP_QUERY_PROG; 7758 WARN_ON(ndo_bpf(dev, &xdp)); 7759 if (xdp.prog_id) 7760 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 7761 NULL)); 7762 7763 /* Remove HW offload */ 7764 memset(&xdp, 0, sizeof(xdp)); 7765 xdp.command = XDP_QUERY_PROG_HW; 7766 if (!ndo_bpf(dev, &xdp) && xdp.prog_id) 7767 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 7768 NULL)); 7769 } 7770 7771 /** 7772 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 7773 * @dev: device 7774 * @extack: netlink extended ack 7775 * @fd: new program fd or negative value to clear 7776 * @flags: xdp-related flags 7777 * 7778 * Set or clear a bpf program for a device 7779 */ 7780 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 7781 int fd, u32 flags) 7782 { 7783 const struct net_device_ops *ops = dev->netdev_ops; 7784 enum bpf_netdev_command query; 7785 struct bpf_prog *prog = NULL; 7786 bpf_op_t bpf_op, bpf_chk; 7787 int err; 7788 7789 ASSERT_RTNL(); 7790 7791 query = flags & XDP_FLAGS_HW_MODE ? 
XDP_QUERY_PROG_HW : XDP_QUERY_PROG; 7792 7793 bpf_op = bpf_chk = ops->ndo_bpf; 7794 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) 7795 return -EOPNOTSUPP; 7796 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) 7797 bpf_op = generic_xdp_install; 7798 if (bpf_op == bpf_chk) 7799 bpf_chk = generic_xdp_install; 7800 7801 if (fd >= 0) { 7802 if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) || 7803 __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW)) 7804 return -EEXIST; 7805 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && 7806 __dev_xdp_query(dev, bpf_op, query)) 7807 return -EBUSY; 7808 7809 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 7810 bpf_op == ops->ndo_bpf); 7811 if (IS_ERR(prog)) 7812 return PTR_ERR(prog); 7813 7814 if (!(flags & XDP_FLAGS_HW_MODE) && 7815 bpf_prog_is_dev_bound(prog->aux)) { 7816 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); 7817 bpf_prog_put(prog); 7818 return -EINVAL; 7819 } 7820 } 7821 7822 err = dev_xdp_install(dev, bpf_op, extack, flags, prog); 7823 if (err < 0 && prog) 7824 bpf_prog_put(prog); 7825 7826 return err; 7827 } 7828 7829 /** 7830 * dev_new_index - allocate an ifindex 7831 * @net: the applicable net namespace 7832 * 7833 * Returns a suitable unique value for a new device interface 7834 * number. The caller must hold the rtnl semaphore or the 7835 * dev_base_lock to be sure it remains unique. 7836 */ 7837 static int dev_new_index(struct net *net) 7838 { 7839 int ifindex = net->ifindex; 7840 7841 for (;;) { 7842 if (++ifindex <= 0) 7843 ifindex = 1; 7844 if (!__dev_get_by_index(net, ifindex)) 7845 return net->ifindex = ifindex; 7846 } 7847 } 7848 7849 /* Delayed registration/unregisteration */ 7850 static LIST_HEAD(net_todo_list); 7851 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 7852 7853 static void net_set_todo(struct net_device *dev) 7854 { 7855 list_add_tail(&dev->todo_list, &net_todo_list); 7856 dev_net(dev)->dev_unreg_count++; 7857 } 7858 7859 static void rollback_registered_many(struct list_head *head) 7860 { 7861 struct net_device *dev, *tmp; 7862 LIST_HEAD(close_head); 7863 7864 BUG_ON(dev_boot_phase); 7865 ASSERT_RTNL(); 7866 7867 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 7868 /* Some devices call without registering 7869 * for initialization unwind. Remove those 7870 * devices and proceed with the remaining. 7871 */ 7872 if (dev->reg_state == NETREG_UNINITIALIZED) { 7873 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 7874 dev->name, dev); 7875 7876 WARN_ON(1); 7877 list_del(&dev->unreg_list); 7878 continue; 7879 } 7880 dev->dismantle = true; 7881 BUG_ON(dev->reg_state != NETREG_REGISTERED); 7882 } 7883 7884 /* If device is running, close it first. */ 7885 list_for_each_entry(dev, head, unreg_list) 7886 list_add_tail(&dev->close_list, &close_head); 7887 dev_close_many(&close_head, true); 7888 7889 list_for_each_entry(dev, head, unreg_list) { 7890 /* And unlink it from device chain. */ 7891 unlist_netdevice(dev); 7892 7893 dev->reg_state = NETREG_UNREGISTERING; 7894 } 7895 flush_all_backlogs(); 7896 7897 synchronize_net(); 7898 7899 list_for_each_entry(dev, head, unreg_list) { 7900 struct sk_buff *skb = NULL; 7901 7902 /* Shutdown queueing discipline. */ 7903 dev_shutdown(dev); 7904 7905 dev_xdp_uninstall(dev); 7906 7907 /* Notify protocols, that we are about to destroy 7908 * this device. They should clean all the things. 
7909 */ 7910 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 7911 7912 if (!dev->rtnl_link_ops || 7913 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 7914 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 7915 GFP_KERNEL, NULL, 0); 7916 7917 /* 7918 * Flush the unicast and multicast chains 7919 */ 7920 dev_uc_flush(dev); 7921 dev_mc_flush(dev); 7922 7923 if (dev->netdev_ops->ndo_uninit) 7924 dev->netdev_ops->ndo_uninit(dev); 7925 7926 if (skb) 7927 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); 7928 7929 /* Notifier chain MUST detach us all upper devices. */ 7930 WARN_ON(netdev_has_any_upper_dev(dev)); 7931 WARN_ON(netdev_has_any_lower_dev(dev)); 7932 7933 /* Remove entries from kobject tree */ 7934 netdev_unregister_kobject(dev); 7935 #ifdef CONFIG_XPS 7936 /* Remove XPS queueing entries */ 7937 netif_reset_xps_queues_gt(dev, 0); 7938 #endif 7939 } 7940 7941 synchronize_net(); 7942 7943 list_for_each_entry(dev, head, unreg_list) 7944 dev_put(dev); 7945 } 7946 7947 static void rollback_registered(struct net_device *dev) 7948 { 7949 LIST_HEAD(single); 7950 7951 list_add(&dev->unreg_list, &single); 7952 rollback_registered_many(&single); 7953 list_del(&single); 7954 } 7955 7956 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 7957 struct net_device *upper, netdev_features_t features) 7958 { 7959 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 7960 netdev_features_t feature; 7961 int feature_bit; 7962 7963 for_each_netdev_feature(&upper_disables, feature_bit) { 7964 feature = __NETIF_F_BIT(feature_bit); 7965 if (!(upper->wanted_features & feature) 7966 && (features & feature)) { 7967 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 7968 &feature, upper->name); 7969 features &= ~feature; 7970 } 7971 } 7972 7973 return features; 7974 } 7975 7976 static void netdev_sync_lower_features(struct net_device *upper, 7977 struct net_device *lower, netdev_features_t features) 7978 { 7979 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 7980 netdev_features_t feature; 7981 int feature_bit; 7982 7983 for_each_netdev_feature(&upper_disables, feature_bit) { 7984 feature = __NETIF_F_BIT(feature_bit); 7985 if (!(features & feature) && (lower->features & feature)) { 7986 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 7987 &feature, lower->name); 7988 lower->wanted_features &= ~feature; 7989 netdev_update_features(lower); 7990 7991 if (unlikely(lower->features & feature)) 7992 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 7993 &feature, lower->name); 7994 } 7995 } 7996 } 7997 7998 static netdev_features_t netdev_fix_features(struct net_device *dev, 7999 netdev_features_t features) 8000 { 8001 /* Fix illegal checksum combinations */ 8002 if ((features & NETIF_F_HW_CSUM) && 8003 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 8004 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 8005 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 8006 } 8007 8008 /* TSO requires that SG is present as well. 
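 * (a TSO superframe is typically built from paged fragments, which a driver
 * can only transmit when it supports scatter-gather)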
*/ 8009 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 8010 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 8011 features &= ~NETIF_F_ALL_TSO; 8012 } 8013 8014 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 8015 !(features & NETIF_F_IP_CSUM)) { 8016 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 8017 features &= ~NETIF_F_TSO; 8018 features &= ~NETIF_F_TSO_ECN; 8019 } 8020 8021 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 8022 !(features & NETIF_F_IPV6_CSUM)) { 8023 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 8024 features &= ~NETIF_F_TSO6; 8025 } 8026 8027 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 8028 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 8029 features &= ~NETIF_F_TSO_MANGLEID; 8030 8031 /* TSO ECN requires that TSO is present as well. */ 8032 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 8033 features &= ~NETIF_F_TSO_ECN; 8034 8035 /* Software GSO depends on SG. */ 8036 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 8037 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 8038 features &= ~NETIF_F_GSO; 8039 } 8040 8041 /* GSO partial features require GSO partial be set */ 8042 if ((features & dev->gso_partial_features) && 8043 !(features & NETIF_F_GSO_PARTIAL)) { 8044 netdev_dbg(dev, 8045 "Dropping partially supported GSO features since no GSO partial.\n"); 8046 features &= ~dev->gso_partial_features; 8047 } 8048 8049 if (!(features & NETIF_F_RXCSUM)) { 8050 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 8051 * successfully merged by hardware must also have the 8052 * checksum verified by hardware. If the user does not 8053 * want to enable RXCSUM, logically, we should disable GRO_HW. 
8054 */ 8055 if (features & NETIF_F_GRO_HW) { 8056 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 8057 features &= ~NETIF_F_GRO_HW; 8058 } 8059 } 8060 8061 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 8062 if (features & NETIF_F_RXFCS) { 8063 if (features & NETIF_F_LRO) { 8064 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 8065 features &= ~NETIF_F_LRO; 8066 } 8067 8068 if (features & NETIF_F_GRO_HW) { 8069 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 8070 features &= ~NETIF_F_GRO_HW; 8071 } 8072 } 8073 8074 return features; 8075 } 8076 8077 int __netdev_update_features(struct net_device *dev) 8078 { 8079 struct net_device *upper, *lower; 8080 netdev_features_t features; 8081 struct list_head *iter; 8082 int err = -1; 8083 8084 ASSERT_RTNL(); 8085 8086 features = netdev_get_wanted_features(dev); 8087 8088 if (dev->netdev_ops->ndo_fix_features) 8089 features = dev->netdev_ops->ndo_fix_features(dev, features); 8090 8091 /* driver might be less strict about feature dependencies */ 8092 features = netdev_fix_features(dev, features); 8093 8094 /* some features can't be enabled if they're off an an upper device */ 8095 netdev_for_each_upper_dev_rcu(dev, upper, iter) 8096 features = netdev_sync_upper_features(dev, upper, features); 8097 8098 if (dev->features == features) 8099 goto sync_lower; 8100 8101 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 8102 &dev->features, &features); 8103 8104 if (dev->netdev_ops->ndo_set_features) 8105 err = dev->netdev_ops->ndo_set_features(dev, features); 8106 else 8107 err = 0; 8108 8109 if (unlikely(err < 0)) { 8110 netdev_err(dev, 8111 "set_features() failed (%d); wanted %pNF, left %pNF\n", 8112 err, &features, &dev->features); 8113 /* return non-0 since some features might have changed and 8114 * it's better to fire a spurious notification than miss it 8115 */ 8116 return -1; 8117 } 8118 8119 sync_lower: 8120 /* some features must be disabled on lower devices when disabled 8121 * on an upper device (think: bonding master or bridge) 8122 */ 8123 netdev_for_each_lower_dev(dev, lower, iter) 8124 netdev_sync_lower_features(dev, lower, features); 8125 8126 if (!err) { 8127 netdev_features_t diff = features ^ dev->features; 8128 8129 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 8130 /* udp_tunnel_{get,drop}_rx_info both need 8131 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 8132 * device, or they won't do anything. 8133 * Thus we need to update dev->features 8134 * *before* calling udp_tunnel_get_rx_info, 8135 * but *after* calling udp_tunnel_drop_rx_info. 8136 */ 8137 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 8138 dev->features = features; 8139 udp_tunnel_get_rx_info(dev); 8140 } else { 8141 udp_tunnel_drop_rx_info(dev); 8142 } 8143 } 8144 8145 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 8146 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 8147 dev->features = features; 8148 err |= vlan_get_rx_ctag_filter_info(dev); 8149 } else { 8150 vlan_drop_rx_ctag_filter_info(dev); 8151 } 8152 } 8153 8154 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 8155 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 8156 dev->features = features; 8157 err |= vlan_get_rx_stag_filter_info(dev); 8158 } else { 8159 vlan_drop_rx_stag_filter_info(dev); 8160 } 8161 } 8162 8163 dev->features = features; 8164 } 8165 8166 return err < 0 ? 
0 : 1; 8167 } 8168 8169 /** 8170 * netdev_update_features - recalculate device features 8171 * @dev: the device to check 8172 * 8173 * Recalculate dev->features set and send notifications if it 8174 * has changed. Should be called after driver or hardware dependent 8175 * conditions might have changed that influence the features. 8176 */ 8177 void netdev_update_features(struct net_device *dev) 8178 { 8179 if (__netdev_update_features(dev)) 8180 netdev_features_change(dev); 8181 } 8182 EXPORT_SYMBOL(netdev_update_features); 8183 8184 /** 8185 * netdev_change_features - recalculate device features 8186 * @dev: the device to check 8187 * 8188 * Recalculate dev->features set and send notifications even 8189 * if they have not changed. Should be called instead of 8190 * netdev_update_features() if also dev->vlan_features might 8191 * have changed to allow the changes to be propagated to stacked 8192 * VLAN devices. 8193 */ 8194 void netdev_change_features(struct net_device *dev) 8195 { 8196 __netdev_update_features(dev); 8197 netdev_features_change(dev); 8198 } 8199 EXPORT_SYMBOL(netdev_change_features); 8200 8201 /** 8202 * netif_stacked_transfer_operstate - transfer operstate 8203 * @rootdev: the root or lower level device to transfer state from 8204 * @dev: the device to transfer operstate to 8205 * 8206 * Transfer operational state from root to device. This is normally 8207 * called when a stacking relationship exists between the root 8208 * device and the device(a leaf device). 8209 */ 8210 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 8211 struct net_device *dev) 8212 { 8213 if (rootdev->operstate == IF_OPER_DORMANT) 8214 netif_dormant_on(dev); 8215 else 8216 netif_dormant_off(dev); 8217 8218 if (netif_carrier_ok(rootdev)) 8219 netif_carrier_on(dev); 8220 else 8221 netif_carrier_off(dev); 8222 } 8223 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 8224 8225 static int netif_alloc_rx_queues(struct net_device *dev) 8226 { 8227 unsigned int i, count = dev->num_rx_queues; 8228 struct netdev_rx_queue *rx; 8229 size_t sz = count * sizeof(*rx); 8230 int err = 0; 8231 8232 BUG_ON(count < 1); 8233 8234 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8235 if (!rx) 8236 return -ENOMEM; 8237 8238 dev->_rx = rx; 8239 8240 for (i = 0; i < count; i++) { 8241 rx[i].dev = dev; 8242 8243 /* XDP RX-queue setup */ 8244 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i); 8245 if (err < 0) 8246 goto err_rxq_info; 8247 } 8248 return 0; 8249 8250 err_rxq_info: 8251 /* Rollback successful reg's and free other resources */ 8252 while (i--) 8253 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 8254 kvfree(dev->_rx); 8255 dev->_rx = NULL; 8256 return err; 8257 } 8258 8259 static void netif_free_rx_queues(struct net_device *dev) 8260 { 8261 unsigned int i, count = dev->num_rx_queues; 8262 8263 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 8264 if (!dev->_rx) 8265 return; 8266 8267 for (i = 0; i < count; i++) 8268 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 8269 8270 kvfree(dev->_rx); 8271 } 8272 8273 static void netdev_init_one_queue(struct net_device *dev, 8274 struct netdev_queue *queue, void *_unused) 8275 { 8276 /* Initialize queue lock */ 8277 spin_lock_init(&queue->_xmit_lock); 8278 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 8279 queue->xmit_lock_owner = -1; 8280 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 8281 queue->dev = dev; 8282 #ifdef CONFIG_BQL 8283 dql_init(&queue->dql, HZ); 8284 #endif 8285 } 8286 8287 static void 
netif_free_tx_queues(struct net_device *dev) 8288 { 8289 kvfree(dev->_tx); 8290 } 8291 8292 static int netif_alloc_netdev_queues(struct net_device *dev) 8293 { 8294 unsigned int count = dev->num_tx_queues; 8295 struct netdev_queue *tx; 8296 size_t sz = count * sizeof(*tx); 8297 8298 if (count < 1 || count > 0xffff) 8299 return -EINVAL; 8300 8301 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8302 if (!tx) 8303 return -ENOMEM; 8304 8305 dev->_tx = tx; 8306 8307 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 8308 spin_lock_init(&dev->tx_global_lock); 8309 8310 return 0; 8311 } 8312 8313 void netif_tx_stop_all_queues(struct net_device *dev) 8314 { 8315 unsigned int i; 8316 8317 for (i = 0; i < dev->num_tx_queues; i++) { 8318 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 8319 8320 netif_tx_stop_queue(txq); 8321 } 8322 } 8323 EXPORT_SYMBOL(netif_tx_stop_all_queues); 8324 8325 /** 8326 * register_netdevice - register a network device 8327 * @dev: device to register 8328 * 8329 * Take a completed network device structure and add it to the kernel 8330 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 8331 * chain. 0 is returned on success. A negative errno code is returned 8332 * on a failure to set up the device, or if the name is a duplicate. 8333 * 8334 * Callers must hold the rtnl semaphore. You may want 8335 * register_netdev() instead of this. 8336 * 8337 * BUGS: 8338 * The locking appears insufficient to guarantee two parallel registers 8339 * will not get the same name. 8340 */ 8341 8342 int register_netdevice(struct net_device *dev) 8343 { 8344 int ret; 8345 struct net *net = dev_net(dev); 8346 8347 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 8348 NETDEV_FEATURE_COUNT); 8349 BUG_ON(dev_boot_phase); 8350 ASSERT_RTNL(); 8351 8352 might_sleep(); 8353 8354 /* When net_device's are persistent, this will be fatal. */ 8355 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 8356 BUG_ON(!net); 8357 8358 spin_lock_init(&dev->addr_list_lock); 8359 netdev_set_addr_lockdep_class(dev); 8360 8361 ret = dev_get_valid_name(net, dev, dev->name); 8362 if (ret < 0) 8363 goto out; 8364 8365 /* Init, if this function is available */ 8366 if (dev->netdev_ops->ndo_init) { 8367 ret = dev->netdev_ops->ndo_init(dev); 8368 if (ret) { 8369 if (ret > 0) 8370 ret = -EIO; 8371 goto out; 8372 } 8373 } 8374 8375 if (((dev->hw_features | dev->features) & 8376 NETIF_F_HW_VLAN_CTAG_FILTER) && 8377 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 8378 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 8379 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 8380 ret = -EINVAL; 8381 goto err_uninit; 8382 } 8383 8384 ret = -EBUSY; 8385 if (!dev->ifindex) 8386 dev->ifindex = dev_new_index(net); 8387 else if (__dev_get_by_index(net, dev->ifindex)) 8388 goto err_uninit; 8389 8390 /* Transfer changeable features to wanted_features and enable 8391 * software offloads (GSO and GRO). 8392 */ 8393 dev->hw_features |= NETIF_F_SOFT_FEATURES; 8394 dev->features |= NETIF_F_SOFT_FEATURES; 8395 8396 if (dev->netdev_ops->ndo_udp_tunnel_add) { 8397 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 8398 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 8399 } 8400 8401 dev->wanted_features = dev->features & dev->hw_features; 8402 8403 if (!(dev->flags & IFF_LOOPBACK)) 8404 dev->hw_features |= NETIF_F_NOCACHE_COPY; 8405 8406 /* If IPv4 TCP segmentation offload is supported we should also 8407 * allow the device to enable segmenting the frame with the option 8408 * of ignoring a static IP ID value. 
This doesn't enable the 8409 * feature itself but allows the user to enable it later. 8410 */ 8411 if (dev->hw_features & NETIF_F_TSO) 8412 dev->hw_features |= NETIF_F_TSO_MANGLEID; 8413 if (dev->vlan_features & NETIF_F_TSO) 8414 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 8415 if (dev->mpls_features & NETIF_F_TSO) 8416 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 8417 if (dev->hw_enc_features & NETIF_F_TSO) 8418 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 8419 8420 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 8421 */ 8422 dev->vlan_features |= NETIF_F_HIGHDMA; 8423 8424 /* Make NETIF_F_SG inheritable to tunnel devices. 8425 */ 8426 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 8427 8428 /* Make NETIF_F_SG inheritable to MPLS. 8429 */ 8430 dev->mpls_features |= NETIF_F_SG; 8431 8432 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 8433 ret = notifier_to_errno(ret); 8434 if (ret) 8435 goto err_uninit; 8436 8437 ret = netdev_register_kobject(dev); 8438 if (ret) 8439 goto err_uninit; 8440 dev->reg_state = NETREG_REGISTERED; 8441 8442 __netdev_update_features(dev); 8443 8444 /* 8445 * Default initial state at registration is that the 8446 * device is present. 8447 */ 8448 8449 set_bit(__LINK_STATE_PRESENT, &dev->state); 8450 8451 linkwatch_init_dev(dev); 8452 8453 dev_init_scheduler(dev); 8454 dev_hold(dev); 8455 list_netdevice(dev); 8456 add_device_randomness(dev->dev_addr, dev->addr_len); 8457 8458 /* If the device has a permanent device address, the driver should 8459 * set dev_addr and also addr_assign_type should be set to 8460 * NET_ADDR_PERM (default value). 8461 */ 8462 if (dev->addr_assign_type == NET_ADDR_PERM) 8463 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 8464 8465 /* Notify protocols that a new device appeared. */ 8466 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 8467 ret = notifier_to_errno(ret); 8468 if (ret) { 8469 rollback_registered(dev); 8470 dev->reg_state = NETREG_UNREGISTERED; 8471 } 8472 /* 8473 * Prevent userspace races by waiting until the network 8474 * device is fully set up before sending notifications. 8475 */ 8476 if (!dev->rtnl_link_ops || 8477 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 8478 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 8479 8480 out: 8481 return ret; 8482 8483 err_uninit: 8484 if (dev->netdev_ops->ndo_uninit) 8485 dev->netdev_ops->ndo_uninit(dev); 8486 if (dev->priv_destructor) 8487 dev->priv_destructor(dev); 8488 goto out; 8489 } 8490 EXPORT_SYMBOL(register_netdevice); 8491 8492 /** 8493 * init_dummy_netdev - init a dummy network device for NAPI 8494 * @dev: device to init 8495 * 8496 * This takes a network device structure and initializes the minimum 8497 * number of fields so it can be used to schedule NAPI polls without 8498 * registering a full-blown interface. This is to be used by drivers 8499 * that need to tie several hardware interfaces to a single NAPI 8500 * poll scheduler due to HW limitations. 8501 */ 8502 int init_dummy_netdev(struct net_device *dev) 8503 { 8504 /* Clear everything.
Note we don't initialize spinlocks 8505 * as they aren't supposed to be taken by any of the 8506 * NAPI code and this dummy netdev is supposed to be 8507 * only ever used for NAPI polls 8508 */ 8509 memset(dev, 0, sizeof(struct net_device)); 8510 8511 /* make sure we BUG if trying to hit standard 8512 * register/unregister code path 8513 */ 8514 dev->reg_state = NETREG_DUMMY; 8515 8516 /* NAPI wants this */ 8517 INIT_LIST_HEAD(&dev->napi_list); 8518 8519 /* a dummy interface is started by default */ 8520 set_bit(__LINK_STATE_PRESENT, &dev->state); 8521 set_bit(__LINK_STATE_START, &dev->state); 8522 8523 /* Note: We don't allocate pcpu_refcnt for dummy devices, 8524 * because users of this 'device' don't need to change 8525 * its refcount. 8526 */ 8527 8528 return 0; 8529 } 8530 EXPORT_SYMBOL_GPL(init_dummy_netdev); 8531 8532 8533 /** 8534 * register_netdev - register a network device 8535 * @dev: device to register 8536 * 8537 * Take a completed network device structure and add it to the kernel 8538 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 8539 * chain. 0 is returned on success. A negative errno code is returned 8540 * on a failure to set up the device, or if the name is a duplicate. 8541 * 8542 * This is a wrapper around register_netdevice that takes the rtnl semaphore 8543 * and expands the device name if you passed a format string to 8544 * alloc_netdev. 8545 */ 8546 int register_netdev(struct net_device *dev) 8547 { 8548 int err; 8549 8550 if (rtnl_lock_killable()) 8551 return -EINTR; 8552 err = register_netdevice(dev); 8553 rtnl_unlock(); 8554 return err; 8555 } 8556 EXPORT_SYMBOL(register_netdev); 8557 8558 int netdev_refcnt_read(const struct net_device *dev) 8559 { 8560 int i, refcnt = 0; 8561 8562 for_each_possible_cpu(i) 8563 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 8564 return refcnt; 8565 } 8566 EXPORT_SYMBOL(netdev_refcnt_read); 8567 8568 /** 8569 * netdev_wait_allrefs - wait until all references are gone. 8570 * @dev: target net_device 8571 * 8572 * This is called when unregistering network devices. 8573 * 8574 * Any protocol or device that holds a reference should register 8575 * for netdevice notification, and clean up and put back the 8576 * reference if they receive an UNREGISTER event. 8577 * We can get stuck here if buggy protocols don't correctly 8578 * call dev_put. 8579 */ 8580 static void netdev_wait_allrefs(struct net_device *dev) 8581 { 8582 unsigned long rebroadcast_time, warning_time; 8583 int refcnt; 8584 8585 linkwatch_forget_dev(dev); 8586 8587 rebroadcast_time = warning_time = jiffies; 8588 refcnt = netdev_refcnt_read(dev); 8589 8590 while (refcnt != 0) { 8591 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 8592 rtnl_lock(); 8593 8594 /* Rebroadcast unregister notification */ 8595 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 8596 8597 __rtnl_unlock(); 8598 rcu_barrier(); 8599 rtnl_lock(); 8600 8601 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 8602 &dev->state)) { 8603 /* We must not have linkwatch events 8604 * pending on unregister. If this 8605 * happens, we simply run the queue 8606 * unscheduled, resulting in a noop 8607 * for this device. 8608 */ 8609 linkwatch_run_queue(); 8610 } 8611 8612 __rtnl_unlock(); 8613 8614 rebroadcast_time = jiffies; 8615 } 8616 8617 msleep(250); 8618 8619 refcnt = netdev_refcnt_read(dev); 8620 8621 if (time_after(jiffies, warning_time + 10 * HZ)) { 8622 pr_emerg("unregister_netdevice: waiting for %s to become free.
Usage count = %d\n", 8623 dev->name, refcnt); 8624 warning_time = jiffies; 8625 } 8626 } 8627 } 8628 8629 /* The sequence is: 8630 * 8631 * rtnl_lock(); 8632 * ... 8633 * register_netdevice(x1); 8634 * register_netdevice(x2); 8635 * ... 8636 * unregister_netdevice(y1); 8637 * unregister_netdevice(y2); 8638 * ... 8639 * rtnl_unlock(); 8640 * free_netdev(y1); 8641 * free_netdev(y2); 8642 * 8643 * We are invoked by rtnl_unlock(). 8644 * This allows us to deal with problems: 8645 * 1) We can delete sysfs objects which invoke hotplug 8646 * without deadlocking with linkwatch via keventd. 8647 * 2) Since we run with the RTNL semaphore not held, we can sleep 8648 * safely in order to wait for the netdev refcnt to drop to zero. 8649 * 8650 * We must not return until all unregister events added during 8651 * the interval the lock was held have been completed. 8652 */ 8653 void netdev_run_todo(void) 8654 { 8655 struct list_head list; 8656 8657 /* Snapshot list, allow later requests */ 8658 list_replace_init(&net_todo_list, &list); 8659 8660 __rtnl_unlock(); 8661 8662 8663 /* Wait for rcu callbacks to finish before next phase */ 8664 if (!list_empty(&list)) 8665 rcu_barrier(); 8666 8667 while (!list_empty(&list)) { 8668 struct net_device *dev 8669 = list_first_entry(&list, struct net_device, todo_list); 8670 list_del(&dev->todo_list); 8671 8672 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 8673 pr_err("network todo '%s' but state %d\n", 8674 dev->name, dev->reg_state); 8675 dump_stack(); 8676 continue; 8677 } 8678 8679 dev->reg_state = NETREG_UNREGISTERED; 8680 8681 netdev_wait_allrefs(dev); 8682 8683 /* paranoia */ 8684 BUG_ON(netdev_refcnt_read(dev)); 8685 BUG_ON(!list_empty(&dev->ptype_all)); 8686 BUG_ON(!list_empty(&dev->ptype_specific)); 8687 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 8688 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 8689 #if IS_ENABLED(CONFIG_DECNET) 8690 WARN_ON(dev->dn_ptr); 8691 #endif 8692 if (dev->priv_destructor) 8693 dev->priv_destructor(dev); 8694 if (dev->needs_free_netdev) 8695 free_netdev(dev); 8696 8697 /* Report a network device has been unregistered */ 8698 rtnl_lock(); 8699 dev_net(dev)->dev_unreg_count--; 8700 __rtnl_unlock(); 8701 wake_up(&netdev_unregistering_wq); 8702 8703 /* Free network device */ 8704 kobject_put(&dev->dev.kobj); 8705 } 8706 } 8707 8708 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 8709 * all the same fields in the same order as net_device_stats, with only 8710 * the type differing, but rtnl_link_stats64 may have additional fields 8711 * at the end for newer counters. 
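 *
 * For illustration only (a sketch, not code in this file): a driver that
 * keeps its own 64-bit counters would typically implement ndo_get_stats64()
 * and may still reuse this helper for the legacy fields; the "foo" names
 * below are hypothetical:
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *stats)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		netdev_stats_to_stats64(stats, &dev->stats);
 *		stats->rx_packets = priv->rx_packets;
 *		stats->tx_packets = priv->tx_packets;
 *	}
 *
 * Drivers with only the legacy dev->stats need no callback at all;
 * dev_get_stats() below falls back to this conversion for them.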
8712 */ 8713 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 8714 const struct net_device_stats *netdev_stats) 8715 { 8716 #if BITS_PER_LONG == 64 8717 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 8718 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 8719 /* zero out counters that only exist in rtnl_link_stats64 */ 8720 memset((char *)stats64 + sizeof(*netdev_stats), 0, 8721 sizeof(*stats64) - sizeof(*netdev_stats)); 8722 #else 8723 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 8724 const unsigned long *src = (const unsigned long *)netdev_stats; 8725 u64 *dst = (u64 *)stats64; 8726 8727 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 8728 for (i = 0; i < n; i++) 8729 dst[i] = src[i]; 8730 /* zero out counters that only exist in rtnl_link_stats64 */ 8731 memset((char *)stats64 + n * sizeof(u64), 0, 8732 sizeof(*stats64) - n * sizeof(u64)); 8733 #endif 8734 } 8735 EXPORT_SYMBOL(netdev_stats_to_stats64); 8736 8737 /** 8738 * dev_get_stats - get network device statistics 8739 * @dev: device to get statistics from 8740 * @storage: place to store stats 8741 * 8742 * Get network statistics from device. Return @storage. 8743 * The device driver may provide its own method by setting 8744 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 8745 * otherwise the internal statistics structure is used. 8746 */ 8747 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 8748 struct rtnl_link_stats64 *storage) 8749 { 8750 const struct net_device_ops *ops = dev->netdev_ops; 8751 8752 if (ops->ndo_get_stats64) { 8753 memset(storage, 0, sizeof(*storage)); 8754 ops->ndo_get_stats64(dev, storage); 8755 } else if (ops->ndo_get_stats) { 8756 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 8757 } else { 8758 netdev_stats_to_stats64(storage, &dev->stats); 8759 } 8760 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 8761 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 8762 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 8763 return storage; 8764 } 8765 EXPORT_SYMBOL(dev_get_stats); 8766 8767 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 8768 { 8769 struct netdev_queue *queue = dev_ingress_queue(dev); 8770 8771 #ifdef CONFIG_NET_CLS_ACT 8772 if (queue) 8773 return queue; 8774 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 8775 if (!queue) 8776 return NULL; 8777 netdev_init_one_queue(dev, queue, NULL); 8778 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 8779 queue->qdisc_sleeping = &noop_qdisc; 8780 rcu_assign_pointer(dev->ingress_queue, queue); 8781 #endif 8782 return queue; 8783 } 8784 8785 static const struct ethtool_ops default_ethtool_ops; 8786 8787 void netdev_set_default_ethtool_ops(struct net_device *dev, 8788 const struct ethtool_ops *ops) 8789 { 8790 if (dev->ethtool_ops == &default_ethtool_ops) 8791 dev->ethtool_ops = ops; 8792 } 8793 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 8794 8795 void netdev_freemem(struct net_device *dev) 8796 { 8797 char *addr = (char *)dev - dev->padded; 8798 8799 kvfree(addr); 8800 } 8801 8802 /** 8803 * alloc_netdev_mqs - allocate network device 8804 * @sizeof_priv: size of private data to allocate space for 8805 * @name: device name format string 8806 * @name_assign_type: origin of device name 8807 * @setup: callback to initialize device 8808 * @txqs: the number of TX subqueues to allocate 8809 * @rxqs: the number of RX subqueues to allocate 8810 * 8811 * Allocates a struct net_device with 
private data area for driver use 8812 * and performs basic initialization. Also allocates subqueue structs 8813 * for each queue on the device. 8814 */ 8815 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 8816 unsigned char name_assign_type, 8817 void (*setup)(struct net_device *), 8818 unsigned int txqs, unsigned int rxqs) 8819 { 8820 struct net_device *dev; 8821 unsigned int alloc_size; 8822 struct net_device *p; 8823 8824 BUG_ON(strlen(name) >= sizeof(dev->name)); 8825 8826 if (txqs < 1) { 8827 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 8828 return NULL; 8829 } 8830 8831 if (rxqs < 1) { 8832 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 8833 return NULL; 8834 } 8835 8836 alloc_size = sizeof(struct net_device); 8837 if (sizeof_priv) { 8838 /* ensure 32-byte alignment of private area */ 8839 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 8840 alloc_size += sizeof_priv; 8841 } 8842 /* ensure 32-byte alignment of whole construct */ 8843 alloc_size += NETDEV_ALIGN - 1; 8844 8845 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8846 if (!p) 8847 return NULL; 8848 8849 dev = PTR_ALIGN(p, NETDEV_ALIGN); 8850 dev->padded = (char *)dev - (char *)p; 8851 8852 dev->pcpu_refcnt = alloc_percpu(int); 8853 if (!dev->pcpu_refcnt) 8854 goto free_dev; 8855 8856 if (dev_addr_init(dev)) 8857 goto free_pcpu; 8858 8859 dev_mc_init(dev); 8860 dev_uc_init(dev); 8861 8862 dev_net_set(dev, &init_net); 8863 8864 dev->gso_max_size = GSO_MAX_SIZE; 8865 dev->gso_max_segs = GSO_MAX_SEGS; 8866 8867 INIT_LIST_HEAD(&dev->napi_list); 8868 INIT_LIST_HEAD(&dev->unreg_list); 8869 INIT_LIST_HEAD(&dev->close_list); 8870 INIT_LIST_HEAD(&dev->link_watch_list); 8871 INIT_LIST_HEAD(&dev->adj_list.upper); 8872 INIT_LIST_HEAD(&dev->adj_list.lower); 8873 INIT_LIST_HEAD(&dev->ptype_all); 8874 INIT_LIST_HEAD(&dev->ptype_specific); 8875 #ifdef CONFIG_NET_SCHED 8876 hash_init(dev->qdisc_hash); 8877 #endif 8878 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 8879 setup(dev); 8880 8881 if (!dev->tx_queue_len) { 8882 dev->priv_flags |= IFF_NO_QUEUE; 8883 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 8884 } 8885 8886 dev->num_tx_queues = txqs; 8887 dev->real_num_tx_queues = txqs; 8888 if (netif_alloc_netdev_queues(dev)) 8889 goto free_all; 8890 8891 dev->num_rx_queues = rxqs; 8892 dev->real_num_rx_queues = rxqs; 8893 if (netif_alloc_rx_queues(dev)) 8894 goto free_all; 8895 8896 strcpy(dev->name, name); 8897 dev->name_assign_type = name_assign_type; 8898 dev->group = INIT_NETDEV_GROUP; 8899 if (!dev->ethtool_ops) 8900 dev->ethtool_ops = &default_ethtool_ops; 8901 8902 nf_hook_ingress_init(dev); 8903 8904 return dev; 8905 8906 free_all: 8907 free_netdev(dev); 8908 return NULL; 8909 8910 free_pcpu: 8911 free_percpu(dev->pcpu_refcnt); 8912 free_dev: 8913 netdev_freemem(dev); 8914 return NULL; 8915 } 8916 EXPORT_SYMBOL(alloc_netdev_mqs); 8917 8918 /** 8919 * free_netdev - free network device 8920 * @dev: device 8921 * 8922 * This function does the last stage of destroying an allocated device 8923 * interface. The reference to the device object is released. If this 8924 * is the last reference then it will be freed.Must be called in process 8925 * context. 
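 *
 * A typical driver-side lifecycle, sketched here with a hypothetical
 * "foo" driver (ether_setup() and the private struct are assumptions
 * made for the example, not requirements):
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d",
 *			   NET_NAME_UNKNOWN, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * Drivers that set dev->needs_free_netdev instead rely on
 * netdev_run_todo() to call free_netdev() for them after unregistration.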
8926 */ 8927 void free_netdev(struct net_device *dev) 8928 { 8929 struct napi_struct *p, *n; 8930 8931 might_sleep(); 8932 netif_free_tx_queues(dev); 8933 netif_free_rx_queues(dev); 8934 8935 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 8936 8937 /* Flush device addresses */ 8938 dev_addr_flush(dev); 8939 8940 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 8941 netif_napi_del(p); 8942 8943 free_percpu(dev->pcpu_refcnt); 8944 dev->pcpu_refcnt = NULL; 8945 8946 /* Compatibility with error handling in drivers */ 8947 if (dev->reg_state == NETREG_UNINITIALIZED) { 8948 netdev_freemem(dev); 8949 return; 8950 } 8951 8952 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 8953 dev->reg_state = NETREG_RELEASED; 8954 8955 /* will free via device release */ 8956 put_device(&dev->dev); 8957 } 8958 EXPORT_SYMBOL(free_netdev); 8959 8960 /** 8961 * synchronize_net - Synchronize with packet receive processing 8962 * 8963 * Wait for packets currently being received to be done. 8964 * Does not block later packets from starting. 8965 */ 8966 void synchronize_net(void) 8967 { 8968 might_sleep(); 8969 if (rtnl_is_locked()) 8970 synchronize_rcu_expedited(); 8971 else 8972 synchronize_rcu(); 8973 } 8974 EXPORT_SYMBOL(synchronize_net); 8975 8976 /** 8977 * unregister_netdevice_queue - remove device from the kernel 8978 * @dev: device 8979 * @head: list 8980 * 8981 * This function shuts down a device interface and removes it 8982 * from the kernel tables. 8983 * If head not NULL, device is queued to be unregistered later. 8984 * 8985 * Callers must hold the rtnl semaphore. You may want 8986 * unregister_netdev() instead of this. 8987 */ 8988 8989 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 8990 { 8991 ASSERT_RTNL(); 8992 8993 if (head) { 8994 list_move_tail(&dev->unreg_list, head); 8995 } else { 8996 rollback_registered(dev); 8997 /* Finish processing unregister after unlock */ 8998 net_set_todo(dev); 8999 } 9000 } 9001 EXPORT_SYMBOL(unregister_netdevice_queue); 9002 9003 /** 9004 * unregister_netdevice_many - unregister many devices 9005 * @head: list of devices 9006 * 9007 * Note: As most callers use a stack allocated list_head, 9008 * we force a list_del() to make sure stack wont be corrupted later. 9009 */ 9010 void unregister_netdevice_many(struct list_head *head) 9011 { 9012 struct net_device *dev; 9013 9014 if (!list_empty(head)) { 9015 rollback_registered_many(head); 9016 list_for_each_entry(dev, head, unreg_list) 9017 net_set_todo(dev); 9018 list_del(head); 9019 } 9020 } 9021 EXPORT_SYMBOL(unregister_netdevice_many); 9022 9023 /** 9024 * unregister_netdev - remove device from the kernel 9025 * @dev: device 9026 * 9027 * This function shuts down a device interface and removes it 9028 * from the kernel tables. 9029 * 9030 * This is just a wrapper for unregister_netdevice that takes 9031 * the rtnl semaphore. In general you want to use this and not 9032 * unregister_netdevice. 9033 */ 9034 void unregister_netdev(struct net_device *dev) 9035 { 9036 rtnl_lock(); 9037 unregister_netdevice(dev); 9038 rtnl_unlock(); 9039 } 9040 EXPORT_SYMBOL(unregister_netdev); 9041 9042 /** 9043 * dev_change_net_namespace - move device to different nethost namespace 9044 * @dev: device 9045 * @net: network namespace 9046 * @pat: If not NULL name pattern to try if the current device name 9047 * is already taken in the destination network namespace. 9048 * 9049 * This function shuts down a device interface and moves it 9050 * to a new network namespace. 
On success 0 is returned, on 9051 * a failure a negative errno code is returned. 9052 * 9053 * Callers must hold the rtnl semaphore. 9054 */ 9055 9056 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 9057 { 9058 int err, new_nsid, new_ifindex; 9059 9060 ASSERT_RTNL(); 9061 9062 /* Don't allow namespace local devices to be moved. */ 9063 err = -EINVAL; 9064 if (dev->features & NETIF_F_NETNS_LOCAL) 9065 goto out; 9066 9067 /* Ensure the device has been registered */ 9068 if (dev->reg_state != NETREG_REGISTERED) 9069 goto out; 9070 9071 /* Get out if there is nothing to do */ 9072 err = 0; 9073 if (net_eq(dev_net(dev), net)) 9074 goto out; 9075 9076 /* Pick the destination device name, and ensure 9077 * we can use it in the destination network namespace. 9078 */ 9079 err = -EEXIST; 9080 if (__dev_get_by_name(net, dev->name)) { 9081 /* We get here if we can't use the current device name */ 9082 if (!pat) 9083 goto out; 9084 err = dev_get_valid_name(net, dev, pat); 9085 if (err < 0) 9086 goto out; 9087 } 9088 9089 /* 9090 * And now a mini version of register_netdevice and unregister_netdevice. 9091 */ 9092 9093 /* If the device is running, close it first. */ 9094 dev_close(dev); 9095 9096 /* And unlink it from device chain */ 9097 unlist_netdevice(dev); 9098 9099 synchronize_net(); 9100 9101 /* Shutdown queueing discipline. */ 9102 dev_shutdown(dev); 9103 9104 /* Notify protocols that we are about to destroy 9105 * this device. They should clean all the things. 9106 * 9107 * Note that dev->reg_state stays at NETREG_REGISTERED. 9108 * This is wanted because this way 8021q and macvlan know 9109 * the device is just moving and can keep their slaves up. 9110 */ 9111 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 9112 rcu_barrier(); 9113 9114 new_nsid = peernet2id_alloc(dev_net(dev), net); 9115 /* If there is an ifindex conflict assign a new one */ 9116 if (__dev_get_by_index(net, dev->ifindex)) 9117 new_ifindex = dev_new_index(net); 9118 else 9119 new_ifindex = dev->ifindex; 9120 9121 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 9122 new_ifindex); 9123 9124 /* 9125 * Flush the unicast and multicast chains 9126 */ 9127 dev_uc_flush(dev); 9128 dev_mc_flush(dev); 9129 9130 /* Send a netdev-removed uevent to the old namespace */ 9131 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 9132 netdev_adjacent_del_links(dev); 9133 9134 /* Actually switch the network namespace */ 9135 dev_net_set(dev, net); 9136 dev->ifindex = new_ifindex; 9137 9138 /* Send a netdev-add uevent to the new namespace */ 9139 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 9140 netdev_adjacent_add_links(dev); 9141 9142 /* Fixup kobjects */ 9143 err = device_rename(&dev->dev, dev->name); 9144 WARN_ON(err); 9145 9146 /* Add the device back in the hashes */ 9147 list_netdevice(dev); 9148 9149 /* Notify protocols that a new device appeared. */ 9150 call_netdevice_notifiers(NETDEV_REGISTER, dev); 9151 9152 /* 9153 * Prevent userspace races by waiting until the network 9154 * device is fully set up before sending notifications.
9155 */ 9156 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 9157 9158 synchronize_net(); 9159 err = 0; 9160 out: 9161 return err; 9162 } 9163 EXPORT_SYMBOL_GPL(dev_change_net_namespace); 9164 9165 static int dev_cpu_dead(unsigned int oldcpu) 9166 { 9167 struct sk_buff **list_skb; 9168 struct sk_buff *skb; 9169 unsigned int cpu; 9170 struct softnet_data *sd, *oldsd, *remsd = NULL; 9171 9172 local_irq_disable(); 9173 cpu = smp_processor_id(); 9174 sd = &per_cpu(softnet_data, cpu); 9175 oldsd = &per_cpu(softnet_data, oldcpu); 9176 9177 /* Find end of our completion_queue. */ 9178 list_skb = &sd->completion_queue; 9179 while (*list_skb) 9180 list_skb = &(*list_skb)->next; 9181 /* Append completion queue from offline CPU. */ 9182 *list_skb = oldsd->completion_queue; 9183 oldsd->completion_queue = NULL; 9184 9185 /* Append output queue from offline CPU. */ 9186 if (oldsd->output_queue) { 9187 *sd->output_queue_tailp = oldsd->output_queue; 9188 sd->output_queue_tailp = oldsd->output_queue_tailp; 9189 oldsd->output_queue = NULL; 9190 oldsd->output_queue_tailp = &oldsd->output_queue; 9191 } 9192 /* Append NAPI poll list from offline CPU, with one exception : 9193 * process_backlog() must be called by cpu owning percpu backlog. 9194 * We properly handle process_queue & input_pkt_queue later. 9195 */ 9196 while (!list_empty(&oldsd->poll_list)) { 9197 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 9198 struct napi_struct, 9199 poll_list); 9200 9201 list_del_init(&napi->poll_list); 9202 if (napi->poll == process_backlog) 9203 napi->state = 0; 9204 else 9205 ____napi_schedule(sd, napi); 9206 } 9207 9208 raise_softirq_irqoff(NET_TX_SOFTIRQ); 9209 local_irq_enable(); 9210 9211 #ifdef CONFIG_RPS 9212 remsd = oldsd->rps_ipi_list; 9213 oldsd->rps_ipi_list = NULL; 9214 #endif 9215 /* send out pending IPI's on offline CPU */ 9216 net_rps_send_ipi(remsd); 9217 9218 /* Process offline CPU's input_pkt_queue */ 9219 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 9220 netif_rx_ni(skb); 9221 input_queue_head_incr(oldsd); 9222 } 9223 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 9224 netif_rx_ni(skb); 9225 input_queue_head_incr(oldsd); 9226 } 9227 9228 return 0; 9229 } 9230 9231 /** 9232 * netdev_increment_features - increment feature set by one 9233 * @all: current feature set 9234 * @one: new feature set 9235 * @mask: mask feature set 9236 * 9237 * Computes a new feature set after adding a device with feature set 9238 * @one to the master device with current feature set @all. Will not 9239 * enable anything that is off in @mask. Returns the new feature set. 9240 */ 9241 netdev_features_t netdev_increment_features(netdev_features_t all, 9242 netdev_features_t one, netdev_features_t mask) 9243 { 9244 if (mask & NETIF_F_HW_CSUM) 9245 mask |= NETIF_F_CSUM_MASK; 9246 mask |= NETIF_F_VLAN_CHALLENGED; 9247 9248 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 9249 all &= one | ~NETIF_F_ALL_FOR_ALL; 9250 9251 /* If one device supports hw checksumming, set for all. 
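 * As a worked case of the collapse below (illustrative, not exhaustive):
 * (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &
 * ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM) leaves only NETIF_F_HW_CSUM,
 * since the generic bit already covers the protocol-specific ones.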
*/ 9252 if (all & NETIF_F_HW_CSUM) 9253 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 9254 9255 return all; 9256 } 9257 EXPORT_SYMBOL(netdev_increment_features); 9258 9259 static struct hlist_head * __net_init netdev_create_hash(void) 9260 { 9261 int i; 9262 struct hlist_head *hash; 9263 9264 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 9265 if (hash != NULL) 9266 for (i = 0; i < NETDEV_HASHENTRIES; i++) 9267 INIT_HLIST_HEAD(&hash[i]); 9268 9269 return hash; 9270 } 9271 9272 /* Initialize per network namespace state */ 9273 static int __net_init netdev_init(struct net *net) 9274 { 9275 if (net != &init_net) 9276 INIT_LIST_HEAD(&net->dev_base_head); 9277 9278 net->dev_name_head = netdev_create_hash(); 9279 if (net->dev_name_head == NULL) 9280 goto err_name; 9281 9282 net->dev_index_head = netdev_create_hash(); 9283 if (net->dev_index_head == NULL) 9284 goto err_idx; 9285 9286 return 0; 9287 9288 err_idx: 9289 kfree(net->dev_name_head); 9290 err_name: 9291 return -ENOMEM; 9292 } 9293 9294 /** 9295 * netdev_drivername - network driver for the device 9296 * @dev: network device 9297 * 9298 * Determine network driver for device. 9299 */ 9300 const char *netdev_drivername(const struct net_device *dev) 9301 { 9302 const struct device_driver *driver; 9303 const struct device *parent; 9304 const char *empty = ""; 9305 9306 parent = dev->dev.parent; 9307 if (!parent) 9308 return empty; 9309 9310 driver = parent->driver; 9311 if (driver && driver->name) 9312 return driver->name; 9313 return empty; 9314 } 9315 9316 static void __netdev_printk(const char *level, const struct net_device *dev, 9317 struct va_format *vaf) 9318 { 9319 if (dev && dev->dev.parent) { 9320 dev_printk_emit(level[1] - '0', 9321 dev->dev.parent, 9322 "%s %s %s%s: %pV", 9323 dev_driver_string(dev->dev.parent), 9324 dev_name(dev->dev.parent), 9325 netdev_name(dev), netdev_reg_state(dev), 9326 vaf); 9327 } else if (dev) { 9328 printk("%s%s%s: %pV", 9329 level, netdev_name(dev), netdev_reg_state(dev), vaf); 9330 } else { 9331 printk("%s(NULL net_device): %pV", level, vaf); 9332 } 9333 } 9334 9335 void netdev_printk(const char *level, const struct net_device *dev, 9336 const char *format, ...) 9337 { 9338 struct va_format vaf; 9339 va_list args; 9340 9341 va_start(args, format); 9342 9343 vaf.fmt = format; 9344 vaf.va = &args; 9345 9346 __netdev_printk(level, dev, &vaf); 9347 9348 va_end(args); 9349 } 9350 EXPORT_SYMBOL(netdev_printk); 9351 9352 #define define_netdev_printk_level(func, level) \ 9353 void func(const struct net_device *dev, const char *fmt, ...) 
\ 9354 { \ 9355 struct va_format vaf; \ 9356 va_list args; \ 9357 \ 9358 va_start(args, fmt); \ 9359 \ 9360 vaf.fmt = fmt; \ 9361 vaf.va = &args; \ 9362 \ 9363 __netdev_printk(level, dev, &vaf); \ 9364 \ 9365 va_end(args); \ 9366 } \ 9367 EXPORT_SYMBOL(func); 9368 9369 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 9370 define_netdev_printk_level(netdev_alert, KERN_ALERT); 9371 define_netdev_printk_level(netdev_crit, KERN_CRIT); 9372 define_netdev_printk_level(netdev_err, KERN_ERR); 9373 define_netdev_printk_level(netdev_warn, KERN_WARNING); 9374 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 9375 define_netdev_printk_level(netdev_info, KERN_INFO); 9376 9377 static void __net_exit netdev_exit(struct net *net) 9378 { 9379 kfree(net->dev_name_head); 9380 kfree(net->dev_index_head); 9381 if (net != &init_net) 9382 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 9383 } 9384 9385 static struct pernet_operations __net_initdata netdev_net_ops = { 9386 .init = netdev_init, 9387 .exit = netdev_exit, 9388 }; 9389 9390 static void __net_exit default_device_exit(struct net *net) 9391 { 9392 struct net_device *dev, *aux; 9393 /* 9394 * Push all migratable network devices back to the 9395 * initial network namespace 9396 */ 9397 rtnl_lock(); 9398 for_each_netdev_safe(net, dev, aux) { 9399 int err; 9400 char fb_name[IFNAMSIZ]; 9401 9402 /* Ignore unmoveable devices (i.e. loopback) */ 9403 if (dev->features & NETIF_F_NETNS_LOCAL) 9404 continue; 9405 9406 /* Leave virtual devices for the generic cleanup */ 9407 if (dev->rtnl_link_ops) 9408 continue; 9409 9410 /* Push remaining network devices to init_net */ 9411 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 9412 err = dev_change_net_namespace(dev, &init_net, fb_name); 9413 if (err) { 9414 pr_emerg("%s: failed to move %s to init_net: %d\n", 9415 __func__, dev->name, err); 9416 BUG(); 9417 } 9418 } 9419 rtnl_unlock(); 9420 } 9421 9422 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list) 9423 { 9424 /* Return with the rtnl_lock held when there are no network 9425 * devices unregistering in any network namespace in net_list. 9426 */ 9427 struct net *net; 9428 bool unregistering; 9429 DEFINE_WAIT_FUNC(wait, woken_wake_function); 9430 9431 add_wait_queue(&netdev_unregistering_wq, &wait); 9432 for (;;) { 9433 unregistering = false; 9434 rtnl_lock(); 9435 list_for_each_entry(net, net_list, exit_list) { 9436 if (net->dev_unreg_count > 0) { 9437 unregistering = true; 9438 break; 9439 } 9440 } 9441 if (!unregistering) 9442 break; 9443 __rtnl_unlock(); 9444 9445 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 9446 } 9447 remove_wait_queue(&netdev_unregistering_wq, &wait); 9448 } 9449 9450 static void __net_exit default_device_exit_batch(struct list_head *net_list) 9451 { 9452 /* At exit all network devices must be removed from a network 9453 * namespace. Do this in the reverse order of registration. 9454 * Do this across as many network namespaces as possible to 9455 * improve batching efficiency. 9456 */ 9457 struct net_device *dev; 9458 struct net *net; 9459 LIST_HEAD(dev_kill_list); 9460 9461 /* To prevent network device cleanup code from dereferencing 9462 * loopback devices or network devices that have been freed, 9463 * wait here for all pending unregistrations to complete 9464 * before unregistering the loopback device and allowing the 9465 * network namespace to be freed.
9466 * 9467 * The netdev todo list containing all network devices 9468 * unregistrations that happen in default_device_exit_batch 9469 * will run in the rtnl_unlock() at the end of 9470 * default_device_exit_batch. 9471 */ 9472 rtnl_lock_unregistering(net_list); 9473 list_for_each_entry(net, net_list, exit_list) { 9474 for_each_netdev_reverse(net, dev) { 9475 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 9476 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 9477 else 9478 unregister_netdevice_queue(dev, &dev_kill_list); 9479 } 9480 } 9481 unregister_netdevice_many(&dev_kill_list); 9482 rtnl_unlock(); 9483 } 9484 9485 static struct pernet_operations __net_initdata default_device_ops = { 9486 .exit = default_device_exit, 9487 .exit_batch = default_device_exit_batch, 9488 }; 9489 9490 /* 9491 * Initialize the DEV module. At boot time this walks the device list and 9492 * unhooks any devices that fail to initialise (normally hardware not 9493 * present) and leaves us with a valid list of present and active devices. 9494 * 9495 */ 9496 9497 /* 9498 * This is called single-threaded during boot, so no need 9499 * to take the rtnl semaphore. 9500 */ 9501 static int __init net_dev_init(void) 9502 { 9503 int i, rc = -ENOMEM; 9504 9505 BUG_ON(!dev_boot_phase); 9506 9507 if (dev_proc_init()) 9508 goto out; 9509 9510 if (netdev_kobject_init()) 9511 goto out; 9512 9513 INIT_LIST_HEAD(&ptype_all); 9514 for (i = 0; i < PTYPE_HASH_SIZE; i++) 9515 INIT_LIST_HEAD(&ptype_base[i]); 9516 9517 INIT_LIST_HEAD(&offload_base); 9518 9519 if (register_pernet_subsys(&netdev_net_ops)) 9520 goto out; 9521 9522 /* 9523 * Initialise the packet receive queues. 9524 */ 9525 9526 for_each_possible_cpu(i) { 9527 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 9528 struct softnet_data *sd = &per_cpu(softnet_data, i); 9529 9530 INIT_WORK(flush, flush_backlog); 9531 9532 skb_queue_head_init(&sd->input_pkt_queue); 9533 skb_queue_head_init(&sd->process_queue); 9534 #ifdef CONFIG_XFRM_OFFLOAD 9535 skb_queue_head_init(&sd->xfrm_backlog); 9536 #endif 9537 INIT_LIST_HEAD(&sd->poll_list); 9538 sd->output_queue_tailp = &sd->output_queue; 9539 #ifdef CONFIG_RPS 9540 sd->csd.func = rps_trigger_softirq; 9541 sd->csd.info = sd; 9542 sd->cpu = i; 9543 #endif 9544 9545 sd->backlog.poll = process_backlog; 9546 sd->backlog.weight = weight_p; 9547 } 9548 9549 dev_boot_phase = 0; 9550 9551 /* The loopback device is special. If any other network device 9552 * is present in a network namespace, the loopback device must 9553 * be present too. Since we now dynamically allocate and free the 9554 * loopback device, ensure this invariant is maintained by 9555 * keeping the loopback device as the first device on the 9556 * list of network devices, so that the loopback device 9557 * is the first device that appears and the last network device 9558 * that disappears. 9559 */ 9560 if (register_pernet_device(&loopback_net_ops)) 9561 goto out; 9562 9563 if (register_pernet_device(&default_device_ops)) 9564 goto out; 9565 9566 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 9567 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 9568 9569 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 9570 NULL, dev_cpu_dead); 9571 WARN_ON(rc < 0); 9572 rc = 0; 9573 out: 9574 return rc; 9575 } 9576 9577 subsys_initcall(net_dev_init); 9578
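
/*
 * Illustrative note (a sketch under assumptions, not part of the core):
 * the unregister path above (netdev_wait_allrefs()/netdev_run_todo())
 * only completes once every reference holder has reacted to
 * NETDEV_UNREGISTER.  A consumer holding device references would
 * typically look roughly like the following, where the "foo" names are
 * hypothetical:
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:
 *			foo_attach(dev);	(takes dev_hold(dev))
 *			break;
 *		case NETDEV_UNREGISTER:
 *			foo_detach(dev);	(drops it with dev_put(dev))
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 * registered with register_netdevice_notifier(&foo_netdev_notifier) and
 * removed again with unregister_netdevice_notifier().
 */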