// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
63 * Paul Rusty Russell : SIOCSIFNAME 64 * Pekka Riikonen : Netdev boot-time settings code 65 * Andrew Morton : Make unregister_netdevice wait 66 * indefinitely on dev->refcnt 67 * J Hadi Salim : - Backlog queue sampling 68 * - netif_rx() feedback 69 */ 70 71 #include <linux/uaccess.h> 72 #include <linux/bitops.h> 73 #include <linux/capability.h> 74 #include <linux/cpu.h> 75 #include <linux/types.h> 76 #include <linux/kernel.h> 77 #include <linux/hash.h> 78 #include <linux/slab.h> 79 #include <linux/sched.h> 80 #include <linux/sched/mm.h> 81 #include <linux/mutex.h> 82 #include <linux/string.h> 83 #include <linux/mm.h> 84 #include <linux/socket.h> 85 #include <linux/sockios.h> 86 #include <linux/errno.h> 87 #include <linux/interrupt.h> 88 #include <linux/if_ether.h> 89 #include <linux/netdevice.h> 90 #include <linux/etherdevice.h> 91 #include <linux/ethtool.h> 92 #include <linux/skbuff.h> 93 #include <linux/bpf.h> 94 #include <linux/bpf_trace.h> 95 #include <net/net_namespace.h> 96 #include <net/sock.h> 97 #include <net/busy_poll.h> 98 #include <linux/rtnetlink.h> 99 #include <linux/stat.h> 100 #include <net/dst.h> 101 #include <net/dst_metadata.h> 102 #include <net/pkt_sched.h> 103 #include <net/pkt_cls.h> 104 #include <net/checksum.h> 105 #include <net/xfrm.h> 106 #include <linux/highmem.h> 107 #include <linux/init.h> 108 #include <linux/module.h> 109 #include <linux/netpoll.h> 110 #include <linux/rcupdate.h> 111 #include <linux/delay.h> 112 #include <net/iw_handler.h> 113 #include <asm/current.h> 114 #include <linux/audit.h> 115 #include <linux/dmaengine.h> 116 #include <linux/err.h> 117 #include <linux/ctype.h> 118 #include <linux/if_arp.h> 119 #include <linux/if_vlan.h> 120 #include <linux/ip.h> 121 #include <net/ip.h> 122 #include <net/mpls.h> 123 #include <linux/ipv6.h> 124 #include <linux/in.h> 125 #include <linux/jhash.h> 126 #include <linux/random.h> 127 #include <trace/events/napi.h> 128 #include <trace/events/net.h> 129 #include <trace/events/skb.h> 130 #include <linux/inetdevice.h> 131 #include <linux/cpu_rmap.h> 132 #include <linux/static_key.h> 133 #include <linux/hashtable.h> 134 #include <linux/vmalloc.h> 135 #include <linux/if_macvlan.h> 136 #include <linux/errqueue.h> 137 #include <linux/hrtimer.h> 138 #include <linux/netfilter_ingress.h> 139 #include <linux/crash_dump.h> 140 #include <linux/sctp.h> 141 #include <net/udp_tunnel.h> 142 #include <linux/net_namespace.h> 143 #include <linux/indirect_call_wrapper.h> 144 #include <net/devlink.h> 145 146 #include "net-sysfs.h" 147 148 #define MAX_GRO_SKBS 8 149 150 /* This should be increased if a protocol with a bigger head is added. */ 151 #define GRO_MAX_HEAD (MAX_HEADER + 128) 152 153 static DEFINE_SPINLOCK(ptype_lock); 154 static DEFINE_SPINLOCK(offload_lock); 155 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 156 struct list_head ptype_all __read_mostly; /* Taps */ 157 static struct list_head offload_base __read_mostly; 158 159 static int netif_rx_internal(struct sk_buff *skb); 160 static int call_netdevice_notifiers_info(unsigned long val, 161 struct netdev_notifier_info *info); 162 static int call_netdevice_notifiers_extack(unsigned long val, 163 struct net_device *dev, 164 struct netlink_ext_ack *extack); 165 static struct napi_struct *napi_by_id(unsigned int napi_id); 166 167 /* 168 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 169 * semaphore. 
170 * 171 * Pure readers hold dev_base_lock for reading, or rcu_read_lock() 172 * 173 * Writers must hold the rtnl semaphore while they loop through the 174 * dev_base_head list, and hold dev_base_lock for writing when they do the 175 * actual updates. This allows pure readers to access the list even 176 * while a writer is preparing to update it. 177 * 178 * To put it another way, dev_base_lock is held for writing only to 179 * protect against pure readers; the rtnl semaphore provides the 180 * protection against other writers. 181 * 182 * See, for example usages, register_netdevice() and 183 * unregister_netdevice(), which must be called with the rtnl 184 * semaphore held. 185 */ 186 DEFINE_RWLOCK(dev_base_lock); 187 EXPORT_SYMBOL(dev_base_lock); 188 189 static DEFINE_MUTEX(ifalias_mutex); 190 191 /* protects napi_hash addition/deletion and napi_gen_id */ 192 static DEFINE_SPINLOCK(napi_hash_lock); 193 194 static unsigned int napi_gen_id = NR_CPUS; 195 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 196 197 static seqcount_t devnet_rename_seq; 198 199 static inline void dev_base_seq_inc(struct net *net) 200 { 201 while (++net->dev_base_seq == 0) 202 ; 203 } 204 205 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 206 { 207 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 208 209 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 210 } 211 212 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 213 { 214 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 215 } 216 217 static inline void rps_lock(struct softnet_data *sd) 218 { 219 #ifdef CONFIG_RPS 220 spin_lock(&sd->input_pkt_queue.lock); 221 #endif 222 } 223 224 static inline void rps_unlock(struct softnet_data *sd) 225 { 226 #ifdef CONFIG_RPS 227 spin_unlock(&sd->input_pkt_queue.lock); 228 #endif 229 } 230 231 /* Device list insertion */ 232 static void list_netdevice(struct net_device *dev) 233 { 234 struct net *net = dev_net(dev); 235 236 ASSERT_RTNL(); 237 238 write_lock_bh(&dev_base_lock); 239 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 240 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); 241 hlist_add_head_rcu(&dev->index_hlist, 242 dev_index_hash(net, dev->ifindex)); 243 write_unlock_bh(&dev_base_lock); 244 245 dev_base_seq_inc(net); 246 } 247 248 /* Device list removal 249 * caller must respect a RCU grace period before freeing/reusing dev 250 */ 251 static void unlist_netdevice(struct net_device *dev) 252 { 253 ASSERT_RTNL(); 254 255 /* Unlink dev from the device chain */ 256 write_lock_bh(&dev_base_lock); 257 list_del_rcu(&dev->dev_list); 258 hlist_del_rcu(&dev->name_hlist); 259 hlist_del_rcu(&dev->index_hlist); 260 write_unlock_bh(&dev_base_lock); 261 262 dev_base_seq_inc(dev_net(dev)); 263 } 264 265 /* 266 * Our notifier list 267 */ 268 269 static RAW_NOTIFIER_HEAD(netdev_chain); 270 271 /* 272 * Device drivers call our routines to queue packets here. We empty the 273 * queue in the local softnet handler. 
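 *
 * As an illustrative sketch (not code from this file), a typical driver
 * receive path hands packets to this machinery roughly as follows:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);		/* queued via this CPU's softnet_data */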
274 */ 275 276 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 277 EXPORT_PER_CPU_SYMBOL(softnet_data); 278 279 #ifdef CONFIG_LOCKDEP 280 /* 281 * register_netdevice() inits txq->_xmit_lock and sets lockdep class 282 * according to dev->type 283 */ 284 static const unsigned short netdev_lock_type[] = { 285 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 286 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 287 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 288 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 289 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 290 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 291 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 292 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 293 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 294 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 295 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 296 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 297 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 298 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 299 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 300 301 static const char *const netdev_lock_name[] = { 302 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 303 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 304 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 305 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 306 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 307 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 308 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 309 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 310 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 311 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 312 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 313 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 314 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 315 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 316 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 317 318 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 319 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 320 321 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 322 { 323 int i; 324 325 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 326 if (netdev_lock_type[i] == dev_type) 327 return i; 328 /* the last key is used by default */ 329 return ARRAY_SIZE(netdev_lock_type) - 1; 330 } 331 332 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 333 unsigned short dev_type) 334 { 335 int i; 336 337 i = netdev_lock_pos(dev_type); 338 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 339 netdev_lock_name[i]); 340 } 341 342 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 343 { 344 int i; 345 346 i = netdev_lock_pos(dev->type); 347 lockdep_set_class_and_name(&dev->addr_list_lock, 348 &netdev_addr_lock_key[i], 349 netdev_lock_name[i]); 350 } 351 #else 352 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 353 unsigned short dev_type) 354 { 355 } 356 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 357 { 358 } 359 #endif 360 361 /******************************************************************************* 362 * 363 * 
Protocol management and registration routines 364 * 365 *******************************************************************************/ 366 367 368 /* 369 * Add a protocol ID to the list. Now that the input handler is 370 * smarter we can dispense with all the messy stuff that used to be 371 * here. 372 * 373 * BEWARE!!! Protocol handlers, mangling input packets, 374 * MUST BE last in hash buckets and checking protocol handlers 375 * MUST start from promiscuous ptype_all chain in net_bh. 376 * It is true now, do not change it. 377 * Explanation follows: if protocol handler, mangling packet, will 378 * be the first on list, it is not able to sense, that packet 379 * is cloned and should be copied-on-write, so that it will 380 * change it and subsequent readers will get broken packet. 381 * --ANK (980803) 382 */ 383 384 static inline struct list_head *ptype_head(const struct packet_type *pt) 385 { 386 if (pt->type == htons(ETH_P_ALL)) 387 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 388 else 389 return pt->dev ? &pt->dev->ptype_specific : 390 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 391 } 392 393 /** 394 * dev_add_pack - add packet handler 395 * @pt: packet type declaration 396 * 397 * Add a protocol handler to the networking stack. The passed &packet_type 398 * is linked into kernel lists and may not be freed until it has been 399 * removed from the kernel lists. 400 * 401 * This call does not sleep therefore it can not 402 * guarantee all CPU's that are in middle of receiving packets 403 * will see the new packet type (until the next received packet). 404 */ 405 406 void dev_add_pack(struct packet_type *pt) 407 { 408 struct list_head *head = ptype_head(pt); 409 410 spin_lock(&ptype_lock); 411 list_add_rcu(&pt->list, head); 412 spin_unlock(&ptype_lock); 413 } 414 EXPORT_SYMBOL(dev_add_pack); 415 416 /** 417 * __dev_remove_pack - remove packet handler 418 * @pt: packet type declaration 419 * 420 * Remove a protocol handler that was previously added to the kernel 421 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 422 * from the kernel lists and can be freed or reused once this function 423 * returns. 424 * 425 * The packet type might still be in use by receivers 426 * and must not be freed until after all the CPU's have gone 427 * through a quiescent state. 428 */ 429 void __dev_remove_pack(struct packet_type *pt) 430 { 431 struct list_head *head = ptype_head(pt); 432 struct packet_type *pt1; 433 434 spin_lock(&ptype_lock); 435 436 list_for_each_entry(pt1, head, list) { 437 if (pt == pt1) { 438 list_del_rcu(&pt->list); 439 goto out; 440 } 441 } 442 443 pr_warn("dev_remove_pack: %p not found\n", pt); 444 out: 445 spin_unlock(&ptype_lock); 446 } 447 EXPORT_SYMBOL(__dev_remove_pack); 448 449 /** 450 * dev_remove_pack - remove packet handler 451 * @pt: packet type declaration 452 * 453 * Remove a protocol handler that was previously added to the kernel 454 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 455 * from the kernel lists and can be freed or reused once this function 456 * returns. 457 * 458 * This call sleeps to guarantee that no CPU is looking at the packet 459 * type after return. 460 */ 461 void dev_remove_pack(struct packet_type *pt) 462 { 463 __dev_remove_pack(pt); 464 465 synchronize_net(); 466 } 467 EXPORT_SYMBOL(dev_remove_pack); 468 469 470 /** 471 * dev_add_offload - register offload handlers 472 * @po: protocol offload declaration 473 * 474 * Add protocol offload handlers to the networking stack. 
The passed 475 * &proto_offload is linked into kernel lists and may not be freed until 476 * it has been removed from the kernel lists. 477 * 478 * This call does not sleep therefore it can not 479 * guarantee all CPU's that are in middle of receiving packets 480 * will see the new offload handlers (until the next received packet). 481 */ 482 void dev_add_offload(struct packet_offload *po) 483 { 484 struct packet_offload *elem; 485 486 spin_lock(&offload_lock); 487 list_for_each_entry(elem, &offload_base, list) { 488 if (po->priority < elem->priority) 489 break; 490 } 491 list_add_rcu(&po->list, elem->list.prev); 492 spin_unlock(&offload_lock); 493 } 494 EXPORT_SYMBOL(dev_add_offload); 495 496 /** 497 * __dev_remove_offload - remove offload handler 498 * @po: packet offload declaration 499 * 500 * Remove a protocol offload handler that was previously added to the 501 * kernel offload handlers by dev_add_offload(). The passed &offload_type 502 * is removed from the kernel lists and can be freed or reused once this 503 * function returns. 504 * 505 * The packet type might still be in use by receivers 506 * and must not be freed until after all the CPU's have gone 507 * through a quiescent state. 508 */ 509 static void __dev_remove_offload(struct packet_offload *po) 510 { 511 struct list_head *head = &offload_base; 512 struct packet_offload *po1; 513 514 spin_lock(&offload_lock); 515 516 list_for_each_entry(po1, head, list) { 517 if (po == po1) { 518 list_del_rcu(&po->list); 519 goto out; 520 } 521 } 522 523 pr_warn("dev_remove_offload: %p not found\n", po); 524 out: 525 spin_unlock(&offload_lock); 526 } 527 528 /** 529 * dev_remove_offload - remove packet offload handler 530 * @po: packet offload declaration 531 * 532 * Remove a packet offload handler that was previously added to the kernel 533 * offload handlers by dev_add_offload(). The passed &offload_type is 534 * removed from the kernel lists and can be freed or reused once this 535 * function returns. 536 * 537 * This call sleeps to guarantee that no CPU is looking at the packet 538 * type after return. 539 */ 540 void dev_remove_offload(struct packet_offload *po) 541 { 542 __dev_remove_offload(po); 543 544 synchronize_net(); 545 } 546 EXPORT_SYMBOL(dev_remove_offload); 547 548 /****************************************************************************** 549 * 550 * Device Boot-time Settings Routines 551 * 552 ******************************************************************************/ 553 554 /* Boot time configuration table */ 555 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; 556 557 /** 558 * netdev_boot_setup_add - add new setup entry 559 * @name: name of the device 560 * @map: configured settings for the device 561 * 562 * Adds new setup entry to the dev_boot_setup list. The function 563 * returns 0 on error and 1 on success. This is a generic routine to 564 * all netdevices. 565 */ 566 static int netdev_boot_setup_add(char *name, struct ifmap *map) 567 { 568 struct netdev_boot_setup *s; 569 int i; 570 571 s = dev_boot_setup; 572 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 573 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { 574 memset(s[i].name, 0, sizeof(s[i].name)); 575 strlcpy(s[i].name, name, IFNAMSIZ); 576 memcpy(&s[i].map, map, sizeof(s[i].map)); 577 break; 578 } 579 } 580 581 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; 582 } 583 584 /** 585 * netdev_boot_setup_check - check boot time settings 586 * @dev: the netdevice 587 * 588 * Check boot time settings for the device. 
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If the device is already registered, return a base of 1
	 * to indicate not to probe for this interface.
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 * dev_get_iflink - get 'iflink' value of an interface
 * @dev: targeted interface
 *
 * Indicates the ifindex the interface is linked to.
 * Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 * dev_fill_metadata_dst - Retrieve tunnel egress information.
 * @dev: targeted interface
 * @skb: The packet.
 *
 * For better visibility of tunnel traffic OVS needs to retrieve
 * egress tunnel information for a packet. The following API allows
 * the user to get this info.
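 *
 * A hypothetical caller (for instance a flow-based datapath) might use
 * it along these lines, the names here being purely illustrative:
 *
 *	if (!dev_fill_metadata_dst(dev, skb))
 *		tun_info = skb_tunnel_info(skb);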
704 */ 705 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 706 { 707 struct ip_tunnel_info *info; 708 709 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 710 return -EINVAL; 711 712 info = skb_tunnel_info_unclone(skb); 713 if (!info) 714 return -ENOMEM; 715 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 716 return -EINVAL; 717 718 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 719 } 720 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 721 722 /** 723 * __dev_get_by_name - find a device by its name 724 * @net: the applicable net namespace 725 * @name: name to find 726 * 727 * Find an interface by name. Must be called under RTNL semaphore 728 * or @dev_base_lock. If the name is found a pointer to the device 729 * is returned. If the name is not found then %NULL is returned. The 730 * reference counters are not incremented so the caller must be 731 * careful with locks. 732 */ 733 734 struct net_device *__dev_get_by_name(struct net *net, const char *name) 735 { 736 struct net_device *dev; 737 struct hlist_head *head = dev_name_hash(net, name); 738 739 hlist_for_each_entry(dev, head, name_hlist) 740 if (!strncmp(dev->name, name, IFNAMSIZ)) 741 return dev; 742 743 return NULL; 744 } 745 EXPORT_SYMBOL(__dev_get_by_name); 746 747 /** 748 * dev_get_by_name_rcu - find a device by its name 749 * @net: the applicable net namespace 750 * @name: name to find 751 * 752 * Find an interface by name. 753 * If the name is found a pointer to the device is returned. 754 * If the name is not found then %NULL is returned. 755 * The reference counters are not incremented so the caller must be 756 * careful with locks. The caller must hold RCU lock. 757 */ 758 759 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 760 { 761 struct net_device *dev; 762 struct hlist_head *head = dev_name_hash(net, name); 763 764 hlist_for_each_entry_rcu(dev, head, name_hlist) 765 if (!strncmp(dev->name, name, IFNAMSIZ)) 766 return dev; 767 768 return NULL; 769 } 770 EXPORT_SYMBOL(dev_get_by_name_rcu); 771 772 /** 773 * dev_get_by_name - find a device by its name 774 * @net: the applicable net namespace 775 * @name: name to find 776 * 777 * Find an interface by name. This can be called from any 778 * context and does its own locking. The returned handle has 779 * the usage count incremented and the caller must use dev_put() to 780 * release it when it is no longer needed. %NULL is returned if no 781 * matching device is found. 782 */ 783 784 struct net_device *dev_get_by_name(struct net *net, const char *name) 785 { 786 struct net_device *dev; 787 788 rcu_read_lock(); 789 dev = dev_get_by_name_rcu(net, name); 790 if (dev) 791 dev_hold(dev); 792 rcu_read_unlock(); 793 return dev; 794 } 795 EXPORT_SYMBOL(dev_get_by_name); 796 797 /** 798 * __dev_get_by_index - find a device by its ifindex 799 * @net: the applicable net namespace 800 * @ifindex: index of device 801 * 802 * Search for an interface by index. Returns %NULL if the device 803 * is not found or a pointer to the device. The device has not 804 * had its reference counter increased so the caller must be careful 805 * about locking. The caller must hold either the RTNL semaphore 806 * or @dev_base_lock. 
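 *
 * As a sketch, an RTNL-protected lookup would look like:
 *
 *	ASSERT_RTNL();
 *	dev = __dev_get_by_index(net, ifindex);
 *	if (!dev)
 *		return -ENODEV;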
807 */ 808 809 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 810 { 811 struct net_device *dev; 812 struct hlist_head *head = dev_index_hash(net, ifindex); 813 814 hlist_for_each_entry(dev, head, index_hlist) 815 if (dev->ifindex == ifindex) 816 return dev; 817 818 return NULL; 819 } 820 EXPORT_SYMBOL(__dev_get_by_index); 821 822 /** 823 * dev_get_by_index_rcu - find a device by its ifindex 824 * @net: the applicable net namespace 825 * @ifindex: index of device 826 * 827 * Search for an interface by index. Returns %NULL if the device 828 * is not found or a pointer to the device. The device has not 829 * had its reference counter increased so the caller must be careful 830 * about locking. The caller must hold RCU lock. 831 */ 832 833 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 834 { 835 struct net_device *dev; 836 struct hlist_head *head = dev_index_hash(net, ifindex); 837 838 hlist_for_each_entry_rcu(dev, head, index_hlist) 839 if (dev->ifindex == ifindex) 840 return dev; 841 842 return NULL; 843 } 844 EXPORT_SYMBOL(dev_get_by_index_rcu); 845 846 847 /** 848 * dev_get_by_index - find a device by its ifindex 849 * @net: the applicable net namespace 850 * @ifindex: index of device 851 * 852 * Search for an interface by index. Returns NULL if the device 853 * is not found or a pointer to the device. The device returned has 854 * had a reference added and the pointer is safe until the user calls 855 * dev_put to indicate they have finished with it. 856 */ 857 858 struct net_device *dev_get_by_index(struct net *net, int ifindex) 859 { 860 struct net_device *dev; 861 862 rcu_read_lock(); 863 dev = dev_get_by_index_rcu(net, ifindex); 864 if (dev) 865 dev_hold(dev); 866 rcu_read_unlock(); 867 return dev; 868 } 869 EXPORT_SYMBOL(dev_get_by_index); 870 871 /** 872 * dev_get_by_napi_id - find a device by napi_id 873 * @napi_id: ID of the NAPI struct 874 * 875 * Search for an interface by NAPI ID. Returns %NULL if the device 876 * is not found or a pointer to the device. The device has not had 877 * its reference counter increased so the caller must be careful 878 * about locking. The caller must hold RCU lock. 879 */ 880 881 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 882 { 883 struct napi_struct *napi; 884 885 WARN_ON_ONCE(!rcu_read_lock_held()); 886 887 if (napi_id < MIN_NAPI_ID) 888 return NULL; 889 890 napi = napi_by_id(napi_id); 891 892 return napi ? napi->dev : NULL; 893 } 894 EXPORT_SYMBOL(dev_get_by_napi_id); 895 896 /** 897 * netdev_get_name - get a netdevice name, knowing its ifindex. 898 * @net: network namespace 899 * @name: a pointer to the buffer where the name will be stored. 900 * @ifindex: the ifindex of the interface to get the name from. 901 * 902 * The use of raw_seqcount_begin() and cond_resched() before 903 * retrying is required as we want to give the writers a chance 904 * to complete when CONFIG_PREEMPT is not set. 
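 *
 * The @name buffer must provide at least IFNAMSIZ bytes, since the
 * device name is copied verbatim; a minimal caller sketch:
 *
 *	char name[IFNAMSIZ];
 *	int err = netdev_get_name(net, name, ifindex);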
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns NULL if the device
 * is not found or a pointer to the device.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * __dev_get_by_flags - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns NULL if a device
 * is not found or a pointer to the device. Must be called inside
 * rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work. We also disallow any kind of
 * whitespace.
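 *
 * Illustrative results, following the checks below:
 *
 *	dev_valid_name("eth0")		-> true
 *	dev_valid_name("")		-> false (empty)
 *	dev_valid_name("..")		-> false (reserved)
 *	dev_valid_name("a/b")		-> false ('/' not allowed)
 *	dev_valid_name("my eth")	-> false (whitespace)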
1023 */ 1024 bool dev_valid_name(const char *name) 1025 { 1026 if (*name == '\0') 1027 return false; 1028 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1029 return false; 1030 if (!strcmp(name, ".") || !strcmp(name, "..")) 1031 return false; 1032 1033 while (*name) { 1034 if (*name == '/' || *name == ':' || isspace(*name)) 1035 return false; 1036 name++; 1037 } 1038 return true; 1039 } 1040 EXPORT_SYMBOL(dev_valid_name); 1041 1042 /** 1043 * __dev_alloc_name - allocate a name for a device 1044 * @net: network namespace to allocate the device name in 1045 * @name: name format string 1046 * @buf: scratch buffer and result name string 1047 * 1048 * Passed a format string - eg "lt%d" it will try and find a suitable 1049 * id. It scans list of devices to build up a free map, then chooses 1050 * the first empty slot. The caller must hold the dev_base or rtnl lock 1051 * while allocating the name and adding the device in order to avoid 1052 * duplicates. 1053 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1054 * Returns the number of the unit assigned or a negative errno code. 1055 */ 1056 1057 static int __dev_alloc_name(struct net *net, const char *name, char *buf) 1058 { 1059 int i = 0; 1060 const char *p; 1061 const int max_netdevices = 8*PAGE_SIZE; 1062 unsigned long *inuse; 1063 struct net_device *d; 1064 1065 if (!dev_valid_name(name)) 1066 return -EINVAL; 1067 1068 p = strchr(name, '%'); 1069 if (p) { 1070 /* 1071 * Verify the string as this thing may have come from 1072 * the user. There must be either one "%d" and no other "%" 1073 * characters. 1074 */ 1075 if (p[1] != 'd' || strchr(p + 2, '%')) 1076 return -EINVAL; 1077 1078 /* Use one page as a bit array of possible slots */ 1079 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 1080 if (!inuse) 1081 return -ENOMEM; 1082 1083 for_each_netdev(net, d) { 1084 if (!sscanf(d->name, name, &i)) 1085 continue; 1086 if (i < 0 || i >= max_netdevices) 1087 continue; 1088 1089 /* avoid cases where sscanf is not exact inverse of printf */ 1090 snprintf(buf, IFNAMSIZ, name, i); 1091 if (!strncmp(buf, d->name, IFNAMSIZ)) 1092 set_bit(i, inuse); 1093 } 1094 1095 i = find_first_zero_bit(inuse, max_netdevices); 1096 free_page((unsigned long) inuse); 1097 } 1098 1099 snprintf(buf, IFNAMSIZ, name, i); 1100 if (!__dev_get_by_name(net, buf)) 1101 return i; 1102 1103 /* It is possible to run out of possible slots 1104 * when the name is long and there isn't enough space left 1105 * for the digits, or if all bits are used. 1106 */ 1107 return -ENFILE; 1108 } 1109 1110 static int dev_alloc_name_ns(struct net *net, 1111 struct net_device *dev, 1112 const char *name) 1113 { 1114 char buf[IFNAMSIZ]; 1115 int ret; 1116 1117 BUG_ON(!net); 1118 ret = __dev_alloc_name(net, name, buf); 1119 if (ret >= 0) 1120 strlcpy(dev->name, buf, IFNAMSIZ); 1121 return ret; 1122 } 1123 1124 /** 1125 * dev_alloc_name - allocate a name for a device 1126 * @dev: device 1127 * @name: name format string 1128 * 1129 * Passed a format string - eg "lt%d" it will try and find a suitable 1130 * id. It scans list of devices to build up a free map, then chooses 1131 * the first empty slot. The caller must hold the dev_base or rtnl lock 1132 * while allocating the name and adding the device in order to avoid 1133 * duplicates. 1134 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1135 * Returns the number of the unit assigned or a negative errno code. 
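 *
 * For example, a caller that does not care about the unit number might
 * request a wildcard name (sketch only):
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;
 *	/* dev->name now holds the first free "ethN" */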
1136 */ 1137 1138 int dev_alloc_name(struct net_device *dev, const char *name) 1139 { 1140 return dev_alloc_name_ns(dev_net(dev), dev, name); 1141 } 1142 EXPORT_SYMBOL(dev_alloc_name); 1143 1144 int dev_get_valid_name(struct net *net, struct net_device *dev, 1145 const char *name) 1146 { 1147 BUG_ON(!net); 1148 1149 if (!dev_valid_name(name)) 1150 return -EINVAL; 1151 1152 if (strchr(name, '%')) 1153 return dev_alloc_name_ns(net, dev, name); 1154 else if (__dev_get_by_name(net, name)) 1155 return -EEXIST; 1156 else if (dev->name != name) 1157 strlcpy(dev->name, name, IFNAMSIZ); 1158 1159 return 0; 1160 } 1161 EXPORT_SYMBOL(dev_get_valid_name); 1162 1163 /** 1164 * dev_change_name - change name of a device 1165 * @dev: device 1166 * @newname: name (or format string) must be at least IFNAMSIZ 1167 * 1168 * Change name of a device, can pass format strings "eth%d". 1169 * for wildcarding. 1170 */ 1171 int dev_change_name(struct net_device *dev, const char *newname) 1172 { 1173 unsigned char old_assign_type; 1174 char oldname[IFNAMSIZ]; 1175 int err = 0; 1176 int ret; 1177 struct net *net; 1178 1179 ASSERT_RTNL(); 1180 BUG_ON(!dev_net(dev)); 1181 1182 net = dev_net(dev); 1183 1184 /* Some auto-enslaved devices e.g. failover slaves are 1185 * special, as userspace might rename the device after 1186 * the interface had been brought up and running since 1187 * the point kernel initiated auto-enslavement. Allow 1188 * live name change even when these slave devices are 1189 * up and running. 1190 * 1191 * Typically, users of these auto-enslaving devices 1192 * don't actually care about slave name change, as 1193 * they are supposed to operate on master interface 1194 * directly. 1195 */ 1196 if (dev->flags & IFF_UP && 1197 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) 1198 return -EBUSY; 1199 1200 write_seqcount_begin(&devnet_rename_seq); 1201 1202 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1203 write_seqcount_end(&devnet_rename_seq); 1204 return 0; 1205 } 1206 1207 memcpy(oldname, dev->name, IFNAMSIZ); 1208 1209 err = dev_get_valid_name(net, dev, newname); 1210 if (err < 0) { 1211 write_seqcount_end(&devnet_rename_seq); 1212 return err; 1213 } 1214 1215 if (oldname[0] && !strchr(oldname, '%')) 1216 netdev_info(dev, "renamed from %s\n", oldname); 1217 1218 old_assign_type = dev->name_assign_type; 1219 dev->name_assign_type = NET_NAME_RENAMED; 1220 1221 rollback: 1222 ret = device_rename(&dev->dev, dev->name); 1223 if (ret) { 1224 memcpy(dev->name, oldname, IFNAMSIZ); 1225 dev->name_assign_type = old_assign_type; 1226 write_seqcount_end(&devnet_rename_seq); 1227 return ret; 1228 } 1229 1230 write_seqcount_end(&devnet_rename_seq); 1231 1232 netdev_adjacent_rename_links(dev, oldname); 1233 1234 write_lock_bh(&dev_base_lock); 1235 hlist_del_rcu(&dev->name_hlist); 1236 write_unlock_bh(&dev_base_lock); 1237 1238 synchronize_rcu(); 1239 1240 write_lock_bh(&dev_base_lock); 1241 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); 1242 write_unlock_bh(&dev_base_lock); 1243 1244 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1245 ret = notifier_to_errno(ret); 1246 1247 if (ret) { 1248 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1249 if (err >= 0) { 1250 err = ret; 1251 write_seqcount_begin(&devnet_rename_seq); 1252 memcpy(dev->name, oldname, IFNAMSIZ); 1253 memcpy(oldname, newname, IFNAMSIZ); 1254 dev->name_assign_type = old_assign_type; 1255 old_assign_type = NET_NAME_RENAMED; 1256 goto rollback; 1257 } else { 1258 pr_err("%s: name change rollback 
failed: %d\n", 1259 dev->name, ret); 1260 } 1261 } 1262 1263 return err; 1264 } 1265 1266 /** 1267 * dev_set_alias - change ifalias of a device 1268 * @dev: device 1269 * @alias: name up to IFALIASZ 1270 * @len: limit of bytes to copy from info 1271 * 1272 * Set ifalias for a device, 1273 */ 1274 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1275 { 1276 struct dev_ifalias *new_alias = NULL; 1277 1278 if (len >= IFALIASZ) 1279 return -EINVAL; 1280 1281 if (len) { 1282 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1283 if (!new_alias) 1284 return -ENOMEM; 1285 1286 memcpy(new_alias->ifalias, alias, len); 1287 new_alias->ifalias[len] = 0; 1288 } 1289 1290 mutex_lock(&ifalias_mutex); 1291 rcu_swap_protected(dev->ifalias, new_alias, 1292 mutex_is_locked(&ifalias_mutex)); 1293 mutex_unlock(&ifalias_mutex); 1294 1295 if (new_alias) 1296 kfree_rcu(new_alias, rcuhead); 1297 1298 return len; 1299 } 1300 EXPORT_SYMBOL(dev_set_alias); 1301 1302 /** 1303 * dev_get_alias - get ifalias of a device 1304 * @dev: device 1305 * @name: buffer to store name of ifalias 1306 * @len: size of buffer 1307 * 1308 * get ifalias for a device. Caller must make sure dev cannot go 1309 * away, e.g. rcu read lock or own a reference count to device. 1310 */ 1311 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1312 { 1313 const struct dev_ifalias *alias; 1314 int ret = 0; 1315 1316 rcu_read_lock(); 1317 alias = rcu_dereference(dev->ifalias); 1318 if (alias) 1319 ret = snprintf(name, len, "%s", alias->ifalias); 1320 rcu_read_unlock(); 1321 1322 return ret; 1323 } 1324 1325 /** 1326 * netdev_features_change - device changes features 1327 * @dev: device to cause notification 1328 * 1329 * Called to indicate a device has changed features. 1330 */ 1331 void netdev_features_change(struct net_device *dev) 1332 { 1333 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1334 } 1335 EXPORT_SYMBOL(netdev_features_change); 1336 1337 /** 1338 * netdev_state_change - device changes state 1339 * @dev: device to cause notification 1340 * 1341 * Called to indicate a device has changed state. This function calls 1342 * the notifier chains for netdev_chain and sends a NEWLINK message 1343 * to the routing socket. 1344 */ 1345 void netdev_state_change(struct net_device *dev) 1346 { 1347 if (dev->flags & IFF_UP) { 1348 struct netdev_notifier_change_info change_info = { 1349 .info.dev = dev, 1350 }; 1351 1352 call_netdevice_notifiers_info(NETDEV_CHANGE, 1353 &change_info.info); 1354 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1355 } 1356 } 1357 EXPORT_SYMBOL(netdev_state_change); 1358 1359 /** 1360 * netdev_notify_peers - notify network peers about existence of @dev 1361 * @dev: network device 1362 * 1363 * Generate traffic such that interested network peers are aware of 1364 * @dev, such as by generating a gratuitous ARP. This may be used when 1365 * a device wants to inform the rest of the network about some sort of 1366 * reconfiguration such as a failover event or virtual machine 1367 * migration. 
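 *
 * A hypothetical caller, e.g. after a MAC address has migrated to this
 * device, only needs (RTNL must not be held, it is taken internally):
 *
 *	netdev_notify_peers(dev);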
1368 */ 1369 void netdev_notify_peers(struct net_device *dev) 1370 { 1371 rtnl_lock(); 1372 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1373 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1374 rtnl_unlock(); 1375 } 1376 EXPORT_SYMBOL(netdev_notify_peers); 1377 1378 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1379 { 1380 const struct net_device_ops *ops = dev->netdev_ops; 1381 int ret; 1382 1383 ASSERT_RTNL(); 1384 1385 if (!netif_device_present(dev)) 1386 return -ENODEV; 1387 1388 /* Block netpoll from trying to do any rx path servicing. 1389 * If we don't do this there is a chance ndo_poll_controller 1390 * or ndo_poll may be running while we open the device 1391 */ 1392 netpoll_poll_disable(dev); 1393 1394 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1395 ret = notifier_to_errno(ret); 1396 if (ret) 1397 return ret; 1398 1399 set_bit(__LINK_STATE_START, &dev->state); 1400 1401 if (ops->ndo_validate_addr) 1402 ret = ops->ndo_validate_addr(dev); 1403 1404 if (!ret && ops->ndo_open) 1405 ret = ops->ndo_open(dev); 1406 1407 netpoll_poll_enable(dev); 1408 1409 if (ret) 1410 clear_bit(__LINK_STATE_START, &dev->state); 1411 else { 1412 dev->flags |= IFF_UP; 1413 dev_set_rx_mode(dev); 1414 dev_activate(dev); 1415 add_device_randomness(dev->dev_addr, dev->addr_len); 1416 } 1417 1418 return ret; 1419 } 1420 1421 /** 1422 * dev_open - prepare an interface for use. 1423 * @dev: device to open 1424 * @extack: netlink extended ack 1425 * 1426 * Takes a device from down to up state. The device's private open 1427 * function is invoked and then the multicast lists are loaded. Finally 1428 * the device is moved into the up state and a %NETDEV_UP message is 1429 * sent to the netdev notifier chain. 1430 * 1431 * Calling this function on an active interface is a nop. On a failure 1432 * a negative errno code is returned. 1433 */ 1434 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1435 { 1436 int ret; 1437 1438 if (dev->flags & IFF_UP) 1439 return 0; 1440 1441 ret = __dev_open(dev, extack); 1442 if (ret < 0) 1443 return ret; 1444 1445 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1446 call_netdevice_notifiers(NETDEV_UP, dev); 1447 1448 return ret; 1449 } 1450 EXPORT_SYMBOL(dev_open); 1451 1452 static void __dev_close_many(struct list_head *head) 1453 { 1454 struct net_device *dev; 1455 1456 ASSERT_RTNL(); 1457 might_sleep(); 1458 1459 list_for_each_entry(dev, head, close_list) { 1460 /* Temporarily disable netpoll until the interface is down */ 1461 netpoll_poll_disable(dev); 1462 1463 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1464 1465 clear_bit(__LINK_STATE_START, &dev->state); 1466 1467 /* Synchronize to scheduled poll. We cannot touch poll list, it 1468 * can be even on different cpu. So just clear netif_running(). 1469 * 1470 * dev->stop() will invoke napi_disable() on all of it's 1471 * napi_struct instances on this device. 1472 */ 1473 smp_mb__after_atomic(); /* Commit netif_running(). */ 1474 } 1475 1476 dev_deactivate_many(head); 1477 1478 list_for_each_entry(dev, head, close_list) { 1479 const struct net_device_ops *ops = dev->netdev_ops; 1480 1481 /* 1482 * Call the device specific close. This cannot fail. 1483 * Only if device is UP 1484 * 1485 * We allow it to be called even after a DETACH hot-plug 1486 * event. 
1487 */ 1488 if (ops->ndo_stop) 1489 ops->ndo_stop(dev); 1490 1491 dev->flags &= ~IFF_UP; 1492 netpoll_poll_enable(dev); 1493 } 1494 } 1495 1496 static void __dev_close(struct net_device *dev) 1497 { 1498 LIST_HEAD(single); 1499 1500 list_add(&dev->close_list, &single); 1501 __dev_close_many(&single); 1502 list_del(&single); 1503 } 1504 1505 void dev_close_many(struct list_head *head, bool unlink) 1506 { 1507 struct net_device *dev, *tmp; 1508 1509 /* Remove the devices that don't need to be closed */ 1510 list_for_each_entry_safe(dev, tmp, head, close_list) 1511 if (!(dev->flags & IFF_UP)) 1512 list_del_init(&dev->close_list); 1513 1514 __dev_close_many(head); 1515 1516 list_for_each_entry_safe(dev, tmp, head, close_list) { 1517 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1518 call_netdevice_notifiers(NETDEV_DOWN, dev); 1519 if (unlink) 1520 list_del_init(&dev->close_list); 1521 } 1522 } 1523 EXPORT_SYMBOL(dev_close_many); 1524 1525 /** 1526 * dev_close - shutdown an interface. 1527 * @dev: device to shutdown 1528 * 1529 * This function moves an active device into down state. A 1530 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1531 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1532 * chain. 1533 */ 1534 void dev_close(struct net_device *dev) 1535 { 1536 if (dev->flags & IFF_UP) { 1537 LIST_HEAD(single); 1538 1539 list_add(&dev->close_list, &single); 1540 dev_close_many(&single, true); 1541 list_del(&single); 1542 } 1543 } 1544 EXPORT_SYMBOL(dev_close); 1545 1546 1547 /** 1548 * dev_disable_lro - disable Large Receive Offload on a device 1549 * @dev: device 1550 * 1551 * Disable Large Receive Offload (LRO) on a net device. Must be 1552 * called under RTNL. This is needed if received packets may be 1553 * forwarded to another interface. 1554 */ 1555 void dev_disable_lro(struct net_device *dev) 1556 { 1557 struct net_device *lower_dev; 1558 struct list_head *iter; 1559 1560 dev->wanted_features &= ~NETIF_F_LRO; 1561 netdev_update_features(dev); 1562 1563 if (unlikely(dev->features & NETIF_F_LRO)) 1564 netdev_WARN(dev, "failed to disable LRO!\n"); 1565 1566 netdev_for_each_lower_dev(dev, lower_dev, iter) 1567 dev_disable_lro(lower_dev); 1568 } 1569 EXPORT_SYMBOL(dev_disable_lro); 1570 1571 /** 1572 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1573 * @dev: device 1574 * 1575 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1576 * called under RTNL. This is needed if Generic XDP is installed on 1577 * the device. 
1578 */ 1579 static void dev_disable_gro_hw(struct net_device *dev) 1580 { 1581 dev->wanted_features &= ~NETIF_F_GRO_HW; 1582 netdev_update_features(dev); 1583 1584 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1585 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1586 } 1587 1588 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1589 { 1590 #define N(val) \ 1591 case NETDEV_##val: \ 1592 return "NETDEV_" __stringify(val); 1593 switch (cmd) { 1594 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1595 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1596 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1597 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) 1598 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) 1599 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) 1600 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1601 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1602 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1603 N(PRE_CHANGEADDR) 1604 } 1605 #undef N 1606 return "UNKNOWN_NETDEV_EVENT"; 1607 } 1608 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1609 1610 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1611 struct net_device *dev) 1612 { 1613 struct netdev_notifier_info info = { 1614 .dev = dev, 1615 }; 1616 1617 return nb->notifier_call(nb, val, &info); 1618 } 1619 1620 static int dev_boot_phase = 1; 1621 1622 /** 1623 * register_netdevice_notifier - register a network notifier block 1624 * @nb: notifier 1625 * 1626 * Register a notifier to be called when network device events occur. 1627 * The notifier passed is linked into the kernel structures and must 1628 * not be reused until it has been unregistered. A negative errno code 1629 * is returned on a failure. 1630 * 1631 * When registered all registration and up events are replayed 1632 * to the new notifier to allow device to have a race free 1633 * view of the network device list. 
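 *
 * A minimal, illustrative notifier (hypothetical, not part of this file):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);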
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
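 *
 * Callers that need an errno typically convert the chain result, as
 * dev_change_name() above does:
 *
 *	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
 *	ret = notifier_to_errno(ret);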
1743 */ 1744 1745 static int call_netdevice_notifiers_info(unsigned long val, 1746 struct netdev_notifier_info *info) 1747 { 1748 ASSERT_RTNL(); 1749 return raw_notifier_call_chain(&netdev_chain, val, info); 1750 } 1751 1752 static int call_netdevice_notifiers_extack(unsigned long val, 1753 struct net_device *dev, 1754 struct netlink_ext_ack *extack) 1755 { 1756 struct netdev_notifier_info info = { 1757 .dev = dev, 1758 .extack = extack, 1759 }; 1760 1761 return call_netdevice_notifiers_info(val, &info); 1762 } 1763 1764 /** 1765 * call_netdevice_notifiers - call all network notifier blocks 1766 * @val: value passed unmodified to notifier function 1767 * @dev: net_device pointer passed unmodified to notifier function 1768 * 1769 * Call all network notifier blocks. Parameters and return value 1770 * are as for raw_notifier_call_chain(). 1771 */ 1772 1773 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1774 { 1775 return call_netdevice_notifiers_extack(val, dev, NULL); 1776 } 1777 EXPORT_SYMBOL(call_netdevice_notifiers); 1778 1779 /** 1780 * call_netdevice_notifiers_mtu - call all network notifier blocks 1781 * @val: value passed unmodified to notifier function 1782 * @dev: net_device pointer passed unmodified to notifier function 1783 * @arg: additional u32 argument passed to the notifier function 1784 * 1785 * Call all network notifier blocks. Parameters and return value 1786 * are as for raw_notifier_call_chain(). 1787 */ 1788 static int call_netdevice_notifiers_mtu(unsigned long val, 1789 struct net_device *dev, u32 arg) 1790 { 1791 struct netdev_notifier_info_ext info = { 1792 .info.dev = dev, 1793 .ext.mtu = arg, 1794 }; 1795 1796 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 1797 1798 return call_netdevice_notifiers_info(val, &info.info); 1799 } 1800 1801 #ifdef CONFIG_NET_INGRESS 1802 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 1803 1804 void net_inc_ingress_queue(void) 1805 { 1806 static_branch_inc(&ingress_needed_key); 1807 } 1808 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 1809 1810 void net_dec_ingress_queue(void) 1811 { 1812 static_branch_dec(&ingress_needed_key); 1813 } 1814 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 1815 #endif 1816 1817 #ifdef CONFIG_NET_EGRESS 1818 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 1819 1820 void net_inc_egress_queue(void) 1821 { 1822 static_branch_inc(&egress_needed_key); 1823 } 1824 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 1825 1826 void net_dec_egress_queue(void) 1827 { 1828 static_branch_dec(&egress_needed_key); 1829 } 1830 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 1831 #endif 1832 1833 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 1834 #ifdef CONFIG_JUMP_LABEL 1835 static atomic_t netstamp_needed_deferred; 1836 static atomic_t netstamp_wanted; 1837 static void netstamp_clear(struct work_struct *work) 1838 { 1839 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 1840 int wanted; 1841 1842 wanted = atomic_add_return(deferred, &netstamp_wanted); 1843 if (wanted > 0) 1844 static_branch_enable(&netstamp_needed_key); 1845 else 1846 static_branch_disable(&netstamp_needed_key); 1847 } 1848 static DECLARE_WORK(netstamp_work, netstamp_clear); 1849 #endif 1850 1851 void net_enable_timestamp(void) 1852 { 1853 #ifdef CONFIG_JUMP_LABEL 1854 int wanted; 1855 1856 while (1) { 1857 wanted = atomic_read(&netstamp_wanted); 1858 if (wanted <= 0) 1859 break; 1860 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 1861 return; 1862 } 1863 atomic_inc(&netstamp_needed_deferred); 
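	/* Flipping the static key is deferred to netstamp_work because
	 * static_branch_enable()/disable() may sleep, while this function
	 * can be reached from atomic context.
	 */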
1864 schedule_work(&netstamp_work); 1865 #else 1866 static_branch_inc(&netstamp_needed_key); 1867 #endif 1868 } 1869 EXPORT_SYMBOL(net_enable_timestamp); 1870 1871 void net_disable_timestamp(void) 1872 { 1873 #ifdef CONFIG_JUMP_LABEL 1874 int wanted; 1875 1876 while (1) { 1877 wanted = atomic_read(&netstamp_wanted); 1878 if (wanted <= 1) 1879 break; 1880 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 1881 return; 1882 } 1883 atomic_dec(&netstamp_needed_deferred); 1884 schedule_work(&netstamp_work); 1885 #else 1886 static_branch_dec(&netstamp_needed_key); 1887 #endif 1888 } 1889 EXPORT_SYMBOL(net_disable_timestamp); 1890 1891 static inline void net_timestamp_set(struct sk_buff *skb) 1892 { 1893 skb->tstamp = 0; 1894 if (static_branch_unlikely(&netstamp_needed_key)) 1895 __net_timestamp(skb); 1896 } 1897 1898 #define net_timestamp_check(COND, SKB) \ 1899 if (static_branch_unlikely(&netstamp_needed_key)) { \ 1900 if ((COND) && !(SKB)->tstamp) \ 1901 __net_timestamp(SKB); \ 1902 } \ 1903 1904 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 1905 { 1906 unsigned int len; 1907 1908 if (!(dev->flags & IFF_UP)) 1909 return false; 1910 1911 len = dev->mtu + dev->hard_header_len + VLAN_HLEN; 1912 if (skb->len <= len) 1913 return true; 1914 1915 /* if TSO is enabled, we don't care about the length as the packet 1916 * could be forwarded without being segmented before 1917 */ 1918 if (skb_is_gso(skb)) 1919 return true; 1920 1921 return false; 1922 } 1923 EXPORT_SYMBOL_GPL(is_skb_forwardable); 1924 1925 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1926 { 1927 int ret = ____dev_forward_skb(dev, skb); 1928 1929 if (likely(!ret)) { 1930 skb->protocol = eth_type_trans(skb, dev); 1931 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1932 } 1933 1934 return ret; 1935 } 1936 EXPORT_SYMBOL_GPL(__dev_forward_skb); 1937 1938 /** 1939 * dev_forward_skb - loopback an skb to another netif 1940 * 1941 * @dev: destination network device 1942 * @skb: buffer to forward 1943 * 1944 * return values: 1945 * NET_RX_SUCCESS (no congestion) 1946 * NET_RX_DROP (packet was dropped, but freed) 1947 * 1948 * dev_forward_skb can be used for injecting an skb from the 1949 * start_xmit function of one device into the receive queue 1950 * of another device. 1951 * 1952 * The receiving device may be in another namespace, so 1953 * we have to clear all information in the skb that could 1954 * impact namespace isolation. 
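 *
 * An illustrative transmit handler of a hypothetical virtual device
 * pair (get_peer() is made up for the example) could be:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);	/* consumes skb either way */
 *		return NETDEV_TX_OK;
 *	}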
1955 */ 1956 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1957 { 1958 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 1959 } 1960 EXPORT_SYMBOL_GPL(dev_forward_skb); 1961 1962 static inline int deliver_skb(struct sk_buff *skb, 1963 struct packet_type *pt_prev, 1964 struct net_device *orig_dev) 1965 { 1966 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 1967 return -ENOMEM; 1968 refcount_inc(&skb->users); 1969 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 1970 } 1971 1972 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 1973 struct packet_type **pt, 1974 struct net_device *orig_dev, 1975 __be16 type, 1976 struct list_head *ptype_list) 1977 { 1978 struct packet_type *ptype, *pt_prev = *pt; 1979 1980 list_for_each_entry_rcu(ptype, ptype_list, list) { 1981 if (ptype->type != type) 1982 continue; 1983 if (pt_prev) 1984 deliver_skb(skb, pt_prev, orig_dev); 1985 pt_prev = ptype; 1986 } 1987 *pt = pt_prev; 1988 } 1989 1990 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 1991 { 1992 if (!ptype->af_packet_priv || !skb->sk) 1993 return false; 1994 1995 if (ptype->id_match) 1996 return ptype->id_match(ptype, skb->sk); 1997 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 1998 return true; 1999 2000 return false; 2001 } 2002 2003 /** 2004 * dev_nit_active - return true if any network interface taps are in use 2005 * 2006 * @dev: network device to check for the presence of taps 2007 */ 2008 bool dev_nit_active(struct net_device *dev) 2009 { 2010 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2011 } 2012 EXPORT_SYMBOL_GPL(dev_nit_active); 2013 2014 /* 2015 * Support routine. Sends outgoing frames to any network 2016 * taps currently in use. 2017 */ 2018 2019 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2020 { 2021 struct packet_type *ptype; 2022 struct sk_buff *skb2 = NULL; 2023 struct packet_type *pt_prev = NULL; 2024 struct list_head *ptype_list = &ptype_all; 2025 2026 rcu_read_lock(); 2027 again: 2028 list_for_each_entry_rcu(ptype, ptype_list, list) { 2029 if (ptype->ignore_outgoing) 2030 continue; 2031 2032 /* Never send packets back to the socket 2033 * they originated from - MvS (miquels@drinkel.ow.org) 2034 */ 2035 if (skb_loop_sk(ptype, skb)) 2036 continue; 2037 2038 if (pt_prev) { 2039 deliver_skb(skb2, pt_prev, skb->dev); 2040 pt_prev = ptype; 2041 continue; 2042 } 2043 2044 /* need to clone skb, done only once */ 2045 skb2 = skb_clone(skb, GFP_ATOMIC); 2046 if (!skb2) 2047 goto out_unlock; 2048 2049 net_timestamp_set(skb2); 2050 2051 /* skb->nh should be correctly 2052 * set by sender, so that the second statement is 2053 * just protection against buggy protocols. 
2054 */ 2055 skb_reset_mac_header(skb2); 2056 2057 if (skb_network_header(skb2) < skb2->data || 2058 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2059 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2060 ntohs(skb2->protocol), 2061 dev->name); 2062 skb_reset_network_header(skb2); 2063 } 2064 2065 skb2->transport_header = skb2->network_header; 2066 skb2->pkt_type = PACKET_OUTGOING; 2067 pt_prev = ptype; 2068 } 2069 2070 if (ptype_list == &ptype_all) { 2071 ptype_list = &dev->ptype_all; 2072 goto again; 2073 } 2074 out_unlock: 2075 if (pt_prev) { 2076 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2077 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2078 else 2079 kfree_skb(skb2); 2080 } 2081 rcu_read_unlock(); 2082 } 2083 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2084 2085 /** 2086 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2087 * @dev: Network device 2088 * @txq: number of queues available 2089 * 2090 * If real_num_tx_queues is changed the tc mappings may no longer be 2091 * valid. To resolve this, verify that the tc mapping remains valid and if 2092 * not, NULL the mapping. With no priorities mapping to this 2093 * offset/count pair it will no longer be used. In the worst case, if TC0 2094 * is invalid, nothing can be done, so disable priority mappings. It is 2095 * expected that drivers will fix this mapping if they can before 2096 * calling netif_set_real_num_tx_queues. 2097 */ 2098 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2099 { 2100 int i; 2101 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2102 2103 /* If TC0 is invalidated disable TC mapping */ 2104 if (tc->offset + tc->count > txq) { 2105 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2106 dev->num_tc = 0; 2107 return; 2108 } 2109 2110 /* Invalidated prio to tc mappings set to TC0 */ 2111 for (i = 1; i < TC_BITMASK + 1; i++) { 2112 int q = netdev_get_prio_tc_map(dev, i); 2113 2114 tc = &dev->tc_to_txq[q]; 2115 if (tc->offset + tc->count > txq) { 2116 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2117 i, q); 2118 netdev_set_prio_tc_map(dev, i, 0); 2119 } 2120 } 2121 } 2122 2123 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2124 { 2125 if (dev->num_tc) { 2126 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2127 int i; 2128 2129 /* walk through the TCs and see if it falls into any of them */ 2130 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2131 if ((txq - tc->offset) < tc->count) 2132 return i; 2133 } 2134 2135 /* didn't find it, just return -1 to indicate no match */ 2136 return -1; 2137 } 2138 2139 return 0; 2140 } 2141 EXPORT_SYMBOL(netdev_txq_to_tc); 2142 2143 #ifdef CONFIG_XPS 2144 struct static_key xps_needed __read_mostly; 2145 EXPORT_SYMBOL(xps_needed); 2146 struct static_key xps_rxqs_needed __read_mostly; 2147 EXPORT_SYMBOL(xps_rxqs_needed); 2148 static DEFINE_MUTEX(xps_map_mutex); 2149 #define xmap_dereference(P) \ 2150 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2151 2152 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2153 int tci, u16 index) 2154 { 2155 struct xps_map *map = NULL; 2156 int pos; 2157 2158 if (dev_maps) 2159 map = xmap_dereference(dev_maps->attr_map[tci]); 2160 if (!map) 2161 return false; 2162 2163 for (pos = map->len; pos--;) { 2164 if (map->queues[pos] != index) 2165 continue; 2166 2167 if (map->len > 1) { 2168 map->queues[pos] = map->queues[--map->len]; 2169 break; 2170 } 2171 2172 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2173 kfree_rcu(map, rcu); 2174 return false; 2175 } 2176 2177 return true; 2178 } 2179 2180 static bool remove_xps_queue_cpu(struct net_device *dev, 2181 struct xps_dev_maps *dev_maps, 2182 int cpu, u16 offset, u16 count) 2183 { 2184 int num_tc = dev->num_tc ? : 1; 2185 bool active = false; 2186 int tci; 2187 2188 for (tci = cpu * num_tc; num_tc--; tci++) { 2189 int i, j; 2190 2191 for (i = count, j = offset; i--; j++) { 2192 if (!remove_xps_queue(dev_maps, tci, j)) 2193 break; 2194 } 2195 2196 active |= i < 0; 2197 } 2198 2199 return active; 2200 } 2201 2202 static void reset_xps_maps(struct net_device *dev, 2203 struct xps_dev_maps *dev_maps, 2204 bool is_rxqs_map) 2205 { 2206 if (is_rxqs_map) { 2207 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2208 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); 2209 } else { 2210 RCU_INIT_POINTER(dev->xps_cpus_map, NULL); 2211 } 2212 static_key_slow_dec_cpuslocked(&xps_needed); 2213 kfree_rcu(dev_maps, rcu); 2214 } 2215 2216 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, 2217 struct xps_dev_maps *dev_maps, unsigned int nr_ids, 2218 u16 offset, u16 count, bool is_rxqs_map) 2219 { 2220 bool active = false; 2221 int i, j; 2222 2223 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), 2224 j < nr_ids;) 2225 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, 2226 count); 2227 if (!active) 2228 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2229 2230 if (!is_rxqs_map) { 2231 for (i = offset + (count - 1); count--; i--) { 2232 netdev_queue_numa_node_write( 2233 netdev_get_tx_queue(dev, i), 2234 NUMA_NO_NODE); 2235 } 2236 } 2237 } 2238 2239 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2240 u16 count) 2241 { 2242 const unsigned long *possible_mask = NULL; 2243 struct xps_dev_maps *dev_maps; 2244 unsigned int nr_ids; 2245 2246 if (!static_key_false(&xps_needed)) 2247 return; 2248 2249 cpus_read_lock(); 2250 mutex_lock(&xps_map_mutex); 2251 2252 if (static_key_false(&xps_rxqs_needed)) { 2253 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2254 if (dev_maps) { 2255 nr_ids = 
dev->num_rx_queues; 2256 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, 2257 offset, count, true); 2258 } 2259 } 2260 2261 dev_maps = xmap_dereference(dev->xps_cpus_map); 2262 if (!dev_maps) 2263 goto out_no_maps; 2264 2265 if (num_possible_cpus() > 1) 2266 possible_mask = cpumask_bits(cpu_possible_mask); 2267 nr_ids = nr_cpu_ids; 2268 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, 2269 false); 2270 2271 out_no_maps: 2272 mutex_unlock(&xps_map_mutex); 2273 cpus_read_unlock(); 2274 } 2275 2276 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2277 { 2278 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2279 } 2280 2281 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2282 u16 index, bool is_rxqs_map) 2283 { 2284 struct xps_map *new_map; 2285 int alloc_len = XPS_MIN_MAP_ALLOC; 2286 int i, pos; 2287 2288 for (pos = 0; map && pos < map->len; pos++) { 2289 if (map->queues[pos] != index) 2290 continue; 2291 return map; 2292 } 2293 2294 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2295 if (map) { 2296 if (pos < map->alloc_len) 2297 return map; 2298 2299 alloc_len = map->alloc_len * 2; 2300 } 2301 2302 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2303 * map 2304 */ 2305 if (is_rxqs_map) 2306 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2307 else 2308 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2309 cpu_to_node(attr_index)); 2310 if (!new_map) 2311 return NULL; 2312 2313 for (i = 0; i < pos; i++) 2314 new_map->queues[i] = map->queues[i]; 2315 new_map->alloc_len = alloc_len; 2316 new_map->len = pos; 2317 2318 return new_map; 2319 } 2320 2321 /* Must be called under cpus_read_lock */ 2322 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2323 u16 index, bool is_rxqs_map) 2324 { 2325 const unsigned long *online_mask = NULL, *possible_mask = NULL; 2326 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 2327 int i, j, tci, numa_node_id = -2; 2328 int maps_sz, num_tc = 1, tc = 0; 2329 struct xps_map *map, *new_map; 2330 bool active = false; 2331 unsigned int nr_ids; 2332 2333 if (dev->num_tc) { 2334 /* Do not allow XPS on subordinate device directly */ 2335 num_tc = dev->num_tc; 2336 if (num_tc < 0) 2337 return -EINVAL; 2338 2339 /* If queue belongs to subordinate dev use its map */ 2340 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2341 2342 tc = netdev_txq_to_tc(dev, index); 2343 if (tc < 0) 2344 return -EINVAL; 2345 } 2346 2347 mutex_lock(&xps_map_mutex); 2348 if (is_rxqs_map) { 2349 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2350 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2351 nr_ids = dev->num_rx_queues; 2352 } else { 2353 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2354 if (num_possible_cpus() > 1) { 2355 online_mask = cpumask_bits(cpu_online_mask); 2356 possible_mask = cpumask_bits(cpu_possible_mask); 2357 } 2358 dev_maps = xmap_dereference(dev->xps_cpus_map); 2359 nr_ids = nr_cpu_ids; 2360 } 2361 2362 if (maps_sz < L1_CACHE_BYTES) 2363 maps_sz = L1_CACHE_BYTES; 2364 2365 /* allocate memory for queue storage */ 2366 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2367 j < nr_ids;) { 2368 if (!new_dev_maps) 2369 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2370 if (!new_dev_maps) { 2371 mutex_unlock(&xps_map_mutex); 2372 return -ENOMEM; 2373 } 2374 2375 tci = j * num_tc + tc; 2376 map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : 2377 NULL; 2378 2379 map = expand_xps_map(map, j, index, is_rxqs_map); 2380 if (!map) 2381 goto error; 2382 2383 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2384 } 2385 2386 if (!new_dev_maps) 2387 goto out_no_new_maps; 2388 2389 if (!dev_maps) { 2390 /* Increment static keys at most once per type */ 2391 static_key_slow_inc_cpuslocked(&xps_needed); 2392 if (is_rxqs_map) 2393 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2394 } 2395 2396 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2397 j < nr_ids;) { 2398 /* copy maps belonging to foreign traffic classes */ 2399 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { 2400 /* fill in the new device map from the old device map */ 2401 map = xmap_dereference(dev_maps->attr_map[tci]); 2402 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2403 } 2404 2405 /* We need to explicitly update tci as the previous loop 2406 * could break out early if dev_maps is NULL. 2407 */ 2408 tci = j * num_tc + tc; 2409 2410 if (netif_attr_test_mask(j, mask, nr_ids) && 2411 netif_attr_test_online(j, online_mask, nr_ids)) { 2412 /* add tx-queue to CPU/rx-queue maps */ 2413 int pos = 0; 2414 2415 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2416 while ((pos < map->len) && (map->queues[pos] != index)) 2417 pos++; 2418 2419 if (pos == map->len) 2420 map->queues[map->len++] = index; 2421 #ifdef CONFIG_NUMA 2422 if (!is_rxqs_map) { 2423 if (numa_node_id == -2) 2424 numa_node_id = cpu_to_node(j); 2425 else if (numa_node_id != cpu_to_node(j)) 2426 numa_node_id = -1; 2427 } 2428 #endif 2429 } else if (dev_maps) { 2430 /* fill in the new device map from the old device map */ 2431 map = xmap_dereference(dev_maps->attr_map[tci]); 2432 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2433 } 2434 2435 /* copy maps belonging to foreign traffic classes */ 2436 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { 2437 /* fill in the new device map from the old device map */ 2438 map = xmap_dereference(dev_maps->attr_map[tci]); 2439 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2440 } 2441 } 2442 2443 if (is_rxqs_map) 2444 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); 2445 else 2446 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); 2447 2448 /* Cleanup old maps */ 2449 if (!dev_maps) 2450 goto out_no_old_maps; 2451 2452 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2453 j < nr_ids;) { 2454 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2455 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2456 map = xmap_dereference(dev_maps->attr_map[tci]); 2457 if (map && map != new_map) 2458 kfree_rcu(map, rcu); 2459 } 2460 } 2461 2462 kfree_rcu(dev_maps, rcu); 2463 2464 out_no_old_maps: 2465 dev_maps = new_dev_maps; 2466 active = true; 2467 2468 out_no_new_maps: 2469 if (!is_rxqs_map) { 2470 /* update Tx queue numa node */ 2471 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2472 (numa_node_id >= 0) ? 
2473 numa_node_id : NUMA_NO_NODE); 2474 } 2475 2476 if (!dev_maps) 2477 goto out_no_maps; 2478 2479 /* removes tx-queue from unused CPUs/rx-queues */ 2480 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2481 j < nr_ids;) { 2482 for (i = tc, tci = j * num_tc; i--; tci++) 2483 active |= remove_xps_queue(dev_maps, tci, index); 2484 if (!netif_attr_test_mask(j, mask, nr_ids) || 2485 !netif_attr_test_online(j, online_mask, nr_ids)) 2486 active |= remove_xps_queue(dev_maps, tci, index); 2487 for (i = num_tc - tc, tci++; --i; tci++) 2488 active |= remove_xps_queue(dev_maps, tci, index); 2489 } 2490 2491 /* free map if not active */ 2492 if (!active) 2493 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2494 2495 out_no_maps: 2496 mutex_unlock(&xps_map_mutex); 2497 2498 return 0; 2499 error: 2500 /* remove any maps that we added */ 2501 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2502 j < nr_ids;) { 2503 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2504 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2505 map = dev_maps ? 2506 xmap_dereference(dev_maps->attr_map[tci]) : 2507 NULL; 2508 if (new_map && new_map != map) 2509 kfree(new_map); 2510 } 2511 } 2512 2513 mutex_unlock(&xps_map_mutex); 2514 2515 kfree(new_dev_maps); 2516 return -ENOMEM; 2517 } 2518 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2519 2520 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2521 u16 index) 2522 { 2523 int ret; 2524 2525 cpus_read_lock(); 2526 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); 2527 cpus_read_unlock(); 2528 2529 return ret; 2530 } 2531 EXPORT_SYMBOL(netif_set_xps_queue); 2532 2533 #endif 2534 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2535 { 2536 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2537 2538 /* Unbind any subordinate channels */ 2539 while (txq-- != &dev->_tx[0]) { 2540 if (txq->sb_dev) 2541 netdev_unbind_sb_channel(dev, txq->sb_dev); 2542 } 2543 } 2544 2545 void netdev_reset_tc(struct net_device *dev) 2546 { 2547 #ifdef CONFIG_XPS 2548 netif_reset_xps_queues_gt(dev, 0); 2549 #endif 2550 netdev_unbind_all_sb_channels(dev); 2551 2552 /* Reset TC configuration of device */ 2553 dev->num_tc = 0; 2554 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2555 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2556 } 2557 EXPORT_SYMBOL(netdev_reset_tc); 2558 2559 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2560 { 2561 if (tc >= dev->num_tc) 2562 return -EINVAL; 2563 2564 #ifdef CONFIG_XPS 2565 netif_reset_xps_queues(dev, offset, count); 2566 #endif 2567 dev->tc_to_txq[tc].count = count; 2568 dev->tc_to_txq[tc].offset = offset; 2569 return 0; 2570 } 2571 EXPORT_SYMBOL(netdev_set_tc_queue); 2572 2573 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2574 { 2575 if (num_tc > TC_MAX_QUEUE) 2576 return -EINVAL; 2577 2578 #ifdef CONFIG_XPS 2579 netif_reset_xps_queues_gt(dev, 0); 2580 #endif 2581 netdev_unbind_all_sb_channels(dev); 2582 2583 dev->num_tc = num_tc; 2584 return 0; 2585 } 2586 EXPORT_SYMBOL(netdev_set_num_tc); 2587 2588 void netdev_unbind_sb_channel(struct net_device *dev, 2589 struct net_device *sb_dev) 2590 { 2591 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2592 2593 #ifdef CONFIG_XPS 2594 netif_reset_xps_queues_gt(sb_dev, 0); 2595 #endif 2596 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2597 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2598 2599 while (txq-- != &dev->_tx[0]) { 2600 if 
(txq->sb_dev == sb_dev) 2601 txq->sb_dev = NULL; 2602 } 2603 } 2604 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2605 2606 int netdev_bind_sb_channel_queue(struct net_device *dev, 2607 struct net_device *sb_dev, 2608 u8 tc, u16 count, u16 offset) 2609 { 2610 /* Make certain the sb_dev and dev are already configured */ 2611 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2612 return -EINVAL; 2613 2614 /* We cannot hand out queues we don't have */ 2615 if ((offset + count) > dev->real_num_tx_queues) 2616 return -EINVAL; 2617 2618 /* Record the mapping */ 2619 sb_dev->tc_to_txq[tc].count = count; 2620 sb_dev->tc_to_txq[tc].offset = offset; 2621 2622 /* Provide a way for Tx queue to find the tc_to_txq map or 2623 * XPS map for itself. 2624 */ 2625 while (count--) 2626 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2627 2628 return 0; 2629 } 2630 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2631 2632 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2633 { 2634 /* Do not use a multiqueue device to represent a subordinate channel */ 2635 if (netif_is_multiqueue(dev)) 2636 return -ENODEV; 2637 2638 /* We allow channels 1 - 32767 to be used for subordinate channels. 2639 * Channel 0 is meant to be "native" mode and used only to represent 2640 * the main root device. We allow writing 0 to reset the device back 2641 * to normal mode after being used as a subordinate channel. 2642 */ 2643 if (channel > S16_MAX) 2644 return -EINVAL; 2645 2646 dev->num_tc = -channel; 2647 2648 return 0; 2649 } 2650 EXPORT_SYMBOL(netdev_set_sb_channel); 2651 2652 /* 2653 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2654 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2655 */ 2656 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2657 { 2658 bool disabling; 2659 int rc; 2660 2661 disabling = txq < dev->real_num_tx_queues; 2662 2663 if (txq < 1 || txq > dev->num_tx_queues) 2664 return -EINVAL; 2665 2666 if (dev->reg_state == NETREG_REGISTERED || 2667 dev->reg_state == NETREG_UNREGISTERING) { 2668 ASSERT_RTNL(); 2669 2670 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2671 txq); 2672 if (rc) 2673 return rc; 2674 2675 if (dev->num_tc) 2676 netif_setup_tc(dev, txq); 2677 2678 dev->real_num_tx_queues = txq; 2679 2680 if (disabling) { 2681 synchronize_net(); 2682 qdisc_reset_all_tx_gt(dev, txq); 2683 #ifdef CONFIG_XPS 2684 netif_reset_xps_queues_gt(dev, txq); 2685 #endif 2686 } 2687 } else { 2688 dev->real_num_tx_queues = txq; 2689 } 2690 2691 return 0; 2692 } 2693 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2694 2695 #ifdef CONFIG_SYSFS 2696 /** 2697 * netif_set_real_num_rx_queues - set actual number of RX queues used 2698 * @dev: Network device 2699 * @rxq: Actual number of RX queues 2700 * 2701 * This must be called either with the rtnl_lock held or before 2702 * registration of the net device. Returns 0 on success, or a 2703 * negative error code. If called before registration, it always 2704 * succeeds. 
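 *
 * A minimal usage sketch (the count is illustrative): from a context that
 * already holds the RTNL lock, such as an ethtool ->set_channels() handler:
 *
 *	err = netif_set_real_num_rx_queues(dev, new_rx_count);
 *	if (err)
 *		return err;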
2705 */ 2706 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2707 { 2708 int rc; 2709 2710 if (rxq < 1 || rxq > dev->num_rx_queues) 2711 return -EINVAL; 2712 2713 if (dev->reg_state == NETREG_REGISTERED) { 2714 ASSERT_RTNL(); 2715 2716 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2717 rxq); 2718 if (rc) 2719 return rc; 2720 } 2721 2722 dev->real_num_rx_queues = rxq; 2723 return 0; 2724 } 2725 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2726 #endif 2727 2728 /** 2729 * netif_get_num_default_rss_queues - default number of RSS queues 2730 * 2731 * This routine should set an upper limit on the number of RSS queues 2732 * used by default by multiqueue devices. 2733 */ 2734 int netif_get_num_default_rss_queues(void) 2735 { 2736 return is_kdump_kernel() ? 2737 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2738 } 2739 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2740 2741 static void __netif_reschedule(struct Qdisc *q) 2742 { 2743 struct softnet_data *sd; 2744 unsigned long flags; 2745 2746 local_irq_save(flags); 2747 sd = this_cpu_ptr(&softnet_data); 2748 q->next_sched = NULL; 2749 *sd->output_queue_tailp = q; 2750 sd->output_queue_tailp = &q->next_sched; 2751 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2752 local_irq_restore(flags); 2753 } 2754 2755 void __netif_schedule(struct Qdisc *q) 2756 { 2757 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2758 __netif_reschedule(q); 2759 } 2760 EXPORT_SYMBOL(__netif_schedule); 2761 2762 struct dev_kfree_skb_cb { 2763 enum skb_free_reason reason; 2764 }; 2765 2766 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 2767 { 2768 return (struct dev_kfree_skb_cb *)skb->cb; 2769 } 2770 2771 void netif_schedule_queue(struct netdev_queue *txq) 2772 { 2773 rcu_read_lock(); 2774 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { 2775 struct Qdisc *q = rcu_dereference(txq->qdisc); 2776 2777 __netif_schedule(q); 2778 } 2779 rcu_read_unlock(); 2780 } 2781 EXPORT_SYMBOL(netif_schedule_queue); 2782 2783 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 2784 { 2785 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 2786 struct Qdisc *q; 2787 2788 rcu_read_lock(); 2789 q = rcu_dereference(dev_queue->qdisc); 2790 __netif_schedule(q); 2791 rcu_read_unlock(); 2792 } 2793 } 2794 EXPORT_SYMBOL(netif_tx_wake_queue); 2795 2796 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 2797 { 2798 unsigned long flags; 2799 2800 if (unlikely(!skb)) 2801 return; 2802 2803 if (likely(refcount_read(&skb->users) == 1)) { 2804 smp_rmb(); 2805 refcount_set(&skb->users, 0); 2806 } else if (likely(!refcount_dec_and_test(&skb->users))) { 2807 return; 2808 } 2809 get_kfree_skb_cb(skb)->reason = reason; 2810 local_irq_save(flags); 2811 skb->next = __this_cpu_read(softnet_data.completion_queue); 2812 __this_cpu_write(softnet_data.completion_queue, skb); 2813 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2814 local_irq_restore(flags); 2815 } 2816 EXPORT_SYMBOL(__dev_kfree_skb_irq); 2817 2818 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 2819 { 2820 if (in_irq() || irqs_disabled()) 2821 __dev_kfree_skb_irq(skb, reason); 2822 else 2823 dev_kfree_skb(skb); 2824 } 2825 EXPORT_SYMBOL(__dev_kfree_skb_any); 2826 2827 2828 /** 2829 * netif_device_detach - mark device as removed 2830 * @dev: network device 2831 * 2832 * Mark device as removed from system and therefore no longer available. 
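 *
 * A minimal sketch of typical use in a hypothetical driver's suspend path
 * (my_hw_stop() is an assumed helper); the matching resume path calls
 * netif_device_attach() once the hardware is ready again:
 *
 *	netif_device_detach(netdev);
 *	my_hw_stop(priv);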
2833 */ 2834 void netif_device_detach(struct net_device *dev) 2835 { 2836 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 2837 netif_running(dev)) { 2838 netif_tx_stop_all_queues(dev); 2839 } 2840 } 2841 EXPORT_SYMBOL(netif_device_detach); 2842 2843 /** 2844 * netif_device_attach - mark device as attached 2845 * @dev: network device 2846 * 2847 * Mark device as attached to the system and restart it if needed. 2848 */ 2849 void netif_device_attach(struct net_device *dev) 2850 { 2851 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 2852 netif_running(dev)) { 2853 netif_tx_wake_all_queues(dev); 2854 __netdev_watchdog_up(dev); 2855 } 2856 } 2857 EXPORT_SYMBOL(netif_device_attach); 2858 2859 /* 2860 * Returns a Tx hash for the given packet descriptor, scaled to the number of 2861 * Tx queues to be used as a distribution range. 2862 */ 2863 static u16 skb_tx_hash(const struct net_device *dev, 2864 const struct net_device *sb_dev, 2865 struct sk_buff *skb) 2866 { 2867 u32 hash; 2868 u16 qoffset = 0; 2869 u16 qcount = dev->real_num_tx_queues; 2870 2871 if (dev->num_tc) { 2872 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 2873 2874 qoffset = sb_dev->tc_to_txq[tc].offset; 2875 qcount = sb_dev->tc_to_txq[tc].count; 2876 } 2877 2878 if (skb_rx_queue_recorded(skb)) { 2879 hash = skb_get_rx_queue(skb); 2880 while (unlikely(hash >= qcount)) 2881 hash -= qcount; 2882 return hash + qoffset; 2883 } 2884 2885 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 2886 } 2887 2888 static void skb_warn_bad_offload(const struct sk_buff *skb) 2889 { 2890 static const netdev_features_t null_features; 2891 struct net_device *dev = skb->dev; 2892 const char *name = ""; 2893 2894 if (!net_ratelimit()) 2895 return; 2896 2897 if (dev) { 2898 if (dev->dev.parent) 2899 name = dev_driver_string(dev->dev.parent); 2900 else 2901 name = netdev_name(dev); 2902 } 2903 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2904 "gso_type=%d ip_summed=%d\n", 2905 name, dev ? &dev->features : &null_features, 2906 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2907 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2908 skb_shinfo(skb)->gso_type, skb->ip_summed); 2909 } 2910 2911 /* 2912 * Invalidate hardware checksum when packet is to be mangled, and 2913 * complete checksum manually on outgoing path. 2914 */ 2915 int skb_checksum_help(struct sk_buff *skb) 2916 { 2917 __wsum csum; 2918 int ret = 0, offset; 2919 2920 if (skb->ip_summed == CHECKSUM_COMPLETE) 2921 goto out_set_summed; 2922 2923 if (unlikely(skb_shinfo(skb)->gso_size)) { 2924 skb_warn_bad_offload(skb); 2925 return -EINVAL; 2926 } 2927 2928 /* Before computing a checksum, we should make sure no frag could 2929 * be modified by an external entity : checksum could be wrong. 
2930 */ 2931 if (skb_has_shared_frag(skb)) { 2932 ret = __skb_linearize(skb); 2933 if (ret) 2934 goto out; 2935 } 2936 2937 offset = skb_checksum_start_offset(skb); 2938 BUG_ON(offset >= skb_headlen(skb)); 2939 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2940 2941 offset += skb->csum_offset; 2942 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2943 2944 if (skb_cloned(skb) && 2945 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 2946 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2947 if (ret) 2948 goto out; 2949 } 2950 2951 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 2952 out_set_summed: 2953 skb->ip_summed = CHECKSUM_NONE; 2954 out: 2955 return ret; 2956 } 2957 EXPORT_SYMBOL(skb_checksum_help); 2958 2959 int skb_crc32c_csum_help(struct sk_buff *skb) 2960 { 2961 __le32 crc32c_csum; 2962 int ret = 0, offset, start; 2963 2964 if (skb->ip_summed != CHECKSUM_PARTIAL) 2965 goto out; 2966 2967 if (unlikely(skb_is_gso(skb))) 2968 goto out; 2969 2970 /* Before computing a checksum, we should make sure no frag could 2971 * be modified by an external entity : checksum could be wrong. 2972 */ 2973 if (unlikely(skb_has_shared_frag(skb))) { 2974 ret = __skb_linearize(skb); 2975 if (ret) 2976 goto out; 2977 } 2978 start = skb_checksum_start_offset(skb); 2979 offset = start + offsetof(struct sctphdr, checksum); 2980 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 2981 ret = -EINVAL; 2982 goto out; 2983 } 2984 if (skb_cloned(skb) && 2985 !skb_clone_writable(skb, offset + sizeof(__le32))) { 2986 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2987 if (ret) 2988 goto out; 2989 } 2990 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 2991 skb->len - start, ~(__u32)0, 2992 crc32c_csum_stub)); 2993 *(__le32 *)(skb->data + offset) = crc32c_csum; 2994 skb->ip_summed = CHECKSUM_NONE; 2995 skb->csum_not_inet = 0; 2996 out: 2997 return ret; 2998 } 2999 3000 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3001 { 3002 __be16 type = skb->protocol; 3003 3004 /* Tunnel gso handlers can set protocol to ethernet. */ 3005 if (type == htons(ETH_P_TEB)) { 3006 struct ethhdr *eth; 3007 3008 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3009 return 0; 3010 3011 eth = (struct ethhdr *)skb->data; 3012 type = eth->h_proto; 3013 } 3014 3015 return __vlan_get_protocol(skb, type, depth); 3016 } 3017 3018 /** 3019 * skb_mac_gso_segment - mac layer segmentation handler. 3020 * @skb: buffer to segment 3021 * @features: features for the output path (see dev->features) 3022 */ 3023 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 3024 netdev_features_t features) 3025 { 3026 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 3027 struct packet_offload *ptype; 3028 int vlan_depth = skb->mac_len; 3029 __be16 type = skb_network_protocol(skb, &vlan_depth); 3030 3031 if (unlikely(!type)) 3032 return ERR_PTR(-EINVAL); 3033 3034 __skb_pull(skb, vlan_depth); 3035 3036 rcu_read_lock(); 3037 list_for_each_entry_rcu(ptype, &offload_base, list) { 3038 if (ptype->type == type && ptype->callbacks.gso_segment) { 3039 segs = ptype->callbacks.gso_segment(skb, features); 3040 break; 3041 } 3042 } 3043 rcu_read_unlock(); 3044 3045 __skb_push(skb, skb->data - skb_mac_header(skb)); 3046 3047 return segs; 3048 } 3049 EXPORT_SYMBOL(skb_mac_gso_segment); 3050 3051 3052 /* openvswitch calls this on rx path, so we need a different check. 
3053 */ 3054 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3055 { 3056 if (tx_path) 3057 return skb->ip_summed != CHECKSUM_PARTIAL && 3058 skb->ip_summed != CHECKSUM_UNNECESSARY; 3059 3060 return skb->ip_summed == CHECKSUM_NONE; 3061 } 3062 3063 /** 3064 * __skb_gso_segment - Perform segmentation on skb. 3065 * @skb: buffer to segment 3066 * @features: features for the output path (see dev->features) 3067 * @tx_path: whether it is called in TX path 3068 * 3069 * This function segments the given skb and returns a list of segments. 3070 * 3071 * It may return NULL if the skb requires no segmentation. This is 3072 * only possible when GSO is used for verifying header integrity. 3073 * 3074 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. 3075 */ 3076 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3077 netdev_features_t features, bool tx_path) 3078 { 3079 struct sk_buff *segs; 3080 3081 if (unlikely(skb_needs_check(skb, tx_path))) { 3082 int err; 3083 3084 /* We're going to init ->check field in TCP or UDP header */ 3085 err = skb_cow_head(skb, 0); 3086 if (err < 0) 3087 return ERR_PTR(err); 3088 } 3089 3090 /* Only report GSO partial support if it will enable us to 3091 * support segmentation on this frame without needing additional 3092 * work. 3093 */ 3094 if (features & NETIF_F_GSO_PARTIAL) { 3095 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3096 struct net_device *dev = skb->dev; 3097 3098 partial_features |= dev->features & dev->gso_partial_features; 3099 if (!skb_gso_ok(skb, features | partial_features)) 3100 features &= ~NETIF_F_GSO_PARTIAL; 3101 } 3102 3103 BUILD_BUG_ON(SKB_SGO_CB_OFFSET + 3104 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3105 3106 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3107 SKB_GSO_CB(skb)->encap_level = 0; 3108 3109 skb_reset_mac_header(skb); 3110 skb_reset_mac_len(skb); 3111 3112 segs = skb_mac_gso_segment(skb, features); 3113 3114 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3115 skb_warn_bad_offload(skb); 3116 3117 return segs; 3118 } 3119 EXPORT_SYMBOL(__skb_gso_segment); 3120 3121 /* Take action when hardware reception checksum errors are detected. */ 3122 #ifdef CONFIG_BUG 3123 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3124 { 3125 if (net_ratelimit()) { 3126 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 3127 if (dev) 3128 pr_err("dev features: %pNF\n", &dev->features); 3129 pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n", 3130 skb->len, skb->data_len, skb->pkt_type, 3131 skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type, 3132 skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum, 3133 skb->csum_complete_sw, skb->csum_valid, skb->csum_level); 3134 dump_stack(); 3135 } 3136 } 3137 EXPORT_SYMBOL(netdev_rx_csum_fault); 3138 #endif 3139 3140 /* XXX: check that highmem exists at all on the given machine. */ 3141 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3142 { 3143 #ifdef CONFIG_HIGHMEM 3144 int i; 3145 3146 if (!(dev->features & NETIF_F_HIGHDMA)) { 3147 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3148 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3149 3150 if (PageHighMem(skb_frag_page(frag))) 3151 return 1; 3152 } 3153 } 3154 #endif 3155 return 0; 3156 } 3157 3158 /* If MPLS offload request, verify we are testing hardware MPLS features 3159 * instead of standard features for the netdev. 
3160 */ 3161 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3162 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3163 netdev_features_t features, 3164 __be16 type) 3165 { 3166 if (eth_p_mpls(type)) 3167 features &= skb->dev->mpls_features; 3168 3169 return features; 3170 } 3171 #else 3172 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3173 netdev_features_t features, 3174 __be16 type) 3175 { 3176 return features; 3177 } 3178 #endif 3179 3180 static netdev_features_t harmonize_features(struct sk_buff *skb, 3181 netdev_features_t features) 3182 { 3183 int tmp; 3184 __be16 type; 3185 3186 type = skb_network_protocol(skb, &tmp); 3187 features = net_mpls_features(skb, features, type); 3188 3189 if (skb->ip_summed != CHECKSUM_NONE && 3190 !can_checksum_protocol(features, type)) { 3191 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3192 } 3193 if (illegal_highdma(skb->dev, skb)) 3194 features &= ~NETIF_F_SG; 3195 3196 return features; 3197 } 3198 3199 netdev_features_t passthru_features_check(struct sk_buff *skb, 3200 struct net_device *dev, 3201 netdev_features_t features) 3202 { 3203 return features; 3204 } 3205 EXPORT_SYMBOL(passthru_features_check); 3206 3207 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3208 struct net_device *dev, 3209 netdev_features_t features) 3210 { 3211 return vlan_features_check(skb, features); 3212 } 3213 3214 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3215 struct net_device *dev, 3216 netdev_features_t features) 3217 { 3218 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3219 3220 if (gso_segs > dev->gso_max_segs) 3221 return features & ~NETIF_F_GSO_MASK; 3222 3223 /* Support for GSO partial features requires software 3224 * intervention before we can actually process the packets 3225 * so we need to strip support for any partial features now 3226 * and we can pull them back in after we have partially 3227 * segmented the frame. 3228 */ 3229 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3230 features &= ~dev->gso_partial_features; 3231 3232 /* Make sure to clear the IPv4 ID mangling feature if the 3233 * IPv4 header has the potential to be fragmented. 3234 */ 3235 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3236 struct iphdr *iph = skb->encapsulation ? 
3237 inner_ip_hdr(skb) : ip_hdr(skb); 3238 3239 if (!(iph->frag_off & htons(IP_DF))) 3240 features &= ~NETIF_F_TSO_MANGLEID; 3241 } 3242 3243 return features; 3244 } 3245 3246 netdev_features_t netif_skb_features(struct sk_buff *skb) 3247 { 3248 struct net_device *dev = skb->dev; 3249 netdev_features_t features = dev->features; 3250 3251 if (skb_is_gso(skb)) 3252 features = gso_features_check(skb, dev, features); 3253 3254 /* If encapsulation offload request, verify we are testing 3255 * hardware encapsulation features instead of standard 3256 * features for the netdev 3257 */ 3258 if (skb->encapsulation) 3259 features &= dev->hw_enc_features; 3260 3261 if (skb_vlan_tagged(skb)) 3262 features = netdev_intersect_features(features, 3263 dev->vlan_features | 3264 NETIF_F_HW_VLAN_CTAG_TX | 3265 NETIF_F_HW_VLAN_STAG_TX); 3266 3267 if (dev->netdev_ops->ndo_features_check) 3268 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3269 features); 3270 else 3271 features &= dflt_features_check(skb, dev, features); 3272 3273 return harmonize_features(skb, features); 3274 } 3275 EXPORT_SYMBOL(netif_skb_features); 3276 3277 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3278 struct netdev_queue *txq, bool more) 3279 { 3280 unsigned int len; 3281 int rc; 3282 3283 if (dev_nit_active(dev)) 3284 dev_queue_xmit_nit(skb, dev); 3285 3286 len = skb->len; 3287 trace_net_dev_start_xmit(skb, dev); 3288 rc = netdev_start_xmit(skb, dev, txq, more); 3289 trace_net_dev_xmit(skb, rc, dev, len); 3290 3291 return rc; 3292 } 3293 3294 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3295 struct netdev_queue *txq, int *ret) 3296 { 3297 struct sk_buff *skb = first; 3298 int rc = NETDEV_TX_OK; 3299 3300 while (skb) { 3301 struct sk_buff *next = skb->next; 3302 3303 skb_mark_not_on_list(skb); 3304 rc = xmit_one(skb, dev, txq, next != NULL); 3305 if (unlikely(!dev_xmit_complete(rc))) { 3306 skb->next = next; 3307 goto out; 3308 } 3309 3310 skb = next; 3311 if (netif_tx_queue_stopped(txq) && skb) { 3312 rc = NETDEV_TX_BUSY; 3313 break; 3314 } 3315 } 3316 3317 out: 3318 *ret = rc; 3319 return skb; 3320 } 3321 3322 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3323 netdev_features_t features) 3324 { 3325 if (skb_vlan_tag_present(skb) && 3326 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3327 skb = __vlan_hwaccel_push_inside(skb); 3328 return skb; 3329 } 3330 3331 int skb_csum_hwoffload_help(struct sk_buff *skb, 3332 const netdev_features_t features) 3333 { 3334 if (unlikely(skb->csum_not_inet)) 3335 return !!(features & NETIF_F_SCTP_CRC) ? 0 : 3336 skb_crc32c_csum_help(skb); 3337 3338 return !!(features & NETIF_F_CSUM_MASK) ? 
0 : skb_checksum_help(skb); 3339 } 3340 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3341 3342 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3343 { 3344 netdev_features_t features; 3345 3346 features = netif_skb_features(skb); 3347 skb = validate_xmit_vlan(skb, features); 3348 if (unlikely(!skb)) 3349 goto out_null; 3350 3351 skb = sk_validate_xmit_skb(skb, dev); 3352 if (unlikely(!skb)) 3353 goto out_null; 3354 3355 if (netif_needs_gso(skb, features)) { 3356 struct sk_buff *segs; 3357 3358 segs = skb_gso_segment(skb, features); 3359 if (IS_ERR(segs)) { 3360 goto out_kfree_skb; 3361 } else if (segs) { 3362 consume_skb(skb); 3363 skb = segs; 3364 } 3365 } else { 3366 if (skb_needs_linearize(skb, features) && 3367 __skb_linearize(skb)) 3368 goto out_kfree_skb; 3369 3370 /* If packet is not checksummed and device does not 3371 * support checksumming for this protocol, complete 3372 * checksumming here. 3373 */ 3374 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3375 if (skb->encapsulation) 3376 skb_set_inner_transport_header(skb, 3377 skb_checksum_start_offset(skb)); 3378 else 3379 skb_set_transport_header(skb, 3380 skb_checksum_start_offset(skb)); 3381 if (skb_csum_hwoffload_help(skb, features)) 3382 goto out_kfree_skb; 3383 } 3384 } 3385 3386 skb = validate_xmit_xfrm(skb, features, again); 3387 3388 return skb; 3389 3390 out_kfree_skb: 3391 kfree_skb(skb); 3392 out_null: 3393 atomic_long_inc(&dev->tx_dropped); 3394 return NULL; 3395 } 3396 3397 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3398 { 3399 struct sk_buff *next, *head = NULL, *tail; 3400 3401 for (; skb != NULL; skb = next) { 3402 next = skb->next; 3403 skb_mark_not_on_list(skb); 3404 3405 /* in case skb won't be segmented, point to itself */ 3406 skb->prev = skb; 3407 3408 skb = validate_xmit_skb(skb, dev, again); 3409 if (!skb) 3410 continue; 3411 3412 if (!head) 3413 head = skb; 3414 else 3415 tail->next = skb; 3416 /* If skb was segmented, skb->prev points to 3417 * the last segment. If not, it still contains skb. 
3418 */ 3419 tail = skb->prev; 3420 } 3421 return head; 3422 } 3423 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3424 3425 static void qdisc_pkt_len_init(struct sk_buff *skb) 3426 { 3427 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3428 3429 qdisc_skb_cb(skb)->pkt_len = skb->len; 3430 3431 /* To get more precise estimation of bytes sent on wire, 3432 * we add to pkt_len the headers size of all segments 3433 */ 3434 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3435 unsigned int hdr_len; 3436 u16 gso_segs = shinfo->gso_segs; 3437 3438 /* mac layer + network layer */ 3439 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3440 3441 /* + transport layer */ 3442 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3443 const struct tcphdr *th; 3444 struct tcphdr _tcphdr; 3445 3446 th = skb_header_pointer(skb, skb_transport_offset(skb), 3447 sizeof(_tcphdr), &_tcphdr); 3448 if (likely(th)) 3449 hdr_len += __tcp_hdrlen(th); 3450 } else { 3451 struct udphdr _udphdr; 3452 3453 if (skb_header_pointer(skb, skb_transport_offset(skb), 3454 sizeof(_udphdr), &_udphdr)) 3455 hdr_len += sizeof(struct udphdr); 3456 } 3457 3458 if (shinfo->gso_type & SKB_GSO_DODGY) 3459 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3460 shinfo->gso_size); 3461 3462 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3463 } 3464 } 3465 3466 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3467 struct net_device *dev, 3468 struct netdev_queue *txq) 3469 { 3470 spinlock_t *root_lock = qdisc_lock(q); 3471 struct sk_buff *to_free = NULL; 3472 bool contended; 3473 int rc; 3474 3475 qdisc_calculate_pkt_len(skb, q); 3476 3477 if (q->flags & TCQ_F_NOLOCK) { 3478 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3479 __qdisc_drop(skb, &to_free); 3480 rc = NET_XMIT_DROP; 3481 } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && 3482 qdisc_run_begin(q)) { 3483 qdisc_bstats_cpu_update(q, skb); 3484 3485 if (sch_direct_xmit(skb, q, dev, txq, NULL, true)) 3486 __qdisc_run(q); 3487 3488 qdisc_run_end(q); 3489 rc = NET_XMIT_SUCCESS; 3490 } else { 3491 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3492 qdisc_run(q); 3493 } 3494 3495 if (unlikely(to_free)) 3496 kfree_skb_list(to_free); 3497 return rc; 3498 } 3499 3500 /* 3501 * Heuristic to force contended enqueues to serialize on a 3502 * separate lock before trying to get qdisc main lock. 3503 * This permits qdisc->running owner to get the lock more 3504 * often and dequeue packets faster. 3505 */ 3506 contended = qdisc_is_running(q); 3507 if (unlikely(contended)) 3508 spin_lock(&q->busylock); 3509 3510 spin_lock(root_lock); 3511 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3512 __qdisc_drop(skb, &to_free); 3513 rc = NET_XMIT_DROP; 3514 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3515 qdisc_run_begin(q)) { 3516 /* 3517 * This is a work-conserving queue; there are no old skbs 3518 * waiting to be sent out; and the qdisc is not running - 3519 * xmit the skb directly. 
3520 */ 3521 3522 qdisc_bstats_update(q, skb); 3523 3524 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3525 if (unlikely(contended)) { 3526 spin_unlock(&q->busylock); 3527 contended = false; 3528 } 3529 __qdisc_run(q); 3530 } 3531 3532 qdisc_run_end(q); 3533 rc = NET_XMIT_SUCCESS; 3534 } else { 3535 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3536 if (qdisc_run_begin(q)) { 3537 if (unlikely(contended)) { 3538 spin_unlock(&q->busylock); 3539 contended = false; 3540 } 3541 __qdisc_run(q); 3542 qdisc_run_end(q); 3543 } 3544 } 3545 spin_unlock(root_lock); 3546 if (unlikely(to_free)) 3547 kfree_skb_list(to_free); 3548 if (unlikely(contended)) 3549 spin_unlock(&q->busylock); 3550 return rc; 3551 } 3552 3553 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3554 static void skb_update_prio(struct sk_buff *skb) 3555 { 3556 const struct netprio_map *map; 3557 const struct sock *sk; 3558 unsigned int prioidx; 3559 3560 if (skb->priority) 3561 return; 3562 map = rcu_dereference_bh(skb->dev->priomap); 3563 if (!map) 3564 return; 3565 sk = skb_to_full_sk(skb); 3566 if (!sk) 3567 return; 3568 3569 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3570 3571 if (prioidx < map->priomap_len) 3572 skb->priority = map->priomap[prioidx]; 3573 } 3574 #else 3575 #define skb_update_prio(skb) 3576 #endif 3577 3578 /** 3579 * dev_loopback_xmit - loop back @skb 3580 * @net: network namespace this loopback is happening in 3581 * @sk: sk needed to be a netfilter okfn 3582 * @skb: buffer to transmit 3583 */ 3584 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3585 { 3586 skb_reset_mac_header(skb); 3587 __skb_pull(skb, skb_network_offset(skb)); 3588 skb->pkt_type = PACKET_LOOPBACK; 3589 skb->ip_summed = CHECKSUM_UNNECESSARY; 3590 WARN_ON(!skb_dst(skb)); 3591 skb_dst_force(skb); 3592 netif_rx_ni(skb); 3593 return 0; 3594 } 3595 EXPORT_SYMBOL(dev_loopback_xmit); 3596 3597 #ifdef CONFIG_NET_EGRESS 3598 static struct sk_buff * 3599 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3600 { 3601 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3602 struct tcf_result cl_res; 3603 3604 if (!miniq) 3605 return skb; 3606 3607 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ 3608 mini_qdisc_bstats_cpu_update(miniq, skb); 3609 3610 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 3611 case TC_ACT_OK: 3612 case TC_ACT_RECLASSIFY: 3613 skb->tc_index = TC_H_MIN(cl_res.classid); 3614 break; 3615 case TC_ACT_SHOT: 3616 mini_qdisc_qstats_cpu_drop(miniq); 3617 *ret = NET_XMIT_DROP; 3618 kfree_skb(skb); 3619 return NULL; 3620 case TC_ACT_STOLEN: 3621 case TC_ACT_QUEUED: 3622 case TC_ACT_TRAP: 3623 *ret = NET_XMIT_SUCCESS; 3624 consume_skb(skb); 3625 return NULL; 3626 case TC_ACT_REDIRECT: 3627 /* No need to push/pop skb's mac_header here on egress! 
*/ 3628 skb_do_redirect(skb); 3629 *ret = NET_XMIT_SUCCESS; 3630 return NULL; 3631 default: 3632 break; 3633 } 3634 3635 return skb; 3636 } 3637 #endif /* CONFIG_NET_EGRESS */ 3638 3639 #ifdef CONFIG_XPS 3640 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3641 struct xps_dev_maps *dev_maps, unsigned int tci) 3642 { 3643 struct xps_map *map; 3644 int queue_index = -1; 3645 3646 if (dev->num_tc) { 3647 tci *= dev->num_tc; 3648 tci += netdev_get_prio_tc_map(dev, skb->priority); 3649 } 3650 3651 map = rcu_dereference(dev_maps->attr_map[tci]); 3652 if (map) { 3653 if (map->len == 1) 3654 queue_index = map->queues[0]; 3655 else 3656 queue_index = map->queues[reciprocal_scale( 3657 skb_get_hash(skb), map->len)]; 3658 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3659 queue_index = -1; 3660 } 3661 return queue_index; 3662 } 3663 #endif 3664 3665 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3666 struct sk_buff *skb) 3667 { 3668 #ifdef CONFIG_XPS 3669 struct xps_dev_maps *dev_maps; 3670 struct sock *sk = skb->sk; 3671 int queue_index = -1; 3672 3673 if (!static_key_false(&xps_needed)) 3674 return -1; 3675 3676 rcu_read_lock(); 3677 if (!static_key_false(&xps_rxqs_needed)) 3678 goto get_cpus_map; 3679 3680 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); 3681 if (dev_maps) { 3682 int tci = sk_rx_queue_get(sk); 3683 3684 if (tci >= 0 && tci < dev->num_rx_queues) 3685 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3686 tci); 3687 } 3688 3689 get_cpus_map: 3690 if (queue_index < 0) { 3691 dev_maps = rcu_dereference(sb_dev->xps_cpus_map); 3692 if (dev_maps) { 3693 unsigned int tci = skb->sender_cpu - 1; 3694 3695 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3696 tci); 3697 } 3698 } 3699 rcu_read_unlock(); 3700 3701 return queue_index; 3702 #else 3703 return -1; 3704 #endif 3705 } 3706 3707 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3708 struct net_device *sb_dev) 3709 { 3710 return 0; 3711 } 3712 EXPORT_SYMBOL(dev_pick_tx_zero); 3713 3714 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3715 struct net_device *sb_dev) 3716 { 3717 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 3718 } 3719 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 3720 3721 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 3722 struct net_device *sb_dev) 3723 { 3724 struct sock *sk = skb->sk; 3725 int queue_index = sk_tx_queue_get(sk); 3726 3727 sb_dev = sb_dev ? 
: dev; 3728 3729 if (queue_index < 0 || skb->ooo_okay || 3730 queue_index >= dev->real_num_tx_queues) { 3731 int new_index = get_xps_queue(dev, sb_dev, skb); 3732 3733 if (new_index < 0) 3734 new_index = skb_tx_hash(dev, sb_dev, skb); 3735 3736 if (queue_index != new_index && sk && 3737 sk_fullsock(sk) && 3738 rcu_access_pointer(sk->sk_dst_cache)) 3739 sk_tx_queue_set(sk, new_index); 3740 3741 queue_index = new_index; 3742 } 3743 3744 return queue_index; 3745 } 3746 EXPORT_SYMBOL(netdev_pick_tx); 3747 3748 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 3749 struct sk_buff *skb, 3750 struct net_device *sb_dev) 3751 { 3752 int queue_index = 0; 3753 3754 #ifdef CONFIG_XPS 3755 u32 sender_cpu = skb->sender_cpu - 1; 3756 3757 if (sender_cpu >= (u32)NR_CPUS) 3758 skb->sender_cpu = raw_smp_processor_id() + 1; 3759 #endif 3760 3761 if (dev->real_num_tx_queues != 1) { 3762 const struct net_device_ops *ops = dev->netdev_ops; 3763 3764 if (ops->ndo_select_queue) 3765 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 3766 else 3767 queue_index = netdev_pick_tx(dev, skb, sb_dev); 3768 3769 queue_index = netdev_cap_txqueue(dev, queue_index); 3770 } 3771 3772 skb_set_queue_mapping(skb, queue_index); 3773 return netdev_get_tx_queue(dev, queue_index); 3774 } 3775 3776 /** 3777 * __dev_queue_xmit - transmit a buffer 3778 * @skb: buffer to transmit 3779 * @sb_dev: subordinate device used for L2 forwarding offload 3780 * 3781 * Queue a buffer for transmission to a network device. The caller must 3782 * have set the device and priority and built the buffer before calling 3783 * this function. The function can be called from an interrupt. 3784 * 3785 * A negative errno code is returned on a failure. A success does not 3786 * guarantee the frame will be transmitted as it may be dropped due 3787 * to congestion or traffic shaping. 3788 * 3789 * ----------------------------------------------------------------------------------- 3790 * I notice this method can also return errors from the queue disciplines, 3791 * including NET_XMIT_DROP, which is a positive value. So, errors can also 3792 * be positive. 3793 * 3794 * Regardless of the return value, the skb is consumed, so it is currently 3795 * difficult to retry a send to this method. (You can bump the ref count 3796 * before sending to hold a reference for retry if you are careful.) 3797 * 3798 * When calling this method, interrupts MUST be enabled. This is because 3799 * the BH enable code must have IRQs enabled so that it will not deadlock. 3800 * --BLG 3801 */ 3802 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 3803 { 3804 struct net_device *dev = skb->dev; 3805 struct netdev_queue *txq; 3806 struct Qdisc *q; 3807 int rc = -ENOMEM; 3808 bool again = false; 3809 3810 skb_reset_mac_header(skb); 3811 3812 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 3813 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); 3814 3815 /* Disable soft irqs for various locks below. Also 3816 * stops preemption for RCU. 3817 */ 3818 rcu_read_lock_bh(); 3819 3820 skb_update_prio(skb); 3821 3822 qdisc_pkt_len_init(skb); 3823 #ifdef CONFIG_NET_CLS_ACT 3824 skb->tc_at_ingress = 0; 3825 # ifdef CONFIG_NET_EGRESS 3826 if (static_branch_unlikely(&egress_needed_key)) { 3827 skb = sch_handle_egress(skb, &rc, dev); 3828 if (!skb) 3829 goto out; 3830 } 3831 # endif 3832 #endif 3833 /* If device/qdisc don't need skb->dst, release it right now while 3834 * it's hot in this cpu cache. 
3835 */ 3836 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 3837 skb_dst_drop(skb); 3838 else 3839 skb_dst_force(skb); 3840 3841 txq = netdev_core_pick_tx(dev, skb, sb_dev); 3842 q = rcu_dereference_bh(txq->qdisc); 3843 3844 trace_net_dev_queue(skb); 3845 if (q->enqueue) { 3846 rc = __dev_xmit_skb(skb, q, dev, txq); 3847 goto out; 3848 } 3849 3850 /* The device has no queue. Common case for software devices: 3851 * loopback, all the sorts of tunnels... 3852 * 3853 * Really, it is unlikely that netif_tx_lock protection is necessary 3854 * here. (e.g. loopback and IP tunnels are clean ignoring statistics 3855 * counters.) 3856 * However, it is possible that they rely on protection 3857 * made by us here. 3858 * 3859 * Check this and shoot the lock. It is not prone to deadlocks. 3860 * Either shoot the noqueue qdisc, it is even simpler 8) 3861 */ 3862 if (dev->flags & IFF_UP) { 3863 int cpu = smp_processor_id(); /* ok because BHs are off */ 3864 3865 if (txq->xmit_lock_owner != cpu) { 3866 if (dev_xmit_recursion()) 3867 goto recursion_alert; 3868 3869 skb = validate_xmit_skb(skb, dev, &again); 3870 if (!skb) 3871 goto out; 3872 3873 HARD_TX_LOCK(dev, txq, cpu); 3874 3875 if (!netif_xmit_stopped(txq)) { 3876 dev_xmit_recursion_inc(); 3877 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 3878 dev_xmit_recursion_dec(); 3879 if (dev_xmit_complete(rc)) { 3880 HARD_TX_UNLOCK(dev, txq); 3881 goto out; 3882 } 3883 } 3884 HARD_TX_UNLOCK(dev, txq); 3885 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 3886 dev->name); 3887 } else { 3888 /* Recursion is detected! It is possible, 3889 * unfortunately 3890 */ 3891 recursion_alert: 3892 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 3893 dev->name); 3894 } 3895 } 3896 3897 rc = -ENETDOWN; 3898 rcu_read_unlock_bh(); 3899 3900 atomic_long_inc(&dev->tx_dropped); 3901 kfree_skb_list(skb); 3902 return rc; 3903 out: 3904 rcu_read_unlock_bh(); 3905 return rc; 3906 } 3907 3908 int dev_queue_xmit(struct sk_buff *skb) 3909 { 3910 return __dev_queue_xmit(skb, NULL); 3911 } 3912 EXPORT_SYMBOL(dev_queue_xmit); 3913 3914 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) 3915 { 3916 return __dev_queue_xmit(skb, sb_dev); 3917 } 3918 EXPORT_SYMBOL(dev_queue_xmit_accel); 3919 3920 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 3921 { 3922 struct net_device *dev = skb->dev; 3923 struct sk_buff *orig_skb = skb; 3924 struct netdev_queue *txq; 3925 int ret = NETDEV_TX_BUSY; 3926 bool again = false; 3927 3928 if (unlikely(!netif_running(dev) || 3929 !netif_carrier_ok(dev))) 3930 goto drop; 3931 3932 skb = validate_xmit_skb_list(skb, dev, &again); 3933 if (skb != orig_skb) 3934 goto drop; 3935 3936 skb_set_queue_mapping(skb, queue_id); 3937 txq = skb_get_tx_queue(dev, skb); 3938 3939 local_bh_disable(); 3940 3941 HARD_TX_LOCK(dev, txq, smp_processor_id()); 3942 if (!netif_xmit_frozen_or_drv_stopped(txq)) 3943 ret = netdev_start_xmit(skb, dev, txq, false); 3944 HARD_TX_UNLOCK(dev, txq); 3945 3946 local_bh_enable(); 3947 3948 if (!dev_xmit_complete(ret)) 3949 kfree_skb(skb); 3950 3951 return ret; 3952 drop: 3953 atomic_long_inc(&dev->tx_dropped); 3954 kfree_skb_list(skb); 3955 return NET_XMIT_DROP; 3956 } 3957 EXPORT_SYMBOL(dev_direct_xmit); 3958 3959 /************************************************************************* 3960 * Receiver routines 3961 *************************************************************************/ 3962 3963 int netdev_max_backlog __read_mostly = 1000; 3964 
EXPORT_SYMBOL(netdev_max_backlog); 3965 3966 int netdev_tstamp_prequeue __read_mostly = 1; 3967 int netdev_budget __read_mostly = 300; 3968 unsigned int __read_mostly netdev_budget_usecs = 2000; 3969 int weight_p __read_mostly = 64; /* old backlog weight */ 3970 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 3971 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 3972 int dev_rx_weight __read_mostly = 64; 3973 int dev_tx_weight __read_mostly = 64; 3974 3975 /* Called with irq disabled */ 3976 static inline void ____napi_schedule(struct softnet_data *sd, 3977 struct napi_struct *napi) 3978 { 3979 list_add_tail(&napi->poll_list, &sd->poll_list); 3980 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3981 } 3982 3983 #ifdef CONFIG_RPS 3984 3985 /* One global table that all flow-based protocols share. */ 3986 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 3987 EXPORT_SYMBOL(rps_sock_flow_table); 3988 u32 rps_cpu_mask __read_mostly; 3989 EXPORT_SYMBOL(rps_cpu_mask); 3990 3991 struct static_key_false rps_needed __read_mostly; 3992 EXPORT_SYMBOL(rps_needed); 3993 struct static_key_false rfs_needed __read_mostly; 3994 EXPORT_SYMBOL(rfs_needed); 3995 3996 static struct rps_dev_flow * 3997 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 3998 struct rps_dev_flow *rflow, u16 next_cpu) 3999 { 4000 if (next_cpu < nr_cpu_ids) { 4001 #ifdef CONFIG_RFS_ACCEL 4002 struct netdev_rx_queue *rxqueue; 4003 struct rps_dev_flow_table *flow_table; 4004 struct rps_dev_flow *old_rflow; 4005 u32 flow_id; 4006 u16 rxq_index; 4007 int rc; 4008 4009 /* Should we steer this flow to a different hardware queue? */ 4010 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4011 !(dev->features & NETIF_F_NTUPLE)) 4012 goto out; 4013 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4014 if (rxq_index == skb_get_rx_queue(skb)) 4015 goto out; 4016 4017 rxqueue = dev->_rx + rxq_index; 4018 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4019 if (!flow_table) 4020 goto out; 4021 flow_id = skb_get_hash(skb) & flow_table->mask; 4022 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4023 rxq_index, flow_id); 4024 if (rc < 0) 4025 goto out; 4026 old_rflow = rflow; 4027 rflow = &flow_table->flows[flow_id]; 4028 rflow->filter = rc; 4029 if (old_rflow->filter == rflow->filter) 4030 old_rflow->filter = RPS_NO_FILTER; 4031 out: 4032 #endif 4033 rflow->last_qtail = 4034 per_cpu(softnet_data, next_cpu).input_queue_head; 4035 } 4036 4037 rflow->cpu = next_cpu; 4038 return rflow; 4039 } 4040 4041 /* 4042 * get_rps_cpu is called from netif_receive_skb and returns the target 4043 * CPU from the RPS map of the receiving queue for a given skb. 4044 * rcu_read_lock must be held on entry. 
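 *
 * As background for the checks below, a sketch of how the global
 * rps_sock_flow_table entries are encoded (they are filled in by
 * rps_record_sock_flow() on the socket receive path): the upper bits
 * hold the flow hash and the low bits hold the CPU that last processed
 * the flow, i.e. roughly
 *
 *	ident = (hash & ~rps_cpu_mask) | cpu;
 *	// so (ident ^ hash) & ~rps_cpu_mask != 0 means the upper hash
 *	// bits do not match and the entry belongs to some other flow,
 *	// while ident & rps_cpu_mask recovers the desired CPU.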
4045 */ 4046 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4047 struct rps_dev_flow **rflowp) 4048 { 4049 const struct rps_sock_flow_table *sock_flow_table; 4050 struct netdev_rx_queue *rxqueue = dev->_rx; 4051 struct rps_dev_flow_table *flow_table; 4052 struct rps_map *map; 4053 int cpu = -1; 4054 u32 tcpu; 4055 u32 hash; 4056 4057 if (skb_rx_queue_recorded(skb)) { 4058 u16 index = skb_get_rx_queue(skb); 4059 4060 if (unlikely(index >= dev->real_num_rx_queues)) { 4061 WARN_ONCE(dev->real_num_rx_queues > 1, 4062 "%s received packet on queue %u, but number " 4063 "of RX queues is %u\n", 4064 dev->name, index, dev->real_num_rx_queues); 4065 goto done; 4066 } 4067 rxqueue += index; 4068 } 4069 4070 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4071 4072 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4073 map = rcu_dereference(rxqueue->rps_map); 4074 if (!flow_table && !map) 4075 goto done; 4076 4077 skb_reset_network_header(skb); 4078 hash = skb_get_hash(skb); 4079 if (!hash) 4080 goto done; 4081 4082 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4083 if (flow_table && sock_flow_table) { 4084 struct rps_dev_flow *rflow; 4085 u32 next_cpu; 4086 u32 ident; 4087 4088 /* First check into global flow table if there is a match */ 4089 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4090 if ((ident ^ hash) & ~rps_cpu_mask) 4091 goto try_rps; 4092 4093 next_cpu = ident & rps_cpu_mask; 4094 4095 /* OK, now we know there is a match, 4096 * we can look at the local (per receive queue) flow table 4097 */ 4098 rflow = &flow_table->flows[hash & flow_table->mask]; 4099 tcpu = rflow->cpu; 4100 4101 /* 4102 * If the desired CPU (where last recvmsg was done) is 4103 * different from current CPU (one in the rx-queue flow 4104 * table entry), switch if one of the following holds: 4105 * - Current CPU is unset (>= nr_cpu_ids). 4106 * - Current CPU is offline. 4107 * - The current CPU's queue tail has advanced beyond the 4108 * last packet that was enqueued using this table entry. 4109 * This guarantees that all previous packets for the flow 4110 * have been dequeued, thus preserving in order delivery. 4111 */ 4112 if (unlikely(tcpu != next_cpu) && 4113 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4114 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4115 rflow->last_qtail)) >= 0)) { 4116 tcpu = next_cpu; 4117 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4118 } 4119 4120 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4121 *rflowp = rflow; 4122 cpu = tcpu; 4123 goto done; 4124 } 4125 } 4126 4127 try_rps: 4128 4129 if (map) { 4130 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4131 if (cpu_online(tcpu)) { 4132 cpu = tcpu; 4133 goto done; 4134 } 4135 } 4136 4137 done: 4138 return cpu; 4139 } 4140 4141 #ifdef CONFIG_RFS_ACCEL 4142 4143 /** 4144 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4145 * @dev: Device on which the filter was set 4146 * @rxq_index: RX queue index 4147 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4148 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4149 * 4150 * Drivers that implement ndo_rx_flow_steer() should periodically call 4151 * this function for each installed filter and remove the filters for 4152 * which it returns %true. 
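 *
 * Purely as an illustration (the foo_* driver structures, lock and
 * helpers are invented, not taken from any real driver), such a
 * periodic expiry scan could look roughly like:
 *
 *	static void foo_arfs_expire_work(struct foo_priv *priv)
 *	{
 *		struct foo_arfs_filter *f;
 *		struct hlist_node *tmp;
 *		int bkt;
 *
 *		spin_lock_bh(&priv->arfs_lock);
 *		hash_for_each_safe(priv->arfs_filters, bkt, tmp, f, node) {
 *			if (!rps_may_expire_flow(priv->netdev, f->rxq_index,
 *						 f->flow_id, f->filter_id))
 *				continue;
 *			foo_hw_del_ntuple_filter(priv, f);
 *			hash_del(&f->node);
 *			kfree(f);
 *		}
 *		spin_unlock_bh(&priv->arfs_lock);
 *	}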
4153 */ 4154 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4155 u32 flow_id, u16 filter_id) 4156 { 4157 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4158 struct rps_dev_flow_table *flow_table; 4159 struct rps_dev_flow *rflow; 4160 bool expire = true; 4161 unsigned int cpu; 4162 4163 rcu_read_lock(); 4164 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4165 if (flow_table && flow_id <= flow_table->mask) { 4166 rflow = &flow_table->flows[flow_id]; 4167 cpu = READ_ONCE(rflow->cpu); 4168 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4169 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4170 rflow->last_qtail) < 4171 (int)(10 * flow_table->mask))) 4172 expire = false; 4173 } 4174 rcu_read_unlock(); 4175 return expire; 4176 } 4177 EXPORT_SYMBOL(rps_may_expire_flow); 4178 4179 #endif /* CONFIG_RFS_ACCEL */ 4180 4181 /* Called from hardirq (IPI) context */ 4182 static void rps_trigger_softirq(void *data) 4183 { 4184 struct softnet_data *sd = data; 4185 4186 ____napi_schedule(sd, &sd->backlog); 4187 sd->received_rps++; 4188 } 4189 4190 #endif /* CONFIG_RPS */ 4191 4192 /* 4193 * Check if this softnet_data structure is another cpu one 4194 * If yes, queue it to our IPI list and return 1 4195 * If no, return 0 4196 */ 4197 static int rps_ipi_queued(struct softnet_data *sd) 4198 { 4199 #ifdef CONFIG_RPS 4200 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4201 4202 if (sd != mysd) { 4203 sd->rps_ipi_next = mysd->rps_ipi_list; 4204 mysd->rps_ipi_list = sd; 4205 4206 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4207 return 1; 4208 } 4209 #endif /* CONFIG_RPS */ 4210 return 0; 4211 } 4212 4213 #ifdef CONFIG_NET_FLOW_LIMIT 4214 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4215 #endif 4216 4217 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4218 { 4219 #ifdef CONFIG_NET_FLOW_LIMIT 4220 struct sd_flow_limit *fl; 4221 struct softnet_data *sd; 4222 unsigned int old_flow, new_flow; 4223 4224 if (qlen < (netdev_max_backlog >> 1)) 4225 return false; 4226 4227 sd = this_cpu_ptr(&softnet_data); 4228 4229 rcu_read_lock(); 4230 fl = rcu_dereference(sd->flow_limit); 4231 if (fl) { 4232 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4233 old_flow = fl->history[fl->history_head]; 4234 fl->history[fl->history_head] = new_flow; 4235 4236 fl->history_head++; 4237 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4238 4239 if (likely(fl->buckets[old_flow])) 4240 fl->buckets[old_flow]--; 4241 4242 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4243 fl->count++; 4244 rcu_read_unlock(); 4245 return true; 4246 } 4247 } 4248 rcu_read_unlock(); 4249 #endif 4250 return false; 4251 } 4252 4253 /* 4254 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4255 * queue (may be a remote CPU queue). 
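 *
 * In rough outline (mirroring the code below):
 *
 *	device not running, queue longer than netdev_max_backlog,
 *	    or per-flow limit exceeded     ->  drop and bump rx_dropped
 *	queue already non-empty            ->  just append the skb
 *	queue empty                        ->  mark the backlog NAPI as
 *	    scheduled (directly, or via the rps IPI list when the target
 *	    is a remote CPU), then append the skb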
4256 */ 4257 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4258 unsigned int *qtail) 4259 { 4260 struct softnet_data *sd; 4261 unsigned long flags; 4262 unsigned int qlen; 4263 4264 sd = &per_cpu(softnet_data, cpu); 4265 4266 local_irq_save(flags); 4267 4268 rps_lock(sd); 4269 if (!netif_running(skb->dev)) 4270 goto drop; 4271 qlen = skb_queue_len(&sd->input_pkt_queue); 4272 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4273 if (qlen) { 4274 enqueue: 4275 __skb_queue_tail(&sd->input_pkt_queue, skb); 4276 input_queue_tail_incr_save(sd, qtail); 4277 rps_unlock(sd); 4278 local_irq_restore(flags); 4279 return NET_RX_SUCCESS; 4280 } 4281 4282 /* Schedule NAPI for backlog device 4283 * We can use non atomic operation since we own the queue lock 4284 */ 4285 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 4286 if (!rps_ipi_queued(sd)) 4287 ____napi_schedule(sd, &sd->backlog); 4288 } 4289 goto enqueue; 4290 } 4291 4292 drop: 4293 sd->dropped++; 4294 rps_unlock(sd); 4295 4296 local_irq_restore(flags); 4297 4298 atomic_long_inc(&skb->dev->rx_dropped); 4299 kfree_skb(skb); 4300 return NET_RX_DROP; 4301 } 4302 4303 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4304 { 4305 struct net_device *dev = skb->dev; 4306 struct netdev_rx_queue *rxqueue; 4307 4308 rxqueue = dev->_rx; 4309 4310 if (skb_rx_queue_recorded(skb)) { 4311 u16 index = skb_get_rx_queue(skb); 4312 4313 if (unlikely(index >= dev->real_num_rx_queues)) { 4314 WARN_ONCE(dev->real_num_rx_queues > 1, 4315 "%s received packet on queue %u, but number " 4316 "of RX queues is %u\n", 4317 dev->name, index, dev->real_num_rx_queues); 4318 4319 return rxqueue; /* Return first rxqueue */ 4320 } 4321 rxqueue += index; 4322 } 4323 return rxqueue; 4324 } 4325 4326 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4327 struct xdp_buff *xdp, 4328 struct bpf_prog *xdp_prog) 4329 { 4330 struct netdev_rx_queue *rxqueue; 4331 void *orig_data, *orig_data_end; 4332 u32 metalen, act = XDP_DROP; 4333 __be16 orig_eth_type; 4334 struct ethhdr *eth; 4335 bool orig_bcast; 4336 int hlen, off; 4337 u32 mac_len; 4338 4339 /* Reinjected packets coming from act_mirred or similar should 4340 * not get XDP generic processing. 4341 */ 4342 if (skb_cloned(skb) || skb_is_tc_redirected(skb)) 4343 return XDP_PASS; 4344 4345 /* XDP packets must be linear and must have sufficient headroom 4346 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4347 * native XDP provides, thus we need to do it here as well. 4348 */ 4349 if (skb_is_nonlinear(skb) || 4350 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4351 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4352 int troom = skb->tail + skb->data_len - skb->end; 4353 4354 /* In case we have to go down the path and also linearize, 4355 * then lets do the pskb_expand_head() work just once here. 4356 */ 4357 if (pskb_expand_head(skb, 4358 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4359 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4360 goto do_drop; 4361 if (skb_linearize(skb)) 4362 goto do_drop; 4363 } 4364 4365 /* The XDP program wants to see the packet starting at the MAC 4366 * header. 
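 *
 * Roughly, the xdp_buff set up below describes this window over the
 * skb's linear data (data_meta starts out equal to data; a program may
 * later reserve metadata space in between):
 *
 *	data_hard_start        data == data_meta             data_end
 *	      |                       |                          |
 *	      v                       v                          v
 *	      +---- spare headroom ---+-- MAC header + payload --+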
4367  */
4368 	mac_len = skb->data - skb_mac_header(skb);
4369 	hlen = skb_headlen(skb) + mac_len;
4370 	xdp->data = skb->data - mac_len;
4371 	xdp->data_meta = xdp->data;
4372 	xdp->data_end = xdp->data + hlen;
4373 	xdp->data_hard_start = skb->data - skb_headroom(skb);
4374 	orig_data_end = xdp->data_end;
4375 	orig_data = xdp->data;
4376 	eth = (struct ethhdr *)xdp->data;
4377 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4378 	orig_eth_type = eth->h_proto;
4379
4380 	rxqueue = netif_get_rxqueue(skb);
4381 	xdp->rxq = &rxqueue->xdp_rxq;
4382
4383 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4384
4385 	off = xdp->data - orig_data;
4386 	if (off > 0)
4387 		__skb_pull(skb, off);
4388 	else if (off < 0)
4389 		__skb_push(skb, -off);
4390 	skb->mac_header += off;
4391
4392 	/* Check if bpf_xdp_adjust_tail() was used; it can only shrink the
4393 	 * packet.
4394 	 */
4395 	off = orig_data_end - xdp->data_end;
4396 	if (off != 0) {
4397 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4398 		skb->len -= off;
4399
4400 	}
4401
4402 	/* Check if XDP changed the Ethernet header such that the skb needs an update */
4403 	eth = (struct ethhdr *)xdp->data;
4404 	if ((orig_eth_type != eth->h_proto) ||
4405 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4406 		__skb_push(skb, ETH_HLEN);
4407 		skb->protocol = eth_type_trans(skb, skb->dev);
4408 	}
4409
4410 	switch (act) {
4411 	case XDP_REDIRECT:
4412 	case XDP_TX:
4413 		__skb_push(skb, mac_len);
4414 		break;
4415 	case XDP_PASS:
4416 		metalen = xdp->data - xdp->data_meta;
4417 		if (metalen)
4418 			skb_metadata_set(skb, metalen);
4419 		break;
4420 	default:
4421 		bpf_warn_invalid_xdp_action(act);
4422 		/* fall through */
4423 	case XDP_ABORTED:
4424 		trace_xdp_exception(skb->dev, xdp_prog, act);
4425 		/* fall through */
4426 	case XDP_DROP:
4427 	do_drop:
4428 		kfree_skb(skb);
4429 		break;
4430 	}
4431
4432 	return act;
4433 }
4434
4435 /* When doing generic XDP we have to bypass the qdisc layer and the
4436  * network taps in order to match in-driver-XDP behavior.
4437 */ 4438 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4439 { 4440 struct net_device *dev = skb->dev; 4441 struct netdev_queue *txq; 4442 bool free_skb = true; 4443 int cpu, rc; 4444 4445 txq = netdev_core_pick_tx(dev, skb, NULL); 4446 cpu = smp_processor_id(); 4447 HARD_TX_LOCK(dev, txq, cpu); 4448 if (!netif_xmit_stopped(txq)) { 4449 rc = netdev_start_xmit(skb, dev, txq, 0); 4450 if (dev_xmit_complete(rc)) 4451 free_skb = false; 4452 } 4453 HARD_TX_UNLOCK(dev, txq); 4454 if (free_skb) { 4455 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4456 kfree_skb(skb); 4457 } 4458 } 4459 EXPORT_SYMBOL_GPL(generic_xdp_tx); 4460 4461 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4462 4463 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4464 { 4465 if (xdp_prog) { 4466 struct xdp_buff xdp; 4467 u32 act; 4468 int err; 4469 4470 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4471 if (act != XDP_PASS) { 4472 switch (act) { 4473 case XDP_REDIRECT: 4474 err = xdp_do_generic_redirect(skb->dev, skb, 4475 &xdp, xdp_prog); 4476 if (err) 4477 goto out_redir; 4478 break; 4479 case XDP_TX: 4480 generic_xdp_tx(skb, xdp_prog); 4481 break; 4482 } 4483 return XDP_DROP; 4484 } 4485 } 4486 return XDP_PASS; 4487 out_redir: 4488 kfree_skb(skb); 4489 return XDP_DROP; 4490 } 4491 EXPORT_SYMBOL_GPL(do_xdp_generic); 4492 4493 static int netif_rx_internal(struct sk_buff *skb) 4494 { 4495 int ret; 4496 4497 net_timestamp_check(netdev_tstamp_prequeue, skb); 4498 4499 trace_netif_rx(skb); 4500 4501 #ifdef CONFIG_RPS 4502 if (static_branch_unlikely(&rps_needed)) { 4503 struct rps_dev_flow voidflow, *rflow = &voidflow; 4504 int cpu; 4505 4506 preempt_disable(); 4507 rcu_read_lock(); 4508 4509 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4510 if (cpu < 0) 4511 cpu = smp_processor_id(); 4512 4513 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4514 4515 rcu_read_unlock(); 4516 preempt_enable(); 4517 } else 4518 #endif 4519 { 4520 unsigned int qtail; 4521 4522 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 4523 put_cpu(); 4524 } 4525 return ret; 4526 } 4527 4528 /** 4529 * netif_rx - post buffer to the network code 4530 * @skb: buffer to post 4531 * 4532 * This function receives a packet from a device driver and queues it for 4533 * the upper (protocol) levels to process. It always succeeds. The buffer 4534 * may be dropped during processing for congestion control or by the 4535 * protocol layers. 
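 *
 * As a purely illustrative sketch (the driver, its foo_rx_frame()
 * helper and the dev_id wiring are invented), a simple non-NAPI driver
 * would typically call this from its receive interrupt handler:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_rx_frame(dev);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}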
4536 * 4537 * return values: 4538 * NET_RX_SUCCESS (no congestion) 4539 * NET_RX_DROP (packet was dropped) 4540 * 4541 */ 4542 4543 int netif_rx(struct sk_buff *skb) 4544 { 4545 int ret; 4546 4547 trace_netif_rx_entry(skb); 4548 4549 ret = netif_rx_internal(skb); 4550 trace_netif_rx_exit(ret); 4551 4552 return ret; 4553 } 4554 EXPORT_SYMBOL(netif_rx); 4555 4556 int netif_rx_ni(struct sk_buff *skb) 4557 { 4558 int err; 4559 4560 trace_netif_rx_ni_entry(skb); 4561 4562 preempt_disable(); 4563 err = netif_rx_internal(skb); 4564 if (local_softirq_pending()) 4565 do_softirq(); 4566 preempt_enable(); 4567 trace_netif_rx_ni_exit(err); 4568 4569 return err; 4570 } 4571 EXPORT_SYMBOL(netif_rx_ni); 4572 4573 static __latent_entropy void net_tx_action(struct softirq_action *h) 4574 { 4575 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4576 4577 if (sd->completion_queue) { 4578 struct sk_buff *clist; 4579 4580 local_irq_disable(); 4581 clist = sd->completion_queue; 4582 sd->completion_queue = NULL; 4583 local_irq_enable(); 4584 4585 while (clist) { 4586 struct sk_buff *skb = clist; 4587 4588 clist = clist->next; 4589 4590 WARN_ON(refcount_read(&skb->users)); 4591 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4592 trace_consume_skb(skb); 4593 else 4594 trace_kfree_skb(skb, net_tx_action); 4595 4596 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4597 __kfree_skb(skb); 4598 else 4599 __kfree_skb_defer(skb); 4600 } 4601 4602 __kfree_skb_flush(); 4603 } 4604 4605 if (sd->output_queue) { 4606 struct Qdisc *head; 4607 4608 local_irq_disable(); 4609 head = sd->output_queue; 4610 sd->output_queue = NULL; 4611 sd->output_queue_tailp = &sd->output_queue; 4612 local_irq_enable(); 4613 4614 while (head) { 4615 struct Qdisc *q = head; 4616 spinlock_t *root_lock = NULL; 4617 4618 head = head->next_sched; 4619 4620 if (!(q->flags & TCQ_F_NOLOCK)) { 4621 root_lock = qdisc_lock(q); 4622 spin_lock(root_lock); 4623 } 4624 /* We need to make sure head->next_sched is read 4625 * before clearing __QDISC_STATE_SCHED 4626 */ 4627 smp_mb__before_atomic(); 4628 clear_bit(__QDISC_STATE_SCHED, &q->state); 4629 qdisc_run(q); 4630 if (root_lock) 4631 spin_unlock(root_lock); 4632 } 4633 } 4634 4635 xfrm_dev_backlog(sd); 4636 } 4637 4638 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 4639 /* This hook is defined here for ATM LANE */ 4640 int (*br_fdb_test_addr_hook)(struct net_device *dev, 4641 unsigned char *addr) __read_mostly; 4642 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 4643 #endif 4644 4645 static inline struct sk_buff * 4646 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4647 struct net_device *orig_dev) 4648 { 4649 #ifdef CONFIG_NET_CLS_ACT 4650 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 4651 struct tcf_result cl_res; 4652 4653 /* If there's at least one ingress present somewhere (so 4654 * we get here via enabled static key), remaining devices 4655 * that are not configured with an ingress qdisc will bail 4656 * out here. 
4657 */ 4658 if (!miniq) 4659 return skb; 4660 4661 if (*pt_prev) { 4662 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4663 *pt_prev = NULL; 4664 } 4665 4666 qdisc_skb_cb(skb)->pkt_len = skb->len; 4667 skb->tc_at_ingress = 1; 4668 mini_qdisc_bstats_cpu_update(miniq, skb); 4669 4670 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 4671 case TC_ACT_OK: 4672 case TC_ACT_RECLASSIFY: 4673 skb->tc_index = TC_H_MIN(cl_res.classid); 4674 break; 4675 case TC_ACT_SHOT: 4676 mini_qdisc_qstats_cpu_drop(miniq); 4677 kfree_skb(skb); 4678 return NULL; 4679 case TC_ACT_STOLEN: 4680 case TC_ACT_QUEUED: 4681 case TC_ACT_TRAP: 4682 consume_skb(skb); 4683 return NULL; 4684 case TC_ACT_REDIRECT: 4685 /* skb_mac_header check was done by cls/act_bpf, so 4686 * we can safely push the L2 header back before 4687 * redirecting to another netdev 4688 */ 4689 __skb_push(skb, skb->mac_len); 4690 skb_do_redirect(skb); 4691 return NULL; 4692 case TC_ACT_CONSUMED: 4693 return NULL; 4694 default: 4695 break; 4696 } 4697 #endif /* CONFIG_NET_CLS_ACT */ 4698 return skb; 4699 } 4700 4701 /** 4702 * netdev_is_rx_handler_busy - check if receive handler is registered 4703 * @dev: device to check 4704 * 4705 * Check if a receive handler is already registered for a given device. 4706 * Return true if there one. 4707 * 4708 * The caller must hold the rtnl_mutex. 4709 */ 4710 bool netdev_is_rx_handler_busy(struct net_device *dev) 4711 { 4712 ASSERT_RTNL(); 4713 return dev && rtnl_dereference(dev->rx_handler); 4714 } 4715 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 4716 4717 /** 4718 * netdev_rx_handler_register - register receive handler 4719 * @dev: device to register a handler for 4720 * @rx_handler: receive handler to register 4721 * @rx_handler_data: data pointer that is used by rx handler 4722 * 4723 * Register a receive handler for a device. This handler will then be 4724 * called from __netif_receive_skb. A negative errno code is returned 4725 * on a failure. 4726 * 4727 * The caller must hold the rtnl_mutex. 4728 * 4729 * For a general description of rx_handler, see enum rx_handler_result. 4730 */ 4731 int netdev_rx_handler_register(struct net_device *dev, 4732 rx_handler_func_t *rx_handler, 4733 void *rx_handler_data) 4734 { 4735 if (netdev_is_rx_handler_busy(dev)) 4736 return -EBUSY; 4737 4738 if (dev->priv_flags & IFF_NO_RX_HANDLER) 4739 return -EINVAL; 4740 4741 /* Note: rx_handler_data must be set before rx_handler */ 4742 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 4743 rcu_assign_pointer(dev->rx_handler, rx_handler); 4744 4745 return 0; 4746 } 4747 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 4748 4749 /** 4750 * netdev_rx_handler_unregister - unregister receive handler 4751 * @dev: device to unregister a handler from 4752 * 4753 * Unregister a receive handler from a device. 4754 * 4755 * The caller must hold the rtnl_mutex. 4756 */ 4757 void netdev_rx_handler_unregister(struct net_device *dev) 4758 { 4759 4760 ASSERT_RTNL(); 4761 RCU_INIT_POINTER(dev->rx_handler, NULL); 4762 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 4763 * section has a guarantee to see a non NULL rx_handler_data 4764 * as well. 4765 */ 4766 synchronize_net(); 4767 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 4768 } 4769 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 4770 4771 /* 4772 * Limit the use of PFMEMALLOC reserves to those protocols that implement 4773 * the special handling of PFMEMALLOC skbs. 
4774 */ 4775 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 4776 { 4777 switch (skb->protocol) { 4778 case htons(ETH_P_ARP): 4779 case htons(ETH_P_IP): 4780 case htons(ETH_P_IPV6): 4781 case htons(ETH_P_8021Q): 4782 case htons(ETH_P_8021AD): 4783 return true; 4784 default: 4785 return false; 4786 } 4787 } 4788 4789 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 4790 int *ret, struct net_device *orig_dev) 4791 { 4792 #ifdef CONFIG_NETFILTER_INGRESS 4793 if (nf_hook_ingress_active(skb)) { 4794 int ingress_retval; 4795 4796 if (*pt_prev) { 4797 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4798 *pt_prev = NULL; 4799 } 4800 4801 rcu_read_lock(); 4802 ingress_retval = nf_hook_ingress(skb); 4803 rcu_read_unlock(); 4804 return ingress_retval; 4805 } 4806 #endif /* CONFIG_NETFILTER_INGRESS */ 4807 return 0; 4808 } 4809 4810 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, 4811 struct packet_type **ppt_prev) 4812 { 4813 struct packet_type *ptype, *pt_prev; 4814 rx_handler_func_t *rx_handler; 4815 struct net_device *orig_dev; 4816 bool deliver_exact = false; 4817 int ret = NET_RX_DROP; 4818 __be16 type; 4819 4820 net_timestamp_check(!netdev_tstamp_prequeue, skb); 4821 4822 trace_netif_receive_skb(skb); 4823 4824 orig_dev = skb->dev; 4825 4826 skb_reset_network_header(skb); 4827 if (!skb_transport_header_was_set(skb)) 4828 skb_reset_transport_header(skb); 4829 skb_reset_mac_len(skb); 4830 4831 pt_prev = NULL; 4832 4833 another_round: 4834 skb->skb_iif = skb->dev->ifindex; 4835 4836 __this_cpu_inc(softnet_data.processed); 4837 4838 if (static_branch_unlikely(&generic_xdp_needed_key)) { 4839 int ret2; 4840 4841 preempt_disable(); 4842 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 4843 preempt_enable(); 4844 4845 if (ret2 != XDP_PASS) 4846 return NET_RX_DROP; 4847 skb_reset_mac_len(skb); 4848 } 4849 4850 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 4851 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 4852 skb = skb_vlan_untag(skb); 4853 if (unlikely(!skb)) 4854 goto out; 4855 } 4856 4857 if (skb_skip_tc_classify(skb)) 4858 goto skip_classify; 4859 4860 if (pfmemalloc) 4861 goto skip_taps; 4862 4863 list_for_each_entry_rcu(ptype, &ptype_all, list) { 4864 if (pt_prev) 4865 ret = deliver_skb(skb, pt_prev, orig_dev); 4866 pt_prev = ptype; 4867 } 4868 4869 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 4870 if (pt_prev) 4871 ret = deliver_skb(skb, pt_prev, orig_dev); 4872 pt_prev = ptype; 4873 } 4874 4875 skip_taps: 4876 #ifdef CONFIG_NET_INGRESS 4877 if (static_branch_unlikely(&ingress_needed_key)) { 4878 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); 4879 if (!skb) 4880 goto out; 4881 4882 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 4883 goto out; 4884 } 4885 #endif 4886 skb_reset_tc(skb); 4887 skip_classify: 4888 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 4889 goto drop; 4890 4891 if (skb_vlan_tag_present(skb)) { 4892 if (pt_prev) { 4893 ret = deliver_skb(skb, pt_prev, orig_dev); 4894 pt_prev = NULL; 4895 } 4896 if (vlan_do_receive(&skb)) 4897 goto another_round; 4898 else if (unlikely(!skb)) 4899 goto out; 4900 } 4901 4902 rx_handler = rcu_dereference(skb->dev->rx_handler); 4903 if (rx_handler) { 4904 if (pt_prev) { 4905 ret = deliver_skb(skb, pt_prev, orig_dev); 4906 pt_prev = NULL; 4907 } 4908 switch (rx_handler(&skb)) { 4909 case RX_HANDLER_CONSUMED: 4910 ret = NET_RX_SUCCESS; 4911 goto out; 4912 case RX_HANDLER_ANOTHER: 4913 goto another_round; 4914 case RX_HANDLER_EXACT: 4915 
deliver_exact = true; 4916 case RX_HANDLER_PASS: 4917 break; 4918 default: 4919 BUG(); 4920 } 4921 } 4922 4923 if (unlikely(skb_vlan_tag_present(skb))) { 4924 check_vlan_id: 4925 if (skb_vlan_tag_get_id(skb)) { 4926 /* Vlan id is non 0 and vlan_do_receive() above couldn't 4927 * find vlan device. 4928 */ 4929 skb->pkt_type = PACKET_OTHERHOST; 4930 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 4931 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 4932 /* Outer header is 802.1P with vlan 0, inner header is 4933 * 802.1Q or 802.1AD and vlan_do_receive() above could 4934 * not find vlan dev for vlan id 0. 4935 */ 4936 __vlan_hwaccel_clear_tag(skb); 4937 skb = skb_vlan_untag(skb); 4938 if (unlikely(!skb)) 4939 goto out; 4940 if (vlan_do_receive(&skb)) 4941 /* After stripping off 802.1P header with vlan 0 4942 * vlan dev is found for inner header. 4943 */ 4944 goto another_round; 4945 else if (unlikely(!skb)) 4946 goto out; 4947 else 4948 /* We have stripped outer 802.1P vlan 0 header. 4949 * But could not find vlan dev. 4950 * check again for vlan id to set OTHERHOST. 4951 */ 4952 goto check_vlan_id; 4953 } 4954 /* Note: we might in the future use prio bits 4955 * and set skb->priority like in vlan_do_receive() 4956 * For the time being, just ignore Priority Code Point 4957 */ 4958 __vlan_hwaccel_clear_tag(skb); 4959 } 4960 4961 type = skb->protocol; 4962 4963 /* deliver only exact match when indicated */ 4964 if (likely(!deliver_exact)) { 4965 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4966 &ptype_base[ntohs(type) & 4967 PTYPE_HASH_MASK]); 4968 } 4969 4970 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4971 &orig_dev->ptype_specific); 4972 4973 if (unlikely(skb->dev != orig_dev)) { 4974 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 4975 &skb->dev->ptype_specific); 4976 } 4977 4978 if (pt_prev) { 4979 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 4980 goto drop; 4981 *ppt_prev = pt_prev; 4982 } else { 4983 drop: 4984 if (!deliver_exact) 4985 atomic_long_inc(&skb->dev->rx_dropped); 4986 else 4987 atomic_long_inc(&skb->dev->rx_nohandler); 4988 kfree_skb(skb); 4989 /* Jamal, now you will not able to escape explaining 4990 * me how you were going to use this. :-) 4991 */ 4992 ret = NET_RX_DROP; 4993 } 4994 4995 out: 4996 return ret; 4997 } 4998 4999 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5000 { 5001 struct net_device *orig_dev = skb->dev; 5002 struct packet_type *pt_prev = NULL; 5003 int ret; 5004 5005 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5006 if (pt_prev) 5007 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5008 skb->dev, pt_prev, orig_dev); 5009 return ret; 5010 } 5011 5012 /** 5013 * netif_receive_skb_core - special purpose version of netif_receive_skb 5014 * @skb: buffer to process 5015 * 5016 * More direct receive version of netif_receive_skb(). It should 5017 * only be used by callers that have a need to skip RPS and Generic XDP. 5018 * Caller must also take care of handling if (page_is_)pfmemalloc. 5019 * 5020 * This function may only be called from softirq context and interrupts 5021 * should be enabled. 
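 *
 * A minimal, purely illustrative use (hypothetical driver code) from a
 * NAPI poll routine that has already run its own XDP program and so
 * wants to bypass the generic XDP hook:
 *
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	netif_receive_skb_core(skb);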
5022 * 5023 * Return values (usually ignored): 5024 * NET_RX_SUCCESS: no congestion 5025 * NET_RX_DROP: packet was dropped 5026 */ 5027 int netif_receive_skb_core(struct sk_buff *skb) 5028 { 5029 int ret; 5030 5031 rcu_read_lock(); 5032 ret = __netif_receive_skb_one_core(skb, false); 5033 rcu_read_unlock(); 5034 5035 return ret; 5036 } 5037 EXPORT_SYMBOL(netif_receive_skb_core); 5038 5039 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5040 struct packet_type *pt_prev, 5041 struct net_device *orig_dev) 5042 { 5043 struct sk_buff *skb, *next; 5044 5045 if (!pt_prev) 5046 return; 5047 if (list_empty(head)) 5048 return; 5049 if (pt_prev->list_func != NULL) 5050 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5051 ip_list_rcv, head, pt_prev, orig_dev); 5052 else 5053 list_for_each_entry_safe(skb, next, head, list) { 5054 skb_list_del_init(skb); 5055 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5056 } 5057 } 5058 5059 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5060 { 5061 /* Fast-path assumptions: 5062 * - There is no RX handler. 5063 * - Only one packet_type matches. 5064 * If either of these fails, we will end up doing some per-packet 5065 * processing in-line, then handling the 'last ptype' for the whole 5066 * sublist. This can't cause out-of-order delivery to any single ptype, 5067 * because the 'last ptype' must be constant across the sublist, and all 5068 * other ptypes are handled per-packet. 5069 */ 5070 /* Current (common) ptype of sublist */ 5071 struct packet_type *pt_curr = NULL; 5072 /* Current (common) orig_dev of sublist */ 5073 struct net_device *od_curr = NULL; 5074 struct list_head sublist; 5075 struct sk_buff *skb, *next; 5076 5077 INIT_LIST_HEAD(&sublist); 5078 list_for_each_entry_safe(skb, next, head, list) { 5079 struct net_device *orig_dev = skb->dev; 5080 struct packet_type *pt_prev = NULL; 5081 5082 skb_list_del_init(skb); 5083 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5084 if (!pt_prev) 5085 continue; 5086 if (pt_curr != pt_prev || od_curr != orig_dev) { 5087 /* dispatch old sublist */ 5088 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5089 /* start new sublist */ 5090 INIT_LIST_HEAD(&sublist); 5091 pt_curr = pt_prev; 5092 od_curr = orig_dev; 5093 } 5094 list_add_tail(&skb->list, &sublist); 5095 } 5096 5097 /* dispatch final sublist */ 5098 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5099 } 5100 5101 static int __netif_receive_skb(struct sk_buff *skb) 5102 { 5103 int ret; 5104 5105 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5106 unsigned int noreclaim_flag; 5107 5108 /* 5109 * PFMEMALLOC skbs are special, they should 5110 * - be delivered to SOCK_MEMALLOC sockets only 5111 * - stay away from userspace 5112 * - have bounded memory usage 5113 * 5114 * Use PF_MEMALLOC as this saves us from propagating the allocation 5115 * context down to all allocation sites. 5116 */ 5117 noreclaim_flag = memalloc_noreclaim_save(); 5118 ret = __netif_receive_skb_one_core(skb, true); 5119 memalloc_noreclaim_restore(noreclaim_flag); 5120 } else 5121 ret = __netif_receive_skb_one_core(skb, false); 5122 5123 return ret; 5124 } 5125 5126 static void __netif_receive_skb_list(struct list_head *head) 5127 { 5128 unsigned long noreclaim_flag = 0; 5129 struct sk_buff *skb, *next; 5130 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5131 5132 list_for_each_entry_safe(skb, next, head, list) { 5133 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5134 struct list_head sublist; 5135 5136 /* Handle the previous sublist */ 5137 list_cut_before(&sublist, head, &skb->list); 5138 if (!list_empty(&sublist)) 5139 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5140 pfmemalloc = !pfmemalloc; 5141 /* See comments in __netif_receive_skb */ 5142 if (pfmemalloc) 5143 noreclaim_flag = memalloc_noreclaim_save(); 5144 else 5145 memalloc_noreclaim_restore(noreclaim_flag); 5146 } 5147 } 5148 /* Handle the remaining sublist */ 5149 if (!list_empty(head)) 5150 __netif_receive_skb_list_core(head, pfmemalloc); 5151 /* Restore pflags */ 5152 if (pfmemalloc) 5153 memalloc_noreclaim_restore(noreclaim_flag); 5154 } 5155 5156 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5157 { 5158 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5159 struct bpf_prog *new = xdp->prog; 5160 int ret = 0; 5161 5162 switch (xdp->command) { 5163 case XDP_SETUP_PROG: 5164 rcu_assign_pointer(dev->xdp_prog, new); 5165 if (old) 5166 bpf_prog_put(old); 5167 5168 if (old && !new) { 5169 static_branch_dec(&generic_xdp_needed_key); 5170 } else if (new && !old) { 5171 static_branch_inc(&generic_xdp_needed_key); 5172 dev_disable_lro(dev); 5173 dev_disable_gro_hw(dev); 5174 } 5175 break; 5176 5177 case XDP_QUERY_PROG: 5178 xdp->prog_id = old ? old->aux->id : 0; 5179 break; 5180 5181 default: 5182 ret = -EINVAL; 5183 break; 5184 } 5185 5186 return ret; 5187 } 5188 5189 static int netif_receive_skb_internal(struct sk_buff *skb) 5190 { 5191 int ret; 5192 5193 net_timestamp_check(netdev_tstamp_prequeue, skb); 5194 5195 if (skb_defer_rx_timestamp(skb)) 5196 return NET_RX_SUCCESS; 5197 5198 rcu_read_lock(); 5199 #ifdef CONFIG_RPS 5200 if (static_branch_unlikely(&rps_needed)) { 5201 struct rps_dev_flow voidflow, *rflow = &voidflow; 5202 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5203 5204 if (cpu >= 0) { 5205 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5206 rcu_read_unlock(); 5207 return ret; 5208 } 5209 } 5210 #endif 5211 ret = __netif_receive_skb(skb); 5212 rcu_read_unlock(); 5213 return ret; 5214 } 5215 5216 static void netif_receive_skb_list_internal(struct list_head *head) 5217 { 5218 struct sk_buff *skb, *next; 5219 struct list_head sublist; 5220 5221 INIT_LIST_HEAD(&sublist); 5222 list_for_each_entry_safe(skb, next, head, list) { 5223 net_timestamp_check(netdev_tstamp_prequeue, skb); 5224 skb_list_del_init(skb); 5225 if (!skb_defer_rx_timestamp(skb)) 5226 list_add_tail(&skb->list, &sublist); 5227 } 5228 list_splice_init(&sublist, head); 5229 5230 rcu_read_lock(); 5231 #ifdef CONFIG_RPS 5232 if (static_branch_unlikely(&rps_needed)) { 5233 list_for_each_entry_safe(skb, next, head, list) { 5234 struct rps_dev_flow voidflow, *rflow = &voidflow; 5235 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5236 5237 if (cpu >= 0) { 5238 /* Will be handled, remove from list */ 5239 skb_list_del_init(skb); 5240 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5241 } 5242 } 5243 } 5244 #endif 5245 __netif_receive_skb_list(head); 5246 rcu_read_unlock(); 5247 } 5248 5249 /** 5250 * netif_receive_skb - process receive buffer from network 5251 * @skb: buffer to process 5252 * 5253 * netif_receive_skb() is the main receive data processing function. 5254 * It always succeeds. The buffer may be dropped during processing 5255 * for congestion control or by the protocol layers. 
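 *
 * Purely as an illustration (the foo_* driver structure and helpers
 * are invented), a NAPI driver's poll callback typically feeds frames
 * to the stack through this function or napi_gro_receive():
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_rx_next(priv))) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}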
5256 * 5257 * This function may only be called from softirq context and interrupts 5258 * should be enabled. 5259 * 5260 * Return values (usually ignored): 5261 * NET_RX_SUCCESS: no congestion 5262 * NET_RX_DROP: packet was dropped 5263 */ 5264 int netif_receive_skb(struct sk_buff *skb) 5265 { 5266 int ret; 5267 5268 trace_netif_receive_skb_entry(skb); 5269 5270 ret = netif_receive_skb_internal(skb); 5271 trace_netif_receive_skb_exit(ret); 5272 5273 return ret; 5274 } 5275 EXPORT_SYMBOL(netif_receive_skb); 5276 5277 /** 5278 * netif_receive_skb_list - process many receive buffers from network 5279 * @head: list of skbs to process. 5280 * 5281 * Since return value of netif_receive_skb() is normally ignored, and 5282 * wouldn't be meaningful for a list, this function returns void. 5283 * 5284 * This function may only be called from softirq context and interrupts 5285 * should be enabled. 5286 */ 5287 void netif_receive_skb_list(struct list_head *head) 5288 { 5289 struct sk_buff *skb; 5290 5291 if (list_empty(head)) 5292 return; 5293 if (trace_netif_receive_skb_list_entry_enabled()) { 5294 list_for_each_entry(skb, head, list) 5295 trace_netif_receive_skb_list_entry(skb); 5296 } 5297 netif_receive_skb_list_internal(head); 5298 trace_netif_receive_skb_list_exit(0); 5299 } 5300 EXPORT_SYMBOL(netif_receive_skb_list); 5301 5302 DEFINE_PER_CPU(struct work_struct, flush_works); 5303 5304 /* Network device is going away, flush any packets still pending */ 5305 static void flush_backlog(struct work_struct *work) 5306 { 5307 struct sk_buff *skb, *tmp; 5308 struct softnet_data *sd; 5309 5310 local_bh_disable(); 5311 sd = this_cpu_ptr(&softnet_data); 5312 5313 local_irq_disable(); 5314 rps_lock(sd); 5315 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5316 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5317 __skb_unlink(skb, &sd->input_pkt_queue); 5318 kfree_skb(skb); 5319 input_queue_head_incr(sd); 5320 } 5321 } 5322 rps_unlock(sd); 5323 local_irq_enable(); 5324 5325 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5326 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5327 __skb_unlink(skb, &sd->process_queue); 5328 kfree_skb(skb); 5329 input_queue_head_incr(sd); 5330 } 5331 } 5332 local_bh_enable(); 5333 } 5334 5335 static void flush_all_backlogs(void) 5336 { 5337 unsigned int cpu; 5338 5339 get_online_cpus(); 5340 5341 for_each_online_cpu(cpu) 5342 queue_work_on(cpu, system_highpri_wq, 5343 per_cpu_ptr(&flush_works, cpu)); 5344 5345 for_each_online_cpu(cpu) 5346 flush_work(per_cpu_ptr(&flush_works, cpu)); 5347 5348 put_online_cpus(); 5349 } 5350 5351 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); 5352 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); 5353 static int napi_gro_complete(struct sk_buff *skb) 5354 { 5355 struct packet_offload *ptype; 5356 __be16 type = skb->protocol; 5357 struct list_head *head = &offload_base; 5358 int err = -ENOENT; 5359 5360 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 5361 5362 if (NAPI_GRO_CB(skb)->count == 1) { 5363 skb_shinfo(skb)->gso_size = 0; 5364 goto out; 5365 } 5366 5367 rcu_read_lock(); 5368 list_for_each_entry_rcu(ptype, head, list) { 5369 if (ptype->type != type || !ptype->callbacks.gro_complete) 5370 continue; 5371 5372 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 5373 ipv6_gro_complete, inet_gro_complete, 5374 skb, 0); 5375 break; 5376 } 5377 rcu_read_unlock(); 5378 5379 if (err) { 5380 WARN_ON(&ptype->list == head); 5381 kfree_skb(skb); 5382 return NET_RX_SUCCESS; 
5383 } 5384 5385 out: 5386 return netif_receive_skb_internal(skb); 5387 } 5388 5389 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, 5390 bool flush_old) 5391 { 5392 struct list_head *head = &napi->gro_hash[index].list; 5393 struct sk_buff *skb, *p; 5394 5395 list_for_each_entry_safe_reverse(skb, p, head, list) { 5396 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 5397 return; 5398 skb_list_del_init(skb); 5399 napi_gro_complete(skb); 5400 napi->gro_hash[index].count--; 5401 } 5402 5403 if (!napi->gro_hash[index].count) 5404 __clear_bit(index, &napi->gro_bitmask); 5405 } 5406 5407 /* napi->gro_hash[].list contains packets ordered by age. 5408 * youngest packets at the head of it. 5409 * Complete skbs in reverse order to reduce latencies. 5410 */ 5411 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 5412 { 5413 unsigned long bitmask = napi->gro_bitmask; 5414 unsigned int i, base = ~0U; 5415 5416 while ((i = ffs(bitmask)) != 0) { 5417 bitmask >>= i; 5418 base += i; 5419 __napi_gro_flush_chain(napi, base, flush_old); 5420 } 5421 } 5422 EXPORT_SYMBOL(napi_gro_flush); 5423 5424 static struct list_head *gro_list_prepare(struct napi_struct *napi, 5425 struct sk_buff *skb) 5426 { 5427 unsigned int maclen = skb->dev->hard_header_len; 5428 u32 hash = skb_get_hash_raw(skb); 5429 struct list_head *head; 5430 struct sk_buff *p; 5431 5432 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; 5433 list_for_each_entry(p, head, list) { 5434 unsigned long diffs; 5435 5436 NAPI_GRO_CB(p)->flush = 0; 5437 5438 if (hash != skb_get_hash_raw(p)) { 5439 NAPI_GRO_CB(p)->same_flow = 0; 5440 continue; 5441 } 5442 5443 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 5444 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); 5445 if (skb_vlan_tag_present(p)) 5446 diffs |= p->vlan_tci ^ skb->vlan_tci; 5447 diffs |= skb_metadata_dst_cmp(p, skb); 5448 diffs |= skb_metadata_differs(p, skb); 5449 if (maclen == ETH_HLEN) 5450 diffs |= compare_ether_header(skb_mac_header(p), 5451 skb_mac_header(skb)); 5452 else if (!diffs) 5453 diffs = memcmp(skb_mac_header(p), 5454 skb_mac_header(skb), 5455 maclen); 5456 NAPI_GRO_CB(p)->same_flow = !diffs; 5457 } 5458 5459 return head; 5460 } 5461 5462 static void skb_gro_reset_offset(struct sk_buff *skb) 5463 { 5464 const struct skb_shared_info *pinfo = skb_shinfo(skb); 5465 const skb_frag_t *frag0 = &pinfo->frags[0]; 5466 5467 NAPI_GRO_CB(skb)->data_offset = 0; 5468 NAPI_GRO_CB(skb)->frag0 = NULL; 5469 NAPI_GRO_CB(skb)->frag0_len = 0; 5470 5471 if (skb_mac_header(skb) == skb_tail_pointer(skb) && 5472 pinfo->nr_frags && 5473 !PageHighMem(skb_frag_page(frag0))) { 5474 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 5475 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, 5476 skb_frag_size(frag0), 5477 skb->end - skb->tail); 5478 } 5479 } 5480 5481 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 5482 { 5483 struct skb_shared_info *pinfo = skb_shinfo(skb); 5484 5485 BUG_ON(skb->end - skb->tail < grow); 5486 5487 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 5488 5489 skb->data_len -= grow; 5490 skb->tail += grow; 5491 5492 pinfo->frags[0].page_offset += grow; 5493 skb_frag_size_sub(&pinfo->frags[0], grow); 5494 5495 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 5496 skb_frag_unref(skb, 0); 5497 memmove(pinfo->frags, pinfo->frags + 1, 5498 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 5499 } 5500 } 5501 5502 static void gro_flush_oldest(struct list_head *head) 5503 { 5504 struct sk_buff *oldest; 
5505 5506 oldest = list_last_entry(head, struct sk_buff, list); 5507 5508 /* We are called with head length >= MAX_GRO_SKBS, so this is 5509 * impossible. 5510 */ 5511 if (WARN_ON_ONCE(!oldest)) 5512 return; 5513 5514 /* Do not adjust napi->gro_hash[].count, caller is adding a new 5515 * SKB to the chain. 5516 */ 5517 skb_list_del_init(oldest); 5518 napi_gro_complete(oldest); 5519 } 5520 5521 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, 5522 struct sk_buff *)); 5523 INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *, 5524 struct sk_buff *)); 5525 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5526 { 5527 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 5528 struct list_head *head = &offload_base; 5529 struct packet_offload *ptype; 5530 __be16 type = skb->protocol; 5531 struct list_head *gro_head; 5532 struct sk_buff *pp = NULL; 5533 enum gro_result ret; 5534 int same_flow; 5535 int grow; 5536 5537 if (netif_elide_gro(skb->dev)) 5538 goto normal; 5539 5540 gro_head = gro_list_prepare(napi, skb); 5541 5542 rcu_read_lock(); 5543 list_for_each_entry_rcu(ptype, head, list) { 5544 if (ptype->type != type || !ptype->callbacks.gro_receive) 5545 continue; 5546 5547 skb_set_network_header(skb, skb_gro_offset(skb)); 5548 skb_reset_mac_len(skb); 5549 NAPI_GRO_CB(skb)->same_flow = 0; 5550 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); 5551 NAPI_GRO_CB(skb)->free = 0; 5552 NAPI_GRO_CB(skb)->encap_mark = 0; 5553 NAPI_GRO_CB(skb)->recursion_counter = 0; 5554 NAPI_GRO_CB(skb)->is_fou = 0; 5555 NAPI_GRO_CB(skb)->is_atomic = 1; 5556 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 5557 5558 /* Setup for GRO checksum validation */ 5559 switch (skb->ip_summed) { 5560 case CHECKSUM_COMPLETE: 5561 NAPI_GRO_CB(skb)->csum = skb->csum; 5562 NAPI_GRO_CB(skb)->csum_valid = 1; 5563 NAPI_GRO_CB(skb)->csum_cnt = 0; 5564 break; 5565 case CHECKSUM_UNNECESSARY: 5566 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; 5567 NAPI_GRO_CB(skb)->csum_valid = 0; 5568 break; 5569 default: 5570 NAPI_GRO_CB(skb)->csum_cnt = 0; 5571 NAPI_GRO_CB(skb)->csum_valid = 0; 5572 } 5573 5574 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, 5575 ipv6_gro_receive, inet_gro_receive, 5576 gro_head, skb); 5577 break; 5578 } 5579 rcu_read_unlock(); 5580 5581 if (&ptype->list == head) 5582 goto normal; 5583 5584 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) { 5585 ret = GRO_CONSUMED; 5586 goto ok; 5587 } 5588 5589 same_flow = NAPI_GRO_CB(skb)->same_flow; 5590 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 5591 5592 if (pp) { 5593 skb_list_del_init(pp); 5594 napi_gro_complete(pp); 5595 napi->gro_hash[hash].count--; 5596 } 5597 5598 if (same_flow) 5599 goto ok; 5600 5601 if (NAPI_GRO_CB(skb)->flush) 5602 goto normal; 5603 5604 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { 5605 gro_flush_oldest(gro_head); 5606 } else { 5607 napi->gro_hash[hash].count++; 5608 } 5609 NAPI_GRO_CB(skb)->count = 1; 5610 NAPI_GRO_CB(skb)->age = jiffies; 5611 NAPI_GRO_CB(skb)->last = skb; 5612 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 5613 list_add(&skb->list, gro_head); 5614 ret = GRO_HELD; 5615 5616 pull: 5617 grow = skb_gro_offset(skb) - skb_headlen(skb); 5618 if (grow > 0) 5619 gro_pull_from_frag0(skb, grow); 5620 ok: 5621 if (napi->gro_hash[hash].count) { 5622 if (!test_bit(hash, &napi->gro_bitmask)) 5623 __set_bit(hash, &napi->gro_bitmask); 5624 } else if (test_bit(hash, &napi->gro_bitmask)) { 5625 __clear_bit(hash, &napi->gro_bitmask); 5626 } 5627 5628 return ret; 5629 5630 normal: 5631 ret = GRO_NORMAL; 5632 goto pull; 5633 } 5634 5635 struct packet_offload *gro_find_receive_by_type(__be16 type) 5636 { 5637 struct list_head *offload_head = &offload_base; 5638 struct packet_offload *ptype; 5639 5640 list_for_each_entry_rcu(ptype, offload_head, list) { 5641 if (ptype->type != type || !ptype->callbacks.gro_receive) 5642 continue; 5643 return ptype; 5644 } 5645 return NULL; 5646 } 5647 EXPORT_SYMBOL(gro_find_receive_by_type); 5648 5649 struct packet_offload *gro_find_complete_by_type(__be16 type) 5650 { 5651 struct list_head *offload_head = &offload_base; 5652 struct packet_offload *ptype; 5653 5654 list_for_each_entry_rcu(ptype, offload_head, list) { 5655 if (ptype->type != type || !ptype->callbacks.gro_complete) 5656 continue; 5657 return ptype; 5658 } 5659 return NULL; 5660 } 5661 EXPORT_SYMBOL(gro_find_complete_by_type); 5662 5663 static void napi_skb_free_stolen_head(struct sk_buff *skb) 5664 { 5665 skb_dst_drop(skb); 5666 secpath_reset(skb); 5667 kmem_cache_free(skbuff_head_cache, skb); 5668 } 5669 5670 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 5671 { 5672 switch (ret) { 5673 case GRO_NORMAL: 5674 if (netif_receive_skb_internal(skb)) 5675 ret = GRO_DROP; 5676 break; 5677 5678 case GRO_DROP: 5679 kfree_skb(skb); 5680 break; 5681 5682 case GRO_MERGED_FREE: 5683 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5684 napi_skb_free_stolen_head(skb); 5685 else 5686 __kfree_skb(skb); 5687 break; 5688 5689 case GRO_HELD: 5690 case GRO_MERGED: 5691 case GRO_CONSUMED: 5692 break; 5693 } 5694 5695 return ret; 5696 } 5697 5698 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5699 { 5700 gro_result_t ret; 5701 5702 skb_mark_napi_id(skb, napi); 5703 trace_napi_gro_receive_entry(skb); 5704 5705 skb_gro_reset_offset(skb); 5706 5707 ret = napi_skb_finish(dev_gro_receive(napi, skb), skb); 5708 trace_napi_gro_receive_exit(ret); 5709 5710 return ret; 5711 } 5712 EXPORT_SYMBOL(napi_gro_receive); 5713 5714 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 5715 { 5716 if (unlikely(skb->pfmemalloc)) { 5717 consume_skb(skb); 5718 return; 5719 } 5720 __skb_pull(skb, skb_headlen(skb)); 5721 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 5722 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 5723 __vlan_hwaccel_clear_tag(skb); 5724 skb->dev = napi->dev; 5725 skb->skb_iif = 0; 5726 5727 /* eth_type_trans() assumes pkt_type is PACKET_HOST */ 5728 skb->pkt_type = 
PACKET_HOST; 5729 5730 skb->encapsulation = 0; 5731 skb_shinfo(skb)->gso_type = 0; 5732 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5733 secpath_reset(skb); 5734 5735 napi->skb = skb; 5736 } 5737 5738 struct sk_buff *napi_get_frags(struct napi_struct *napi) 5739 { 5740 struct sk_buff *skb = napi->skb; 5741 5742 if (!skb) { 5743 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); 5744 if (skb) { 5745 napi->skb = skb; 5746 skb_mark_napi_id(skb, napi); 5747 } 5748 } 5749 return skb; 5750 } 5751 EXPORT_SYMBOL(napi_get_frags); 5752 5753 static gro_result_t napi_frags_finish(struct napi_struct *napi, 5754 struct sk_buff *skb, 5755 gro_result_t ret) 5756 { 5757 switch (ret) { 5758 case GRO_NORMAL: 5759 case GRO_HELD: 5760 __skb_push(skb, ETH_HLEN); 5761 skb->protocol = eth_type_trans(skb, skb->dev); 5762 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) 5763 ret = GRO_DROP; 5764 break; 5765 5766 case GRO_DROP: 5767 napi_reuse_skb(napi, skb); 5768 break; 5769 5770 case GRO_MERGED_FREE: 5771 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5772 napi_skb_free_stolen_head(skb); 5773 else 5774 napi_reuse_skb(napi, skb); 5775 break; 5776 5777 case GRO_MERGED: 5778 case GRO_CONSUMED: 5779 break; 5780 } 5781 5782 return ret; 5783 } 5784 5785 /* Upper GRO stack assumes network header starts at gro_offset=0 5786 * Drivers could call both napi_gro_frags() and napi_gro_receive() 5787 * We copy ethernet header into skb->data to have a common layout. 5788 */ 5789 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 5790 { 5791 struct sk_buff *skb = napi->skb; 5792 const struct ethhdr *eth; 5793 unsigned int hlen = sizeof(*eth); 5794 5795 napi->skb = NULL; 5796 5797 skb_reset_mac_header(skb); 5798 skb_gro_reset_offset(skb); 5799 5800 if (unlikely(skb_gro_header_hard(skb, hlen))) { 5801 eth = skb_gro_header_slow(skb, hlen, 0); 5802 if (unlikely(!eth)) { 5803 net_warn_ratelimited("%s: dropping impossible skb from %s\n", 5804 __func__, napi->dev->name); 5805 napi_reuse_skb(napi, skb); 5806 return NULL; 5807 } 5808 } else { 5809 eth = (const struct ethhdr *)skb->data; 5810 gro_pull_from_frag0(skb, hlen); 5811 NAPI_GRO_CB(skb)->frag0 += hlen; 5812 NAPI_GRO_CB(skb)->frag0_len -= hlen; 5813 } 5814 __skb_pull(skb, hlen); 5815 5816 /* 5817 * This works because the only protocols we care about don't require 5818 * special handling. 5819 * We'll fix it up properly in napi_frags_finish() 5820 */ 5821 skb->protocol = eth->h_proto; 5822 5823 return skb; 5824 } 5825 5826 gro_result_t napi_gro_frags(struct napi_struct *napi) 5827 { 5828 gro_result_t ret; 5829 struct sk_buff *skb = napi_frags_skb(napi); 5830 5831 if (!skb) 5832 return GRO_DROP; 5833 5834 trace_napi_gro_frags_entry(skb); 5835 5836 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 5837 trace_napi_gro_frags_exit(ret); 5838 5839 return ret; 5840 } 5841 EXPORT_SYMBOL(napi_gro_frags); 5842 5843 /* Compute the checksum from gro_offset and return the folded value 5844 * after adding in any pseudo checksum. 5845 */ 5846 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) 5847 { 5848 __wsum wsum; 5849 __sum16 sum; 5850 5851 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); 5852 5853 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ 5854 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); 5855 /* See comments in __skb_checksum_complete(). 
*/ 5856 if (likely(!sum)) { 5857 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 5858 !skb->csum_complete_sw) 5859 netdev_rx_csum_fault(skb->dev, skb); 5860 } 5861 5862 NAPI_GRO_CB(skb)->csum = wsum; 5863 NAPI_GRO_CB(skb)->csum_valid = 1; 5864 5865 return sum; 5866 } 5867 EXPORT_SYMBOL(__skb_gro_checksum_complete); 5868 5869 static void net_rps_send_ipi(struct softnet_data *remsd) 5870 { 5871 #ifdef CONFIG_RPS 5872 while (remsd) { 5873 struct softnet_data *next = remsd->rps_ipi_next; 5874 5875 if (cpu_online(remsd->cpu)) 5876 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5877 remsd = next; 5878 } 5879 #endif 5880 } 5881 5882 /* 5883 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5884 * Note: called with local irq disabled, but exits with local irq enabled. 5885 */ 5886 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5887 { 5888 #ifdef CONFIG_RPS 5889 struct softnet_data *remsd = sd->rps_ipi_list; 5890 5891 if (remsd) { 5892 sd->rps_ipi_list = NULL; 5893 5894 local_irq_enable(); 5895 5896 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5897 net_rps_send_ipi(remsd); 5898 } else 5899 #endif 5900 local_irq_enable(); 5901 } 5902 5903 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5904 { 5905 #ifdef CONFIG_RPS 5906 return sd->rps_ipi_list != NULL; 5907 #else 5908 return false; 5909 #endif 5910 } 5911 5912 static int process_backlog(struct napi_struct *napi, int quota) 5913 { 5914 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5915 bool again = true; 5916 int work = 0; 5917 5918 /* Check if we have pending ipi, its better to send them now, 5919 * not waiting net_rx_action() end. 5920 */ 5921 if (sd_has_rps_ipi_waiting(sd)) { 5922 local_irq_disable(); 5923 net_rps_action_and_irq_enable(sd); 5924 } 5925 5926 napi->weight = dev_rx_weight; 5927 while (again) { 5928 struct sk_buff *skb; 5929 5930 while ((skb = __skb_dequeue(&sd->process_queue))) { 5931 rcu_read_lock(); 5932 __netif_receive_skb(skb); 5933 rcu_read_unlock(); 5934 input_queue_head_incr(sd); 5935 if (++work >= quota) 5936 return work; 5937 5938 } 5939 5940 local_irq_disable(); 5941 rps_lock(sd); 5942 if (skb_queue_empty(&sd->input_pkt_queue)) { 5943 /* 5944 * Inline a custom version of __napi_complete(). 5945 * only current cpu owns and manipulates this napi, 5946 * and NAPI_STATE_SCHED is the only possible flag set 5947 * on backlog. 5948 * We can use a plain write instead of clear_bit(), 5949 * and we dont need an smp_mb() memory barrier. 5950 */ 5951 napi->state = 0; 5952 again = false; 5953 } else { 5954 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5955 &sd->process_queue); 5956 } 5957 rps_unlock(sd); 5958 local_irq_enable(); 5959 } 5960 5961 return work; 5962 } 5963 5964 /** 5965 * __napi_schedule - schedule for receive 5966 * @n: entry to schedule 5967 * 5968 * The entry's receive function will be scheduled to run. 5969 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 5970 */ 5971 void __napi_schedule(struct napi_struct *n) 5972 { 5973 unsigned long flags; 5974 5975 local_irq_save(flags); 5976 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5977 local_irq_restore(flags); 5978 } 5979 EXPORT_SYMBOL(__napi_schedule); 5980 5981 /** 5982 * napi_schedule_prep - check if napi can be scheduled 5983 * @n: napi context 5984 * 5985 * Test if NAPI routine is already running, and if not mark 5986 * it as running. This is used as a condition variable 5987 * insure only one NAPI poll instance runs. 
We also make 5988 * sure there is no pending NAPI disable. 5989 */ 5990 bool napi_schedule_prep(struct napi_struct *n) 5991 { 5992 unsigned long val, new; 5993 5994 do { 5995 val = READ_ONCE(n->state); 5996 if (unlikely(val & NAPIF_STATE_DISABLE)) 5997 return false; 5998 new = val | NAPIF_STATE_SCHED; 5999 6000 /* Sets STATE_MISSED bit if STATE_SCHED was already set 6001 * This was suggested by Alexander Duyck, as compiler 6002 * emits better code than : 6003 * if (val & NAPIF_STATE_SCHED) 6004 * new |= NAPIF_STATE_MISSED; 6005 */ 6006 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6007 NAPIF_STATE_MISSED; 6008 } while (cmpxchg(&n->state, val, new) != val); 6009 6010 return !(val & NAPIF_STATE_SCHED); 6011 } 6012 EXPORT_SYMBOL(napi_schedule_prep); 6013 6014 /** 6015 * __napi_schedule_irqoff - schedule for receive 6016 * @n: entry to schedule 6017 * 6018 * Variant of __napi_schedule() assuming hard irqs are masked 6019 */ 6020 void __napi_schedule_irqoff(struct napi_struct *n) 6021 { 6022 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6023 } 6024 EXPORT_SYMBOL(__napi_schedule_irqoff); 6025 6026 bool napi_complete_done(struct napi_struct *n, int work_done) 6027 { 6028 unsigned long flags, val, new; 6029 6030 /* 6031 * 1) Don't let napi dequeue from the cpu poll list 6032 * just in case its running on a different cpu. 6033 * 2) If we are busy polling, do nothing here, we have 6034 * the guarantee we will be called later. 6035 */ 6036 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6037 NAPIF_STATE_IN_BUSY_POLL))) 6038 return false; 6039 6040 if (n->gro_bitmask) { 6041 unsigned long timeout = 0; 6042 6043 if (work_done) 6044 timeout = n->dev->gro_flush_timeout; 6045 6046 /* When the NAPI instance uses a timeout and keeps postponing 6047 * it, we need to bound somehow the time packets are kept in 6048 * the GRO layer 6049 */ 6050 napi_gro_flush(n, !!timeout); 6051 if (timeout) 6052 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6053 HRTIMER_MODE_REL_PINNED); 6054 } 6055 if (unlikely(!list_empty(&n->poll_list))) { 6056 /* If n->poll_list is not empty, we need to mask irqs */ 6057 local_irq_save(flags); 6058 list_del_init(&n->poll_list); 6059 local_irq_restore(flags); 6060 } 6061 6062 do { 6063 val = READ_ONCE(n->state); 6064 6065 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6066 6067 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); 6068 6069 /* If STATE_MISSED was set, leave STATE_SCHED set, 6070 * because we will call napi->poll() one more time. 6071 * This C code was suggested by Alexander Duyck to help gcc. 
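 *
 * Worked example of the branchless form used below: NAPIF_STATE_MISSED
 * is a single bit, so (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED
 * evaluates to 1 when the bit is set and 0 otherwise, making the line
 * equivalent to:
 *
 *	if (val & NAPIF_STATE_MISSED)
 *		new |= NAPIF_STATE_SCHED;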
6072 */ 6073 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6074 NAPIF_STATE_SCHED; 6075 } while (cmpxchg(&n->state, val, new) != val); 6076 6077 if (unlikely(val & NAPIF_STATE_MISSED)) { 6078 __napi_schedule(n); 6079 return false; 6080 } 6081 6082 return true; 6083 } 6084 EXPORT_SYMBOL(napi_complete_done); 6085 6086 /* must be called under rcu_read_lock(), as we dont take a reference */ 6087 static struct napi_struct *napi_by_id(unsigned int napi_id) 6088 { 6089 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6090 struct napi_struct *napi; 6091 6092 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6093 if (napi->napi_id == napi_id) 6094 return napi; 6095 6096 return NULL; 6097 } 6098 6099 #if defined(CONFIG_NET_RX_BUSY_POLL) 6100 6101 #define BUSY_POLL_BUDGET 8 6102 6103 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) 6104 { 6105 int rc; 6106 6107 /* Busy polling means there is a high chance device driver hard irq 6108 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6109 * set in napi_schedule_prep(). 6110 * Since we are about to call napi->poll() once more, we can safely 6111 * clear NAPI_STATE_MISSED. 6112 * 6113 * Note: x86 could use a single "lock and ..." instruction 6114 * to perform these two clear_bit() 6115 */ 6116 clear_bit(NAPI_STATE_MISSED, &napi->state); 6117 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6118 6119 local_bh_disable(); 6120 6121 /* All we really want here is to re-enable device interrupts. 6122 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6123 */ 6124 rc = napi->poll(napi, BUSY_POLL_BUDGET); 6125 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); 6126 netpoll_poll_unlock(have_poll_lock); 6127 if (rc == BUSY_POLL_BUDGET) 6128 __napi_schedule(napi); 6129 local_bh_enable(); 6130 } 6131 6132 void napi_busy_loop(unsigned int napi_id, 6133 bool (*loop_end)(void *, unsigned long), 6134 void *loop_end_arg) 6135 { 6136 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6137 int (*napi_poll)(struct napi_struct *napi, int budget); 6138 void *have_poll_lock = NULL; 6139 struct napi_struct *napi; 6140 6141 restart: 6142 napi_poll = NULL; 6143 6144 rcu_read_lock(); 6145 6146 napi = napi_by_id(napi_id); 6147 if (!napi) 6148 goto out; 6149 6150 preempt_disable(); 6151 for (;;) { 6152 int work = 0; 6153 6154 local_bh_disable(); 6155 if (!napi_poll) { 6156 unsigned long val = READ_ONCE(napi->state); 6157 6158 /* If multiple threads are competing for this napi, 6159 * we avoid dirtying napi->state as much as we can. 
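 * (Editor's note: the cmpxchg() below claims NAPIF_STATE_SCHED and
 * NAPIF_STATE_IN_BUSY_POLL in a single atomic update, and the early
 * "goto count" paths bail out without writing napi->state at all.)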
6160 */ 6161 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6162 NAPIF_STATE_IN_BUSY_POLL)) 6163 goto count; 6164 if (cmpxchg(&napi->state, val, 6165 val | NAPIF_STATE_IN_BUSY_POLL | 6166 NAPIF_STATE_SCHED) != val) 6167 goto count; 6168 have_poll_lock = netpoll_poll_lock(napi); 6169 napi_poll = napi->poll; 6170 } 6171 work = napi_poll(napi, BUSY_POLL_BUDGET); 6172 trace_napi_poll(napi, work, BUSY_POLL_BUDGET); 6173 count: 6174 if (work > 0) 6175 __NET_ADD_STATS(dev_net(napi->dev), 6176 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6177 local_bh_enable(); 6178 6179 if (!loop_end || loop_end(loop_end_arg, start_time)) 6180 break; 6181 6182 if (unlikely(need_resched())) { 6183 if (napi_poll) 6184 busy_poll_stop(napi, have_poll_lock); 6185 preempt_enable(); 6186 rcu_read_unlock(); 6187 cond_resched(); 6188 if (loop_end(loop_end_arg, start_time)) 6189 return; 6190 goto restart; 6191 } 6192 cpu_relax(); 6193 } 6194 if (napi_poll) 6195 busy_poll_stop(napi, have_poll_lock); 6196 preempt_enable(); 6197 out: 6198 rcu_read_unlock(); 6199 } 6200 EXPORT_SYMBOL(napi_busy_loop); 6201 6202 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6203 6204 static void napi_hash_add(struct napi_struct *napi) 6205 { 6206 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) || 6207 test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) 6208 return; 6209 6210 spin_lock(&napi_hash_lock); 6211 6212 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6213 do { 6214 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6215 napi_gen_id = MIN_NAPI_ID; 6216 } while (napi_by_id(napi_gen_id)); 6217 napi->napi_id = napi_gen_id; 6218 6219 hlist_add_head_rcu(&napi->napi_hash_node, 6220 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6221 6222 spin_unlock(&napi_hash_lock); 6223 } 6224 6225 /* Warning : caller is responsible to make sure rcu grace period 6226 * is respected before freeing memory containing @napi 6227 */ 6228 bool napi_hash_del(struct napi_struct *napi) 6229 { 6230 bool rcu_sync_needed = false; 6231 6232 spin_lock(&napi_hash_lock); 6233 6234 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) { 6235 rcu_sync_needed = true; 6236 hlist_del_rcu(&napi->napi_hash_node); 6237 } 6238 spin_unlock(&napi_hash_lock); 6239 return rcu_sync_needed; 6240 } 6241 EXPORT_SYMBOL_GPL(napi_hash_del); 6242 6243 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6244 { 6245 struct napi_struct *napi; 6246 6247 napi = container_of(timer, struct napi_struct, timer); 6248 6249 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6250 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
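 * Editor's note: the open-coded test_and_set_bit(NAPI_STATE_SCHED) below is
 * that relaxed variant - it claims SCHED ownership without recording a
 * MISSED event.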
6251 */ 6252 if (napi->gro_bitmask && !napi_disable_pending(napi) && 6253 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) 6254 __napi_schedule_irqoff(napi); 6255 6256 return HRTIMER_NORESTART; 6257 } 6258 6259 static void init_gro_hash(struct napi_struct *napi) 6260 { 6261 int i; 6262 6263 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6264 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6265 napi->gro_hash[i].count = 0; 6266 } 6267 napi->gro_bitmask = 0; 6268 } 6269 6270 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6271 int (*poll)(struct napi_struct *, int), int weight) 6272 { 6273 INIT_LIST_HEAD(&napi->poll_list); 6274 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6275 napi->timer.function = napi_watchdog; 6276 init_gro_hash(napi); 6277 napi->skb = NULL; 6278 napi->poll = poll; 6279 if (weight > NAPI_POLL_WEIGHT) 6280 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6281 weight); 6282 napi->weight = weight; 6283 list_add(&napi->dev_list, &dev->napi_list); 6284 napi->dev = dev; 6285 #ifdef CONFIG_NETPOLL 6286 napi->poll_owner = -1; 6287 #endif 6288 set_bit(NAPI_STATE_SCHED, &napi->state); 6289 napi_hash_add(napi); 6290 } 6291 EXPORT_SYMBOL(netif_napi_add); 6292 6293 void napi_disable(struct napi_struct *n) 6294 { 6295 might_sleep(); 6296 set_bit(NAPI_STATE_DISABLE, &n->state); 6297 6298 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 6299 msleep(1); 6300 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) 6301 msleep(1); 6302 6303 hrtimer_cancel(&n->timer); 6304 6305 clear_bit(NAPI_STATE_DISABLE, &n->state); 6306 } 6307 EXPORT_SYMBOL(napi_disable); 6308 6309 static void flush_gro_hash(struct napi_struct *napi) 6310 { 6311 int i; 6312 6313 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6314 struct sk_buff *skb, *n; 6315 6316 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6317 kfree_skb(skb); 6318 napi->gro_hash[i].count = 0; 6319 } 6320 } 6321 6322 /* Must be called in process context */ 6323 void netif_napi_del(struct napi_struct *napi) 6324 { 6325 might_sleep(); 6326 if (napi_hash_del(napi)) 6327 synchronize_net(); 6328 list_del_init(&napi->dev_list); 6329 napi_free_frags(napi); 6330 6331 flush_gro_hash(napi); 6332 napi->gro_bitmask = 0; 6333 } 6334 EXPORT_SYMBOL(netif_napi_del); 6335 6336 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6337 { 6338 void *have; 6339 int work, weight; 6340 6341 list_del_init(&n->poll_list); 6342 6343 have = netpoll_poll_lock(n); 6344 6345 weight = n->weight; 6346 6347 /* This NAPI_STATE_SCHED test is for avoiding a race 6348 * with netpoll's poll_napi(). Only the entity which 6349 * obtains the lock and sees NAPI_STATE_SCHED set will 6350 * actually make the ->poll() call. Therefore we avoid 6351 * accidentally calling ->poll() when NAPI is not scheduled. 6352 */ 6353 work = 0; 6354 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6355 work = n->poll(n, weight); 6356 trace_napi_poll(n, work, weight); 6357 } 6358 6359 WARN_ON_ONCE(work > weight); 6360 6361 if (likely(work < weight)) 6362 goto out_unlock; 6363 6364 /* Drivers must not modify the NAPI state if they 6365 * consume the entire weight. In such cases this code 6366 * still "owns" the NAPI instance and therefore can 6367 * move the instance around on the list at-will. 6368 */ 6369 if (unlikely(napi_disable_pending(n))) { 6370 napi_complete(n); 6371 goto out_unlock; 6372 } 6373 6374 if (n->gro_bitmask) { 6375 /* flush too old packets 6376 * If HZ < 1000, flush all packets. 
6377 */ 6378 napi_gro_flush(n, HZ >= 1000); 6379 } 6380 6381 /* Some drivers may have called napi_schedule 6382 * prior to exhausting their budget. 6383 */ 6384 if (unlikely(!list_empty(&n->poll_list))) { 6385 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6386 n->dev ? n->dev->name : "backlog"); 6387 goto out_unlock; 6388 } 6389 6390 list_add_tail(&n->poll_list, repoll); 6391 6392 out_unlock: 6393 netpoll_poll_unlock(have); 6394 6395 return work; 6396 } 6397 6398 static __latent_entropy void net_rx_action(struct softirq_action *h) 6399 { 6400 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6401 unsigned long time_limit = jiffies + 6402 usecs_to_jiffies(netdev_budget_usecs); 6403 int budget = netdev_budget; 6404 LIST_HEAD(list); 6405 LIST_HEAD(repoll); 6406 6407 local_irq_disable(); 6408 list_splice_init(&sd->poll_list, &list); 6409 local_irq_enable(); 6410 6411 for (;;) { 6412 struct napi_struct *n; 6413 6414 if (list_empty(&list)) { 6415 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 6416 goto out; 6417 break; 6418 } 6419 6420 n = list_first_entry(&list, struct napi_struct, poll_list); 6421 budget -= napi_poll(n, &repoll); 6422 6423 /* If softirq window is exhausted then punt. 6424 * Allow this to run for 2 jiffies since which will allow 6425 * an average latency of 1.5/HZ. 6426 */ 6427 if (unlikely(budget <= 0 || 6428 time_after_eq(jiffies, time_limit))) { 6429 sd->time_squeeze++; 6430 break; 6431 } 6432 } 6433 6434 local_irq_disable(); 6435 6436 list_splice_tail_init(&sd->poll_list, &list); 6437 list_splice_tail(&repoll, &list); 6438 list_splice(&list, &sd->poll_list); 6439 if (!list_empty(&sd->poll_list)) 6440 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6441 6442 net_rps_action_and_irq_enable(sd); 6443 out: 6444 __kfree_skb_flush(); 6445 } 6446 6447 struct netdev_adjacent { 6448 struct net_device *dev; 6449 6450 /* upper master flag, there can only be one master device per list */ 6451 bool master; 6452 6453 /* counter for the number of times this device was added to us */ 6454 u16 ref_nr; 6455 6456 /* private field for the users */ 6457 void *private; 6458 6459 struct list_head list; 6460 struct rcu_head rcu; 6461 }; 6462 6463 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6464 struct list_head *adj_list) 6465 { 6466 struct netdev_adjacent *adj; 6467 6468 list_for_each_entry(adj, adj_list, list) { 6469 if (adj->dev == adj_dev) 6470 return adj; 6471 } 6472 return NULL; 6473 } 6474 6475 static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data) 6476 { 6477 struct net_device *dev = data; 6478 6479 return upper_dev == dev; 6480 } 6481 6482 /** 6483 * netdev_has_upper_dev - Check if device is linked to an upper device 6484 * @dev: device 6485 * @upper_dev: upper device to check 6486 * 6487 * Find out if a device is linked to specified upper device and return true 6488 * in case it is. Note that this checks only immediate upper device, 6489 * not through a complete stack of devices. The caller must hold the RTNL lock. 
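 *
 * Editor's usage sketch (illustrative only; "bond_dev" is a hypothetical
 * candidate upper device, not a name from this file). This mirrors the
 * loop-prevention check done before linking:
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(bond_dev, dev))
 *		return -EBUSY;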
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */

bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold the RCU read lock.
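 *
 * Editor's sketch of the intended iteration pattern:
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_debug("upper: %s\n", upper->name);
 *	rcu_read_unlock();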
6589 */ 6590 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6591 struct list_head **iter) 6592 { 6593 struct netdev_adjacent *upper; 6594 6595 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6596 6597 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6598 6599 if (&upper->list == &dev->adj_list.upper) 6600 return NULL; 6601 6602 *iter = &upper->list; 6603 6604 return upper->dev; 6605 } 6606 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6607 6608 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6609 struct list_head **iter) 6610 { 6611 struct netdev_adjacent *upper; 6612 6613 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6614 6615 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6616 6617 if (&upper->list == &dev->adj_list.upper) 6618 return NULL; 6619 6620 *iter = &upper->list; 6621 6622 return upper->dev; 6623 } 6624 6625 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6626 int (*fn)(struct net_device *dev, 6627 void *data), 6628 void *data) 6629 { 6630 struct net_device *udev; 6631 struct list_head *iter; 6632 int ret; 6633 6634 for (iter = &dev->adj_list.upper, 6635 udev = netdev_next_upper_dev_rcu(dev, &iter); 6636 udev; 6637 udev = netdev_next_upper_dev_rcu(dev, &iter)) { 6638 /* first is the upper device itself */ 6639 ret = fn(udev, data); 6640 if (ret) 6641 return ret; 6642 6643 /* then look at all of its upper devices */ 6644 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data); 6645 if (ret) 6646 return ret; 6647 } 6648 6649 return 0; 6650 } 6651 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 6652 6653 /** 6654 * netdev_lower_get_next_private - Get the next ->private from the 6655 * lower neighbour list 6656 * @dev: device 6657 * @iter: list_head ** of the current position 6658 * 6659 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6660 * list, starting from iter position. The caller must hold either hold the 6661 * RTNL lock or its own locking that guarantees that the neighbour lower 6662 * list will remain unchanged. 6663 */ 6664 void *netdev_lower_get_next_private(struct net_device *dev, 6665 struct list_head **iter) 6666 { 6667 struct netdev_adjacent *lower; 6668 6669 lower = list_entry(*iter, struct netdev_adjacent, list); 6670 6671 if (&lower->list == &dev->adj_list.lower) 6672 return NULL; 6673 6674 *iter = lower->list.next; 6675 6676 return lower->private; 6677 } 6678 EXPORT_SYMBOL(netdev_lower_get_next_private); 6679 6680 /** 6681 * netdev_lower_get_next_private_rcu - Get the next ->private from the 6682 * lower neighbour list, RCU 6683 * variant 6684 * @dev: device 6685 * @iter: list_head ** of the current position 6686 * 6687 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6688 * list, starting from iter position. The caller must hold RCU read lock. 
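 *
 * Editor's sketch (hedged; "use_private()" is a stand-in for whatever the
 * caller does with the per-link private data, and the loop assumes every
 * lower link installed a non-NULL private):
 *
 *	struct list_head *iter = &dev->adj_list.lower;
 *	void *priv;
 *
 *	rcu_read_lock();
 *	while ((priv = netdev_lower_get_next_private_rcu(dev, &iter)))
 *		use_private(priv);
 *	rcu_read_unlock();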
6689 */ 6690 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 6691 struct list_head **iter) 6692 { 6693 struct netdev_adjacent *lower; 6694 6695 WARN_ON_ONCE(!rcu_read_lock_held()); 6696 6697 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6698 6699 if (&lower->list == &dev->adj_list.lower) 6700 return NULL; 6701 6702 *iter = &lower->list; 6703 6704 return lower->private; 6705 } 6706 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 6707 6708 /** 6709 * netdev_lower_get_next - Get the next device from the lower neighbour 6710 * list 6711 * @dev: device 6712 * @iter: list_head ** of the current position 6713 * 6714 * Gets the next netdev_adjacent from the dev's lower neighbour 6715 * list, starting from iter position. The caller must hold RTNL lock or 6716 * its own locking that guarantees that the neighbour lower 6717 * list will remain unchanged. 6718 */ 6719 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 6720 { 6721 struct netdev_adjacent *lower; 6722 6723 lower = list_entry(*iter, struct netdev_adjacent, list); 6724 6725 if (&lower->list == &dev->adj_list.lower) 6726 return NULL; 6727 6728 *iter = lower->list.next; 6729 6730 return lower->dev; 6731 } 6732 EXPORT_SYMBOL(netdev_lower_get_next); 6733 6734 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 6735 struct list_head **iter) 6736 { 6737 struct netdev_adjacent *lower; 6738 6739 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 6740 6741 if (&lower->list == &dev->adj_list.lower) 6742 return NULL; 6743 6744 *iter = &lower->list; 6745 6746 return lower->dev; 6747 } 6748 6749 int netdev_walk_all_lower_dev(struct net_device *dev, 6750 int (*fn)(struct net_device *dev, 6751 void *data), 6752 void *data) 6753 { 6754 struct net_device *ldev; 6755 struct list_head *iter; 6756 int ret; 6757 6758 for (iter = &dev->adj_list.lower, 6759 ldev = netdev_next_lower_dev(dev, &iter); 6760 ldev; 6761 ldev = netdev_next_lower_dev(dev, &iter)) { 6762 /* first is the lower device itself */ 6763 ret = fn(ldev, data); 6764 if (ret) 6765 return ret; 6766 6767 /* then look at all of its lower devices */ 6768 ret = netdev_walk_all_lower_dev(ldev, fn, data); 6769 if (ret) 6770 return ret; 6771 } 6772 6773 return 0; 6774 } 6775 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 6776 6777 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 6778 struct list_head **iter) 6779 { 6780 struct netdev_adjacent *lower; 6781 6782 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6783 if (&lower->list == &dev->adj_list.lower) 6784 return NULL; 6785 6786 *iter = &lower->list; 6787 6788 return lower->dev; 6789 } 6790 6791 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 6792 int (*fn)(struct net_device *dev, 6793 void *data), 6794 void *data) 6795 { 6796 struct net_device *ldev; 6797 struct list_head *iter; 6798 int ret; 6799 6800 for (iter = &dev->adj_list.lower, 6801 ldev = netdev_next_lower_dev_rcu(dev, &iter); 6802 ldev; 6803 ldev = netdev_next_lower_dev_rcu(dev, &iter)) { 6804 /* first is the lower device itself */ 6805 ret = fn(ldev, data); 6806 if (ret) 6807 return ret; 6808 6809 /* then look at all of its lower devices */ 6810 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data); 6811 if (ret) 6812 return ret; 6813 } 6814 6815 return 0; 6816 } 6817 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 6818 6819 /** 6820 * netdev_lower_get_first_private_rcu - Get the first ->private from the 6821 * lower neighbour list, RCU 
6822 * variant 6823 * @dev: device 6824 * 6825 * Gets the first netdev_adjacent->private from the dev's lower neighbour 6826 * list. The caller must hold RCU read lock. 6827 */ 6828 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 6829 { 6830 struct netdev_adjacent *lower; 6831 6832 lower = list_first_or_null_rcu(&dev->adj_list.lower, 6833 struct netdev_adjacent, list); 6834 if (lower) 6835 return lower->private; 6836 return NULL; 6837 } 6838 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 6839 6840 /** 6841 * netdev_master_upper_dev_get_rcu - Get master upper device 6842 * @dev: device 6843 * 6844 * Find a master upper device and return pointer to it or NULL in case 6845 * it's not there. The caller must hold the RCU read lock. 6846 */ 6847 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 6848 { 6849 struct netdev_adjacent *upper; 6850 6851 upper = list_first_or_null_rcu(&dev->adj_list.upper, 6852 struct netdev_adjacent, list); 6853 if (upper && likely(upper->master)) 6854 return upper->dev; 6855 return NULL; 6856 } 6857 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 6858 6859 static int netdev_adjacent_sysfs_add(struct net_device *dev, 6860 struct net_device *adj_dev, 6861 struct list_head *dev_list) 6862 { 6863 char linkname[IFNAMSIZ+7]; 6864 6865 sprintf(linkname, dev_list == &dev->adj_list.upper ? 6866 "upper_%s" : "lower_%s", adj_dev->name); 6867 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 6868 linkname); 6869 } 6870 static void netdev_adjacent_sysfs_del(struct net_device *dev, 6871 char *name, 6872 struct list_head *dev_list) 6873 { 6874 char linkname[IFNAMSIZ+7]; 6875 6876 sprintf(linkname, dev_list == &dev->adj_list.upper ? 6877 "upper_%s" : "lower_%s", name); 6878 sysfs_remove_link(&(dev->dev.kobj), linkname); 6879 } 6880 6881 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 6882 struct net_device *adj_dev, 6883 struct list_head *dev_list) 6884 { 6885 return (dev_list == &dev->adj_list.upper || 6886 dev_list == &dev->adj_list.lower) && 6887 net_eq(dev_net(dev), dev_net(adj_dev)); 6888 } 6889 6890 static int __netdev_adjacent_dev_insert(struct net_device *dev, 6891 struct net_device *adj_dev, 6892 struct list_head *dev_list, 6893 void *private, bool master) 6894 { 6895 struct netdev_adjacent *adj; 6896 int ret; 6897 6898 adj = __netdev_find_adj(adj_dev, dev_list); 6899 6900 if (adj) { 6901 adj->ref_nr += 1; 6902 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 6903 dev->name, adj_dev->name, adj->ref_nr); 6904 6905 return 0; 6906 } 6907 6908 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 6909 if (!adj) 6910 return -ENOMEM; 6911 6912 adj->dev = adj_dev; 6913 adj->master = master; 6914 adj->ref_nr = 1; 6915 adj->private = private; 6916 dev_hold(adj_dev); 6917 6918 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 6919 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 6920 6921 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 6922 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 6923 if (ret) 6924 goto free_adj; 6925 } 6926 6927 /* Ensure that master link is always the first item in list. 
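	 * (Editor's note: list_add_rcu() below puts the master adjacency at
	 * the list head, while non-master entries are appended with
	 * list_add_tail_rcu().)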
*/ 6928 if (master) { 6929 ret = sysfs_create_link(&(dev->dev.kobj), 6930 &(adj_dev->dev.kobj), "master"); 6931 if (ret) 6932 goto remove_symlinks; 6933 6934 list_add_rcu(&adj->list, dev_list); 6935 } else { 6936 list_add_tail_rcu(&adj->list, dev_list); 6937 } 6938 6939 return 0; 6940 6941 remove_symlinks: 6942 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 6943 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 6944 free_adj: 6945 kfree(adj); 6946 dev_put(adj_dev); 6947 6948 return ret; 6949 } 6950 6951 static void __netdev_adjacent_dev_remove(struct net_device *dev, 6952 struct net_device *adj_dev, 6953 u16 ref_nr, 6954 struct list_head *dev_list) 6955 { 6956 struct netdev_adjacent *adj; 6957 6958 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 6959 dev->name, adj_dev->name, ref_nr); 6960 6961 adj = __netdev_find_adj(adj_dev, dev_list); 6962 6963 if (!adj) { 6964 pr_err("Adjacency does not exist for device %s from %s\n", 6965 dev->name, adj_dev->name); 6966 WARN_ON(1); 6967 return; 6968 } 6969 6970 if (adj->ref_nr > ref_nr) { 6971 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 6972 dev->name, adj_dev->name, ref_nr, 6973 adj->ref_nr - ref_nr); 6974 adj->ref_nr -= ref_nr; 6975 return; 6976 } 6977 6978 if (adj->master) 6979 sysfs_remove_link(&(dev->dev.kobj), "master"); 6980 6981 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 6982 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 6983 6984 list_del_rcu(&adj->list); 6985 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 6986 adj_dev->name, dev->name, adj_dev->name); 6987 dev_put(adj_dev); 6988 kfree_rcu(adj, rcu); 6989 } 6990 6991 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 6992 struct net_device *upper_dev, 6993 struct list_head *up_list, 6994 struct list_head *down_list, 6995 void *private, bool master) 6996 { 6997 int ret; 6998 6999 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7000 private, master); 7001 if (ret) 7002 return ret; 7003 7004 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7005 private, false); 7006 if (ret) { 7007 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7008 return ret; 7009 } 7010 7011 return 0; 7012 } 7013 7014 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7015 struct net_device *upper_dev, 7016 u16 ref_nr, 7017 struct list_head *up_list, 7018 struct list_head *down_list) 7019 { 7020 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7021 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7022 } 7023 7024 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7025 struct net_device *upper_dev, 7026 void *private, bool master) 7027 { 7028 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7029 &dev->adj_list.upper, 7030 &upper_dev->adj_list.lower, 7031 private, master); 7032 } 7033 7034 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7035 struct net_device *upper_dev) 7036 { 7037 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7038 &dev->adj_list.upper, 7039 &upper_dev->adj_list.lower); 7040 } 7041 7042 static int __netdev_upper_dev_link(struct net_device *dev, 7043 struct net_device *upper_dev, bool master, 7044 void *upper_priv, void *upper_info, 7045 struct netlink_ext_ack *extack) 7046 { 7047 struct netdev_notifier_changeupper_info changeupper_info = { 7048 .info = { 7049 .dev = dev, 7050 .extack = extack, 7051 }, 7052 .upper_dev = upper_dev, 7053 .master = master, 
7054 .linking = true, 7055 .upper_info = upper_info, 7056 }; 7057 struct net_device *master_dev; 7058 int ret = 0; 7059 7060 ASSERT_RTNL(); 7061 7062 if (dev == upper_dev) 7063 return -EBUSY; 7064 7065 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7066 if (netdev_has_upper_dev(upper_dev, dev)) 7067 return -EBUSY; 7068 7069 if (!master) { 7070 if (netdev_has_upper_dev(dev, upper_dev)) 7071 return -EEXIST; 7072 } else { 7073 master_dev = netdev_master_upper_dev_get(dev); 7074 if (master_dev) 7075 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7076 } 7077 7078 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7079 &changeupper_info.info); 7080 ret = notifier_to_errno(ret); 7081 if (ret) 7082 return ret; 7083 7084 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7085 master); 7086 if (ret) 7087 return ret; 7088 7089 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7090 &changeupper_info.info); 7091 ret = notifier_to_errno(ret); 7092 if (ret) 7093 goto rollback; 7094 7095 return 0; 7096 7097 rollback: 7098 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7099 7100 return ret; 7101 } 7102 7103 /** 7104 * netdev_upper_dev_link - Add a link to the upper device 7105 * @dev: device 7106 * @upper_dev: new upper device 7107 * @extack: netlink extended ack 7108 * 7109 * Adds a link to device which is upper to this one. The caller must hold 7110 * the RTNL lock. On a failure a negative errno code is returned. 7111 * On success the reference counts are adjusted and the function 7112 * returns zero. 7113 */ 7114 int netdev_upper_dev_link(struct net_device *dev, 7115 struct net_device *upper_dev, 7116 struct netlink_ext_ack *extack) 7117 { 7118 return __netdev_upper_dev_link(dev, upper_dev, false, 7119 NULL, NULL, extack); 7120 } 7121 EXPORT_SYMBOL(netdev_upper_dev_link); 7122 7123 /** 7124 * netdev_master_upper_dev_link - Add a master link to the upper device 7125 * @dev: device 7126 * @upper_dev: new upper device 7127 * @upper_priv: upper device private 7128 * @upper_info: upper info to be passed down via notifier 7129 * @extack: netlink extended ack 7130 * 7131 * Adds a link to device which is upper to this one. In this case, only 7132 * one master upper device can be linked, although other non-master devices 7133 * might be linked as well. The caller must hold the RTNL lock. 7134 * On a failure a negative errno code is returned. On success the reference 7135 * counts are adjusted and the function returns zero. 7136 */ 7137 int netdev_master_upper_dev_link(struct net_device *dev, 7138 struct net_device *upper_dev, 7139 void *upper_priv, void *upper_info, 7140 struct netlink_ext_ack *extack) 7141 { 7142 return __netdev_upper_dev_link(dev, upper_dev, true, 7143 upper_priv, upper_info, extack); 7144 } 7145 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7146 7147 /** 7148 * netdev_upper_dev_unlink - Removes a link to upper device 7149 * @dev: device 7150 * @upper_dev: new upper device 7151 * 7152 * Removes a link to device which is upper to this one. The caller must hold 7153 * the RTNL lock. 
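 *
 * Editor's sketch of the usual pairing with the link calls above (the
 * "port"/"master" devices and the error handling are illustrative only):
 *
 *	err = netdev_master_upper_dev_link(port, master, NULL, NULL, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(port, master);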
7154 */ 7155 void netdev_upper_dev_unlink(struct net_device *dev, 7156 struct net_device *upper_dev) 7157 { 7158 struct netdev_notifier_changeupper_info changeupper_info = { 7159 .info = { 7160 .dev = dev, 7161 }, 7162 .upper_dev = upper_dev, 7163 .linking = false, 7164 }; 7165 7166 ASSERT_RTNL(); 7167 7168 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7169 7170 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7171 &changeupper_info.info); 7172 7173 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7174 7175 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7176 &changeupper_info.info); 7177 } 7178 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7179 7180 /** 7181 * netdev_bonding_info_change - Dispatch event about slave change 7182 * @dev: device 7183 * @bonding_info: info to dispatch 7184 * 7185 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7186 * The caller must hold the RTNL lock. 7187 */ 7188 void netdev_bonding_info_change(struct net_device *dev, 7189 struct netdev_bonding_info *bonding_info) 7190 { 7191 struct netdev_notifier_bonding_info info = { 7192 .info.dev = dev, 7193 }; 7194 7195 memcpy(&info.bonding_info, bonding_info, 7196 sizeof(struct netdev_bonding_info)); 7197 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7198 &info.info); 7199 } 7200 EXPORT_SYMBOL(netdev_bonding_info_change); 7201 7202 static void netdev_adjacent_add_links(struct net_device *dev) 7203 { 7204 struct netdev_adjacent *iter; 7205 7206 struct net *net = dev_net(dev); 7207 7208 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7209 if (!net_eq(net, dev_net(iter->dev))) 7210 continue; 7211 netdev_adjacent_sysfs_add(iter->dev, dev, 7212 &iter->dev->adj_list.lower); 7213 netdev_adjacent_sysfs_add(dev, iter->dev, 7214 &dev->adj_list.upper); 7215 } 7216 7217 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7218 if (!net_eq(net, dev_net(iter->dev))) 7219 continue; 7220 netdev_adjacent_sysfs_add(iter->dev, dev, 7221 &iter->dev->adj_list.upper); 7222 netdev_adjacent_sysfs_add(dev, iter->dev, 7223 &dev->adj_list.lower); 7224 } 7225 } 7226 7227 static void netdev_adjacent_del_links(struct net_device *dev) 7228 { 7229 struct netdev_adjacent *iter; 7230 7231 struct net *net = dev_net(dev); 7232 7233 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7234 if (!net_eq(net, dev_net(iter->dev))) 7235 continue; 7236 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7237 &iter->dev->adj_list.lower); 7238 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7239 &dev->adj_list.upper); 7240 } 7241 7242 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7243 if (!net_eq(net, dev_net(iter->dev))) 7244 continue; 7245 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7246 &iter->dev->adj_list.upper); 7247 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7248 &dev->adj_list.lower); 7249 } 7250 } 7251 7252 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 7253 { 7254 struct netdev_adjacent *iter; 7255 7256 struct net *net = dev_net(dev); 7257 7258 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7259 if (!net_eq(net, dev_net(iter->dev))) 7260 continue; 7261 netdev_adjacent_sysfs_del(iter->dev, oldname, 7262 &iter->dev->adj_list.lower); 7263 netdev_adjacent_sysfs_add(iter->dev, dev, 7264 &iter->dev->adj_list.lower); 7265 } 7266 7267 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7268 if (!net_eq(net, dev_net(iter->dev))) 7269 continue; 7270 netdev_adjacent_sysfs_del(iter->dev, oldname, 7271 &iter->dev->adj_list.upper); 7272 
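		/* re-create the peer's link under the device's new name
		 * (editor's note)
		 */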
netdev_adjacent_sysfs_add(iter->dev, dev, 7273 &iter->dev->adj_list.upper); 7274 } 7275 } 7276 7277 void *netdev_lower_dev_get_private(struct net_device *dev, 7278 struct net_device *lower_dev) 7279 { 7280 struct netdev_adjacent *lower; 7281 7282 if (!lower_dev) 7283 return NULL; 7284 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 7285 if (!lower) 7286 return NULL; 7287 7288 return lower->private; 7289 } 7290 EXPORT_SYMBOL(netdev_lower_dev_get_private); 7291 7292 7293 int dev_get_nest_level(struct net_device *dev) 7294 { 7295 struct net_device *lower = NULL; 7296 struct list_head *iter; 7297 int max_nest = -1; 7298 int nest; 7299 7300 ASSERT_RTNL(); 7301 7302 netdev_for_each_lower_dev(dev, lower, iter) { 7303 nest = dev_get_nest_level(lower); 7304 if (max_nest < nest) 7305 max_nest = nest; 7306 } 7307 7308 return max_nest + 1; 7309 } 7310 EXPORT_SYMBOL(dev_get_nest_level); 7311 7312 /** 7313 * netdev_lower_change - Dispatch event about lower device state change 7314 * @lower_dev: device 7315 * @lower_state_info: state to dispatch 7316 * 7317 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 7318 * The caller must hold the RTNL lock. 7319 */ 7320 void netdev_lower_state_changed(struct net_device *lower_dev, 7321 void *lower_state_info) 7322 { 7323 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 7324 .info.dev = lower_dev, 7325 }; 7326 7327 ASSERT_RTNL(); 7328 changelowerstate_info.lower_state_info = lower_state_info; 7329 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 7330 &changelowerstate_info.info); 7331 } 7332 EXPORT_SYMBOL(netdev_lower_state_changed); 7333 7334 static void dev_change_rx_flags(struct net_device *dev, int flags) 7335 { 7336 const struct net_device_ops *ops = dev->netdev_ops; 7337 7338 if (ops->ndo_change_rx_flags) 7339 ops->ndo_change_rx_flags(dev, flags); 7340 } 7341 7342 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 7343 { 7344 unsigned int old_flags = dev->flags; 7345 kuid_t uid; 7346 kgid_t gid; 7347 7348 ASSERT_RTNL(); 7349 7350 dev->flags |= IFF_PROMISC; 7351 dev->promiscuity += inc; 7352 if (dev->promiscuity == 0) { 7353 /* 7354 * Avoid overflow. 7355 * If inc causes overflow, untouch promisc and return error. 7356 */ 7357 if (inc < 0) 7358 dev->flags &= ~IFF_PROMISC; 7359 else { 7360 dev->promiscuity -= inc; 7361 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 7362 dev->name); 7363 return -EOVERFLOW; 7364 } 7365 } 7366 if (dev->flags != old_flags) { 7367 pr_info("device %s %s promiscuous mode\n", 7368 dev->name, 7369 dev->flags & IFF_PROMISC ? "entered" : "left"); 7370 if (audit_enabled) { 7371 current_uid_gid(&uid, &gid); 7372 audit_log(audit_context(), GFP_ATOMIC, 7373 AUDIT_ANOM_PROMISCUOUS, 7374 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 7375 dev->name, (dev->flags & IFF_PROMISC), 7376 (old_flags & IFF_PROMISC), 7377 from_kuid(&init_user_ns, audit_get_loginuid(current)), 7378 from_kuid(&init_user_ns, uid), 7379 from_kgid(&init_user_ns, gid), 7380 audit_get_sessionid(current)); 7381 } 7382 7383 dev_change_rx_flags(dev, IFF_PROMISC); 7384 } 7385 if (notify) 7386 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 7387 return 0; 7388 } 7389 7390 /** 7391 * dev_set_promiscuity - update promiscuity count on a device 7392 * @dev: device 7393 * @inc: modifier 7394 * 7395 * Add or remove promiscuity from a device. 
While the count in the device 7396 * remains above zero the interface remains promiscuous. Once it hits zero 7397 * the device reverts back to normal filtering operation. A negative inc 7398 * value is used to drop promiscuity on the device. 7399 * Return 0 if successful or a negative errno code on error. 7400 */ 7401 int dev_set_promiscuity(struct net_device *dev, int inc) 7402 { 7403 unsigned int old_flags = dev->flags; 7404 int err; 7405 7406 err = __dev_set_promiscuity(dev, inc, true); 7407 if (err < 0) 7408 return err; 7409 if (dev->flags != old_flags) 7410 dev_set_rx_mode(dev); 7411 return err; 7412 } 7413 EXPORT_SYMBOL(dev_set_promiscuity); 7414 7415 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 7416 { 7417 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 7418 7419 ASSERT_RTNL(); 7420 7421 dev->flags |= IFF_ALLMULTI; 7422 dev->allmulti += inc; 7423 if (dev->allmulti == 0) { 7424 /* 7425 * Avoid overflow. 7426 * If inc causes overflow, untouch allmulti and return error. 7427 */ 7428 if (inc < 0) 7429 dev->flags &= ~IFF_ALLMULTI; 7430 else { 7431 dev->allmulti -= inc; 7432 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 7433 dev->name); 7434 return -EOVERFLOW; 7435 } 7436 } 7437 if (dev->flags ^ old_flags) { 7438 dev_change_rx_flags(dev, IFF_ALLMULTI); 7439 dev_set_rx_mode(dev); 7440 if (notify) 7441 __dev_notify_flags(dev, old_flags, 7442 dev->gflags ^ old_gflags); 7443 } 7444 return 0; 7445 } 7446 7447 /** 7448 * dev_set_allmulti - update allmulti count on a device 7449 * @dev: device 7450 * @inc: modifier 7451 * 7452 * Add or remove reception of all multicast frames to a device. While the 7453 * count in the device remains above zero the interface remains listening 7454 * to all interfaces. Once it hits zero the device reverts back to normal 7455 * filtering operation. A negative @inc value is used to drop the counter 7456 * when releasing a resource needing all multicasts. 7457 * Return 0 if successful or a negative errno code on error. 7458 */ 7459 7460 int dev_set_allmulti(struct net_device *dev, int inc) 7461 { 7462 return __dev_set_allmulti(dev, inc, true); 7463 } 7464 EXPORT_SYMBOL(dev_set_allmulti); 7465 7466 /* 7467 * Upload unicast and multicast address lists to device and 7468 * configure RX filtering. When the device doesn't support unicast 7469 * filtering it is put in promiscuous mode while unicast addresses 7470 * are present. 7471 */ 7472 void __dev_set_rx_mode(struct net_device *dev) 7473 { 7474 const struct net_device_ops *ops = dev->netdev_ops; 7475 7476 /* dev_open will call this function so the list will stay sane. */ 7477 if (!(dev->flags&IFF_UP)) 7478 return; 7479 7480 if (!netif_device_present(dev)) 7481 return; 7482 7483 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 7484 /* Unicast addresses changes may only happen under the rtnl, 7485 * therefore calling __dev_set_promiscuity here is safe. 
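		 * (Editor's note: dev->uc_promisc remembers that promiscuous
		 * mode was entered on behalf of unicast filtering, so the
		 * reference taken here is dropped again once the unicast
		 * list empties.)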
7486 */ 7487 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 7488 __dev_set_promiscuity(dev, 1, false); 7489 dev->uc_promisc = true; 7490 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 7491 __dev_set_promiscuity(dev, -1, false); 7492 dev->uc_promisc = false; 7493 } 7494 } 7495 7496 if (ops->ndo_set_rx_mode) 7497 ops->ndo_set_rx_mode(dev); 7498 } 7499 7500 void dev_set_rx_mode(struct net_device *dev) 7501 { 7502 netif_addr_lock_bh(dev); 7503 __dev_set_rx_mode(dev); 7504 netif_addr_unlock_bh(dev); 7505 } 7506 7507 /** 7508 * dev_get_flags - get flags reported to userspace 7509 * @dev: device 7510 * 7511 * Get the combination of flag bits exported through APIs to userspace. 7512 */ 7513 unsigned int dev_get_flags(const struct net_device *dev) 7514 { 7515 unsigned int flags; 7516 7517 flags = (dev->flags & ~(IFF_PROMISC | 7518 IFF_ALLMULTI | 7519 IFF_RUNNING | 7520 IFF_LOWER_UP | 7521 IFF_DORMANT)) | 7522 (dev->gflags & (IFF_PROMISC | 7523 IFF_ALLMULTI)); 7524 7525 if (netif_running(dev)) { 7526 if (netif_oper_up(dev)) 7527 flags |= IFF_RUNNING; 7528 if (netif_carrier_ok(dev)) 7529 flags |= IFF_LOWER_UP; 7530 if (netif_dormant(dev)) 7531 flags |= IFF_DORMANT; 7532 } 7533 7534 return flags; 7535 } 7536 EXPORT_SYMBOL(dev_get_flags); 7537 7538 int __dev_change_flags(struct net_device *dev, unsigned int flags, 7539 struct netlink_ext_ack *extack) 7540 { 7541 unsigned int old_flags = dev->flags; 7542 int ret; 7543 7544 ASSERT_RTNL(); 7545 7546 /* 7547 * Set the flags on our device. 7548 */ 7549 7550 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 7551 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 7552 IFF_AUTOMEDIA)) | 7553 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 7554 IFF_ALLMULTI)); 7555 7556 /* 7557 * Load in the correct multicast list now the flags have changed. 7558 */ 7559 7560 if ((old_flags ^ flags) & IFF_MULTICAST) 7561 dev_change_rx_flags(dev, IFF_MULTICAST); 7562 7563 dev_set_rx_mode(dev); 7564 7565 /* 7566 * Have we downed the interface. We handle IFF_UP ourselves 7567 * according to user attempts to set it, rather than blindly 7568 * setting it. 7569 */ 7570 7571 ret = 0; 7572 if ((old_flags ^ flags) & IFF_UP) { 7573 if (old_flags & IFF_UP) 7574 __dev_close(dev); 7575 else 7576 ret = __dev_open(dev, extack); 7577 } 7578 7579 if ((flags ^ dev->gflags) & IFF_PROMISC) { 7580 int inc = (flags & IFF_PROMISC) ? 1 : -1; 7581 unsigned int old_flags = dev->flags; 7582 7583 dev->gflags ^= IFF_PROMISC; 7584 7585 if (__dev_set_promiscuity(dev, inc, false) >= 0) 7586 if (dev->flags != old_flags) 7587 dev_set_rx_mode(dev); 7588 } 7589 7590 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 7591 * is important. Some (broken) drivers set IFF_PROMISC, when 7592 * IFF_ALLMULTI is requested not asking us and not reporting. 7593 */ 7594 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 7595 int inc = (flags & IFF_ALLMULTI) ? 
1 : -1; 7596 7597 dev->gflags ^= IFF_ALLMULTI; 7598 __dev_set_allmulti(dev, inc, false); 7599 } 7600 7601 return ret; 7602 } 7603 7604 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 7605 unsigned int gchanges) 7606 { 7607 unsigned int changes = dev->flags ^ old_flags; 7608 7609 if (gchanges) 7610 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 7611 7612 if (changes & IFF_UP) { 7613 if (dev->flags & IFF_UP) 7614 call_netdevice_notifiers(NETDEV_UP, dev); 7615 else 7616 call_netdevice_notifiers(NETDEV_DOWN, dev); 7617 } 7618 7619 if (dev->flags & IFF_UP && 7620 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 7621 struct netdev_notifier_change_info change_info = { 7622 .info = { 7623 .dev = dev, 7624 }, 7625 .flags_changed = changes, 7626 }; 7627 7628 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 7629 } 7630 } 7631 7632 /** 7633 * dev_change_flags - change device settings 7634 * @dev: device 7635 * @flags: device state flags 7636 * @extack: netlink extended ack 7637 * 7638 * Change settings on device based state flags. The flags are 7639 * in the userspace exported format. 7640 */ 7641 int dev_change_flags(struct net_device *dev, unsigned int flags, 7642 struct netlink_ext_ack *extack) 7643 { 7644 int ret; 7645 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 7646 7647 ret = __dev_change_flags(dev, flags, extack); 7648 if (ret < 0) 7649 return ret; 7650 7651 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 7652 __dev_notify_flags(dev, old_flags, changes); 7653 return ret; 7654 } 7655 EXPORT_SYMBOL(dev_change_flags); 7656 7657 int __dev_set_mtu(struct net_device *dev, int new_mtu) 7658 { 7659 const struct net_device_ops *ops = dev->netdev_ops; 7660 7661 if (ops->ndo_change_mtu) 7662 return ops->ndo_change_mtu(dev, new_mtu); 7663 7664 dev->mtu = new_mtu; 7665 return 0; 7666 } 7667 EXPORT_SYMBOL(__dev_set_mtu); 7668 7669 /** 7670 * dev_set_mtu_ext - Change maximum transfer unit 7671 * @dev: device 7672 * @new_mtu: new transfer unit 7673 * @extack: netlink extended ack 7674 * 7675 * Change the maximum transfer size of the network device. 7676 */ 7677 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 7678 struct netlink_ext_ack *extack) 7679 { 7680 int err, orig_mtu; 7681 7682 if (new_mtu == dev->mtu) 7683 return 0; 7684 7685 /* MTU must be positive, and in range */ 7686 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 7687 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 7688 return -EINVAL; 7689 } 7690 7691 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 7692 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 7693 return -EINVAL; 7694 } 7695 7696 if (!netif_device_present(dev)) 7697 return -ENODEV; 7698 7699 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 7700 err = notifier_to_errno(err); 7701 if (err) 7702 return err; 7703 7704 orig_mtu = dev->mtu; 7705 err = __dev_set_mtu(dev, new_mtu); 7706 7707 if (!err) { 7708 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 7709 orig_mtu); 7710 err = notifier_to_errno(err); 7711 if (err) { 7712 /* setting mtu back and notifying everyone again, 7713 * so that they have a chance to revert changes. 
7714 */ 7715 __dev_set_mtu(dev, orig_mtu); 7716 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 7717 new_mtu); 7718 } 7719 } 7720 return err; 7721 } 7722 7723 int dev_set_mtu(struct net_device *dev, int new_mtu) 7724 { 7725 struct netlink_ext_ack extack; 7726 int err; 7727 7728 memset(&extack, 0, sizeof(extack)); 7729 err = dev_set_mtu_ext(dev, new_mtu, &extack); 7730 if (err && extack._msg) 7731 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 7732 return err; 7733 } 7734 EXPORT_SYMBOL(dev_set_mtu); 7735 7736 /** 7737 * dev_change_tx_queue_len - Change TX queue length of a netdevice 7738 * @dev: device 7739 * @new_len: new tx queue length 7740 */ 7741 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 7742 { 7743 unsigned int orig_len = dev->tx_queue_len; 7744 int res; 7745 7746 if (new_len != (unsigned int)new_len) 7747 return -ERANGE; 7748 7749 if (new_len != orig_len) { 7750 dev->tx_queue_len = new_len; 7751 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 7752 res = notifier_to_errno(res); 7753 if (res) 7754 goto err_rollback; 7755 res = dev_qdisc_change_tx_queue_len(dev); 7756 if (res) 7757 goto err_rollback; 7758 } 7759 7760 return 0; 7761 7762 err_rollback: 7763 netdev_err(dev, "refused to change device tx_queue_len\n"); 7764 dev->tx_queue_len = orig_len; 7765 return res; 7766 } 7767 7768 /** 7769 * dev_set_group - Change group this device belongs to 7770 * @dev: device 7771 * @new_group: group this device should belong to 7772 */ 7773 void dev_set_group(struct net_device *dev, int new_group) 7774 { 7775 dev->group = new_group; 7776 } 7777 EXPORT_SYMBOL(dev_set_group); 7778 7779 /** 7780 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 7781 * @dev: device 7782 * @addr: new address 7783 * @extack: netlink extended ack 7784 */ 7785 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 7786 struct netlink_ext_ack *extack) 7787 { 7788 struct netdev_notifier_pre_changeaddr_info info = { 7789 .info.dev = dev, 7790 .info.extack = extack, 7791 .dev_addr = addr, 7792 }; 7793 int rc; 7794 7795 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 7796 return notifier_to_errno(rc); 7797 } 7798 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 7799 7800 /** 7801 * dev_set_mac_address - Change Media Access Control Address 7802 * @dev: device 7803 * @sa: new address 7804 * @extack: netlink extended ack 7805 * 7806 * Change the hardware (MAC) address of the device 7807 */ 7808 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 7809 struct netlink_ext_ack *extack) 7810 { 7811 const struct net_device_ops *ops = dev->netdev_ops; 7812 int err; 7813 7814 if (!ops->ndo_set_mac_address) 7815 return -EOPNOTSUPP; 7816 if (sa->sa_family != dev->type) 7817 return -EINVAL; 7818 if (!netif_device_present(dev)) 7819 return -ENODEV; 7820 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 7821 if (err) 7822 return err; 7823 err = ops->ndo_set_mac_address(dev, sa); 7824 if (err) 7825 return err; 7826 dev->addr_assign_type = NET_ADDR_SET; 7827 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 7828 add_device_randomness(dev->dev_addr, dev->addr_len); 7829 return 0; 7830 } 7831 EXPORT_SYMBOL(dev_set_mac_address); 7832 7833 /** 7834 * dev_change_carrier - Change device carrier 7835 * @dev: device 7836 * @new_carrier: new value 7837 * 7838 * Change device carrier 7839 */ 7840 int dev_change_carrier(struct net_device *dev, bool new_carrier) 7841 { 7842 const struct net_device_ops *ops = dev->netdev_ops; 
7843 7844 if (!ops->ndo_change_carrier) 7845 return -EOPNOTSUPP; 7846 if (!netif_device_present(dev)) 7847 return -ENODEV; 7848 return ops->ndo_change_carrier(dev, new_carrier); 7849 } 7850 EXPORT_SYMBOL(dev_change_carrier); 7851 7852 /** 7853 * dev_get_phys_port_id - Get device physical port ID 7854 * @dev: device 7855 * @ppid: port ID 7856 * 7857 * Get device physical port ID 7858 */ 7859 int dev_get_phys_port_id(struct net_device *dev, 7860 struct netdev_phys_item_id *ppid) 7861 { 7862 const struct net_device_ops *ops = dev->netdev_ops; 7863 7864 if (!ops->ndo_get_phys_port_id) 7865 return -EOPNOTSUPP; 7866 return ops->ndo_get_phys_port_id(dev, ppid); 7867 } 7868 EXPORT_SYMBOL(dev_get_phys_port_id); 7869 7870 /** 7871 * dev_get_phys_port_name - Get device physical port name 7872 * @dev: device 7873 * @name: port name 7874 * @len: limit of bytes to copy to name 7875 * 7876 * Get device physical port name 7877 */ 7878 int dev_get_phys_port_name(struct net_device *dev, 7879 char *name, size_t len) 7880 { 7881 const struct net_device_ops *ops = dev->netdev_ops; 7882 int err; 7883 7884 if (ops->ndo_get_phys_port_name) { 7885 err = ops->ndo_get_phys_port_name(dev, name, len); 7886 if (err != -EOPNOTSUPP) 7887 return err; 7888 } 7889 return devlink_compat_phys_port_name_get(dev, name, len); 7890 } 7891 EXPORT_SYMBOL(dev_get_phys_port_name); 7892 7893 /** 7894 * dev_get_port_parent_id - Get the device's port parent identifier 7895 * @dev: network device 7896 * @ppid: pointer to a storage for the port's parent identifier 7897 * @recurse: allow/disallow recursion to lower devices 7898 * 7899 * Get the devices's port parent identifier 7900 */ 7901 int dev_get_port_parent_id(struct net_device *dev, 7902 struct netdev_phys_item_id *ppid, 7903 bool recurse) 7904 { 7905 const struct net_device_ops *ops = dev->netdev_ops; 7906 struct netdev_phys_item_id first = { }; 7907 struct net_device *lower_dev; 7908 struct list_head *iter; 7909 int err; 7910 7911 if (ops->ndo_get_port_parent_id) { 7912 err = ops->ndo_get_port_parent_id(dev, ppid); 7913 if (err != -EOPNOTSUPP) 7914 return err; 7915 } 7916 7917 err = devlink_compat_switch_id_get(dev, ppid); 7918 if (!err || err != -EOPNOTSUPP) 7919 return err; 7920 7921 if (!recurse) 7922 return -EOPNOTSUPP; 7923 7924 netdev_for_each_lower_dev(dev, lower_dev, iter) { 7925 err = dev_get_port_parent_id(lower_dev, ppid, recurse); 7926 if (err) 7927 break; 7928 if (!first.id_len) 7929 first = *ppid; 7930 else if (memcmp(&first, ppid, sizeof(*ppid))) 7931 return -ENODATA; 7932 } 7933 7934 return err; 7935 } 7936 EXPORT_SYMBOL(dev_get_port_parent_id); 7937 7938 /** 7939 * netdev_port_same_parent_id - Indicate if two network devices have 7940 * the same port parent identifier 7941 * @a: first network device 7942 * @b: second network device 7943 */ 7944 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 7945 { 7946 struct netdev_phys_item_id a_id = { }; 7947 struct netdev_phys_item_id b_id = { }; 7948 7949 if (dev_get_port_parent_id(a, &a_id, true) || 7950 dev_get_port_parent_id(b, &b_id, true)) 7951 return false; 7952 7953 return netdev_phys_item_id_same(&a_id, &b_id); 7954 } 7955 EXPORT_SYMBOL(netdev_port_same_parent_id); 7956 7957 /** 7958 * dev_change_proto_down - update protocol port state information 7959 * @dev: device 7960 * @proto_down: new value 7961 * 7962 * This info can be used by switch drivers to set the phys state of the 7963 * port. 
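 *
 * Editor's usage sketch ("port_dev" is a hypothetical device, not a name
 * from this file):
 *
 *	err = dev_change_proto_down(port_dev, true);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;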
7964 */ 7965 int dev_change_proto_down(struct net_device *dev, bool proto_down) 7966 { 7967 const struct net_device_ops *ops = dev->netdev_ops; 7968 7969 if (!ops->ndo_change_proto_down) 7970 return -EOPNOTSUPP; 7971 if (!netif_device_present(dev)) 7972 return -ENODEV; 7973 return ops->ndo_change_proto_down(dev, proto_down); 7974 } 7975 EXPORT_SYMBOL(dev_change_proto_down); 7976 7977 /** 7978 * dev_change_proto_down_generic - generic implementation for 7979 * ndo_change_proto_down that sets carrier according to 7980 * proto_down. 7981 * 7982 * @dev: device 7983 * @proto_down: new value 7984 */ 7985 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) 7986 { 7987 if (proto_down) 7988 netif_carrier_off(dev); 7989 else 7990 netif_carrier_on(dev); 7991 dev->proto_down = proto_down; 7992 return 0; 7993 } 7994 EXPORT_SYMBOL(dev_change_proto_down_generic); 7995 7996 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, 7997 enum bpf_netdev_command cmd) 7998 { 7999 struct netdev_bpf xdp; 8000 8001 if (!bpf_op) 8002 return 0; 8003 8004 memset(&xdp, 0, sizeof(xdp)); 8005 xdp.command = cmd; 8006 8007 /* Query must always succeed. */ 8008 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); 8009 8010 return xdp.prog_id; 8011 } 8012 8013 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, 8014 struct netlink_ext_ack *extack, u32 flags, 8015 struct bpf_prog *prog) 8016 { 8017 struct netdev_bpf xdp; 8018 8019 memset(&xdp, 0, sizeof(xdp)); 8020 if (flags & XDP_FLAGS_HW_MODE) 8021 xdp.command = XDP_SETUP_PROG_HW; 8022 else 8023 xdp.command = XDP_SETUP_PROG; 8024 xdp.extack = extack; 8025 xdp.flags = flags; 8026 xdp.prog = prog; 8027 8028 return bpf_op(dev, &xdp); 8029 } 8030 8031 static void dev_xdp_uninstall(struct net_device *dev) 8032 { 8033 struct netdev_bpf xdp; 8034 bpf_op_t ndo_bpf; 8035 8036 /* Remove generic XDP */ 8037 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL)); 8038 8039 /* Remove from the driver */ 8040 ndo_bpf = dev->netdev_ops->ndo_bpf; 8041 if (!ndo_bpf) 8042 return; 8043 8044 memset(&xdp, 0, sizeof(xdp)); 8045 xdp.command = XDP_QUERY_PROG; 8046 WARN_ON(ndo_bpf(dev, &xdp)); 8047 if (xdp.prog_id) 8048 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 8049 NULL)); 8050 8051 /* Remove HW offload */ 8052 memset(&xdp, 0, sizeof(xdp)); 8053 xdp.command = XDP_QUERY_PROG_HW; 8054 if (!ndo_bpf(dev, &xdp) && xdp.prog_id) 8055 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 8056 NULL)); 8057 } 8058 8059 /** 8060 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 8061 * @dev: device 8062 * @extack: netlink extended ack 8063 * @fd: new program fd or negative value to clear 8064 * @flags: xdp-related flags 8065 * 8066 * Set or clear a bpf program for a device 8067 */ 8068 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 8069 int fd, u32 flags) 8070 { 8071 const struct net_device_ops *ops = dev->netdev_ops; 8072 enum bpf_netdev_command query; 8073 struct bpf_prog *prog = NULL; 8074 bpf_op_t bpf_op, bpf_chk; 8075 bool offload; 8076 int err; 8077 8078 ASSERT_RTNL(); 8079 8080 offload = flags & XDP_FLAGS_HW_MODE; 8081 query = offload ? 
XDP_QUERY_PROG_HW : XDP_QUERY_PROG; 8082 8083 bpf_op = bpf_chk = ops->ndo_bpf; 8084 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { 8085 NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); 8086 return -EOPNOTSUPP; 8087 } 8088 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) 8089 bpf_op = generic_xdp_install; 8090 if (bpf_op == bpf_chk) 8091 bpf_chk = generic_xdp_install; 8092 8093 if (fd >= 0) { 8094 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { 8095 NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); 8096 return -EEXIST; 8097 } 8098 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && 8099 __dev_xdp_query(dev, bpf_op, query)) { 8100 NL_SET_ERR_MSG(extack, "XDP program already attached"); 8101 return -EBUSY; 8102 } 8103 8104 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 8105 bpf_op == ops->ndo_bpf); 8106 if (IS_ERR(prog)) 8107 return PTR_ERR(prog); 8108 8109 if (!offload && bpf_prog_is_dev_bound(prog->aux)) { 8110 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); 8111 bpf_prog_put(prog); 8112 return -EINVAL; 8113 } 8114 } 8115 8116 err = dev_xdp_install(dev, bpf_op, extack, flags, prog); 8117 if (err < 0 && prog) 8118 bpf_prog_put(prog); 8119 8120 return err; 8121 } 8122 8123 /** 8124 * dev_new_index - allocate an ifindex 8125 * @net: the applicable net namespace 8126 * 8127 * Returns a suitable unique value for a new device interface 8128 * number. The caller must hold the rtnl semaphore or the 8129 * dev_base_lock to be sure it remains unique. 8130 */ 8131 static int dev_new_index(struct net *net) 8132 { 8133 int ifindex = net->ifindex; 8134 8135 for (;;) { 8136 if (++ifindex <= 0) 8137 ifindex = 1; 8138 if (!__dev_get_by_index(net, ifindex)) 8139 return net->ifindex = ifindex; 8140 } 8141 } 8142 8143 /* Delayed registration/unregistration */ 8144 static LIST_HEAD(net_todo_list); 8145 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 8146 8147 static void net_set_todo(struct net_device *dev) 8148 { 8149 list_add_tail(&dev->todo_list, &net_todo_list); 8150 dev_net(dev)->dev_unreg_count++; 8151 } 8152 8153 static void rollback_registered_many(struct list_head *head) 8154 { 8155 struct net_device *dev, *tmp; 8156 LIST_HEAD(close_head); 8157 8158 BUG_ON(dev_boot_phase); 8159 ASSERT_RTNL(); 8160 8161 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 8162 /* Some devices call without registering 8163 * for initialization unwind. Remove those 8164 * devices and proceed with the remaining. 8165 */ 8166 if (dev->reg_state == NETREG_UNINITIALIZED) { 8167 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 8168 dev->name, dev); 8169 8170 WARN_ON(1); 8171 list_del(&dev->unreg_list); 8172 continue; 8173 } 8174 dev->dismantle = true; 8175 BUG_ON(dev->reg_state != NETREG_REGISTERED); 8176 } 8177 8178 /* If device is running, close it first. */ 8179 list_for_each_entry(dev, head, unreg_list) 8180 list_add_tail(&dev->close_list, &close_head); 8181 dev_close_many(&close_head, true); 8182 8183 list_for_each_entry(dev, head, unreg_list) { 8184 /* And unlink it from device chain. */ 8185 unlist_netdevice(dev); 8186 8187 dev->reg_state = NETREG_UNREGISTERING; 8188 } 8189 flush_all_backlogs(); 8190 8191 synchronize_net(); 8192 8193 list_for_each_entry(dev, head, unreg_list) { 8194 struct sk_buff *skb = NULL; 8195 8196 /* Shutdown queueing discipline.
*/ 8197 dev_shutdown(dev); 8198 8199 dev_xdp_uninstall(dev); 8200 8201 /* Notify protocols, that we are about to destroy 8202 * this device. They should clean all the things. 8203 */ 8204 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 8205 8206 if (!dev->rtnl_link_ops || 8207 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 8208 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 8209 GFP_KERNEL, NULL, 0); 8210 8211 /* 8212 * Flush the unicast and multicast chains 8213 */ 8214 dev_uc_flush(dev); 8215 dev_mc_flush(dev); 8216 8217 if (dev->netdev_ops->ndo_uninit) 8218 dev->netdev_ops->ndo_uninit(dev); 8219 8220 if (skb) 8221 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); 8222 8223 /* Notifier chain MUST detach us all upper devices. */ 8224 WARN_ON(netdev_has_any_upper_dev(dev)); 8225 WARN_ON(netdev_has_any_lower_dev(dev)); 8226 8227 /* Remove entries from kobject tree */ 8228 netdev_unregister_kobject(dev); 8229 #ifdef CONFIG_XPS 8230 /* Remove XPS queueing entries */ 8231 netif_reset_xps_queues_gt(dev, 0); 8232 #endif 8233 } 8234 8235 synchronize_net(); 8236 8237 list_for_each_entry(dev, head, unreg_list) 8238 dev_put(dev); 8239 } 8240 8241 static void rollback_registered(struct net_device *dev) 8242 { 8243 LIST_HEAD(single); 8244 8245 list_add(&dev->unreg_list, &single); 8246 rollback_registered_many(&single); 8247 list_del(&single); 8248 } 8249 8250 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 8251 struct net_device *upper, netdev_features_t features) 8252 { 8253 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 8254 netdev_features_t feature; 8255 int feature_bit; 8256 8257 for_each_netdev_feature(upper_disables, feature_bit) { 8258 feature = __NETIF_F_BIT(feature_bit); 8259 if (!(upper->wanted_features & feature) 8260 && (features & feature)) { 8261 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 8262 &feature, upper->name); 8263 features &= ~feature; 8264 } 8265 } 8266 8267 return features; 8268 } 8269 8270 static void netdev_sync_lower_features(struct net_device *upper, 8271 struct net_device *lower, netdev_features_t features) 8272 { 8273 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 8274 netdev_features_t feature; 8275 int feature_bit; 8276 8277 for_each_netdev_feature(upper_disables, feature_bit) { 8278 feature = __NETIF_F_BIT(feature_bit); 8279 if (!(features & feature) && (lower->features & feature)) { 8280 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 8281 &feature, lower->name); 8282 lower->wanted_features &= ~feature; 8283 netdev_update_features(lower); 8284 8285 if (unlikely(lower->features & feature)) 8286 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 8287 &feature, lower->name); 8288 } 8289 } 8290 } 8291 8292 static netdev_features_t netdev_fix_features(struct net_device *dev, 8293 netdev_features_t features) 8294 { 8295 /* Fix illegal checksum combinations */ 8296 if ((features & NETIF_F_HW_CSUM) && 8297 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 8298 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 8299 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 8300 } 8301 8302 /* TSO requires that SG is present as well. 
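 * (TSO'd skbs are generally non-linear, carrying their payload in page
 * fragments, so a device without scatter-gather support could not
 * transmit them.)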
*/ 8303 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 8304 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 8305 features &= ~NETIF_F_ALL_TSO; 8306 } 8307 8308 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 8309 !(features & NETIF_F_IP_CSUM)) { 8310 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 8311 features &= ~NETIF_F_TSO; 8312 features &= ~NETIF_F_TSO_ECN; 8313 } 8314 8315 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 8316 !(features & NETIF_F_IPV6_CSUM)) { 8317 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 8318 features &= ~NETIF_F_TSO6; 8319 } 8320 8321 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 8322 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 8323 features &= ~NETIF_F_TSO_MANGLEID; 8324 8325 /* TSO ECN requires that TSO is present as well. */ 8326 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 8327 features &= ~NETIF_F_TSO_ECN; 8328 8329 /* Software GSO depends on SG. */ 8330 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 8331 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 8332 features &= ~NETIF_F_GSO; 8333 } 8334 8335 /* GSO partial features require GSO partial be set */ 8336 if ((features & dev->gso_partial_features) && 8337 !(features & NETIF_F_GSO_PARTIAL)) { 8338 netdev_dbg(dev, 8339 "Dropping partially supported GSO features since no GSO partial.\n"); 8340 features &= ~dev->gso_partial_features; 8341 } 8342 8343 if (!(features & NETIF_F_RXCSUM)) { 8344 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 8345 * successfully merged by hardware must also have the 8346 * checksum verified by hardware. If the user does not 8347 * want to enable RXCSUM, logically, we should disable GRO_HW. 
8348 */ 8349 if (features & NETIF_F_GRO_HW) { 8350 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 8351 features &= ~NETIF_F_GRO_HW; 8352 } 8353 } 8354 8355 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 8356 if (features & NETIF_F_RXFCS) { 8357 if (features & NETIF_F_LRO) { 8358 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 8359 features &= ~NETIF_F_LRO; 8360 } 8361 8362 if (features & NETIF_F_GRO_HW) { 8363 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 8364 features &= ~NETIF_F_GRO_HW; 8365 } 8366 } 8367 8368 return features; 8369 } 8370 8371 int __netdev_update_features(struct net_device *dev) 8372 { 8373 struct net_device *upper, *lower; 8374 netdev_features_t features; 8375 struct list_head *iter; 8376 int err = -1; 8377 8378 ASSERT_RTNL(); 8379 8380 features = netdev_get_wanted_features(dev); 8381 8382 if (dev->netdev_ops->ndo_fix_features) 8383 features = dev->netdev_ops->ndo_fix_features(dev, features); 8384 8385 /* driver might be less strict about feature dependencies */ 8386 features = netdev_fix_features(dev, features); 8387 8388 /* some features can't be enabled if they're off on an upper device */ 8389 netdev_for_each_upper_dev_rcu(dev, upper, iter) 8390 features = netdev_sync_upper_features(dev, upper, features); 8391 8392 if (dev->features == features) 8393 goto sync_lower; 8394 8395 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 8396 &dev->features, &features); 8397 8398 if (dev->netdev_ops->ndo_set_features) 8399 err = dev->netdev_ops->ndo_set_features(dev, features); 8400 else 8401 err = 0; 8402 8403 if (unlikely(err < 0)) { 8404 netdev_err(dev, 8405 "set_features() failed (%d); wanted %pNF, left %pNF\n", 8406 err, &features, &dev->features); 8407 /* return non-0 since some features might have changed and 8408 * it's better to fire a spurious notification than miss it 8409 */ 8410 return -1; 8411 } 8412 8413 sync_lower: 8414 /* some features must be disabled on lower devices when disabled 8415 * on an upper device (think: bonding master or bridge) 8416 */ 8417 netdev_for_each_lower_dev(dev, lower, iter) 8418 netdev_sync_lower_features(dev, lower, features); 8419 8420 if (!err) { 8421 netdev_features_t diff = features ^ dev->features; 8422 8423 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 8424 /* udp_tunnel_{get,drop}_rx_info both need 8425 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 8426 * device, or they won't do anything. 8427 * Thus we need to update dev->features 8428 * *before* calling udp_tunnel_get_rx_info, 8429 * but *after* calling udp_tunnel_drop_rx_info. 8430 */ 8431 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 8432 dev->features = features; 8433 udp_tunnel_get_rx_info(dev); 8434 } else { 8435 udp_tunnel_drop_rx_info(dev); 8436 } 8437 } 8438 8439 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 8440 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 8441 dev->features = features; 8442 err |= vlan_get_rx_ctag_filter_info(dev); 8443 } else { 8444 vlan_drop_rx_ctag_filter_info(dev); 8445 } 8446 } 8447 8448 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 8449 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 8450 dev->features = features; 8451 err |= vlan_get_rx_stag_filter_info(dev); 8452 } else { 8453 vlan_drop_rx_stag_filter_info(dev); 8454 } 8455 } 8456 8457 dev->features = features; 8458 } 8459 8460 return err < 0 ?
0 : 1; 8461 } 8462 8463 /** 8464 * netdev_update_features - recalculate device features 8465 * @dev: the device to check 8466 * 8467 * Recalculate dev->features set and send notifications if it 8468 * has changed. Should be called after driver or hardware dependent 8469 * conditions might have changed that influence the features. 8470 */ 8471 void netdev_update_features(struct net_device *dev) 8472 { 8473 if (__netdev_update_features(dev)) 8474 netdev_features_change(dev); 8475 } 8476 EXPORT_SYMBOL(netdev_update_features); 8477 8478 /** 8479 * netdev_change_features - recalculate device features 8480 * @dev: the device to check 8481 * 8482 * Recalculate dev->features set and send notifications even 8483 * if they have not changed. Should be called instead of 8484 * netdev_update_features() if also dev->vlan_features might 8485 * have changed to allow the changes to be propagated to stacked 8486 * VLAN devices. 8487 */ 8488 void netdev_change_features(struct net_device *dev) 8489 { 8490 __netdev_update_features(dev); 8491 netdev_features_change(dev); 8492 } 8493 EXPORT_SYMBOL(netdev_change_features); 8494 8495 /** 8496 * netif_stacked_transfer_operstate - transfer operstate 8497 * @rootdev: the root or lower level device to transfer state from 8498 * @dev: the device to transfer operstate to 8499 * 8500 * Transfer operational state from root to device. This is normally 8501 * called when a stacking relationship exists between the root 8502 * device and the device(a leaf device). 8503 */ 8504 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 8505 struct net_device *dev) 8506 { 8507 if (rootdev->operstate == IF_OPER_DORMANT) 8508 netif_dormant_on(dev); 8509 else 8510 netif_dormant_off(dev); 8511 8512 if (netif_carrier_ok(rootdev)) 8513 netif_carrier_on(dev); 8514 else 8515 netif_carrier_off(dev); 8516 } 8517 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 8518 8519 static int netif_alloc_rx_queues(struct net_device *dev) 8520 { 8521 unsigned int i, count = dev->num_rx_queues; 8522 struct netdev_rx_queue *rx; 8523 size_t sz = count * sizeof(*rx); 8524 int err = 0; 8525 8526 BUG_ON(count < 1); 8527 8528 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8529 if (!rx) 8530 return -ENOMEM; 8531 8532 dev->_rx = rx; 8533 8534 for (i = 0; i < count; i++) { 8535 rx[i].dev = dev; 8536 8537 /* XDP RX-queue setup */ 8538 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i); 8539 if (err < 0) 8540 goto err_rxq_info; 8541 } 8542 return 0; 8543 8544 err_rxq_info: 8545 /* Rollback successful reg's and free other resources */ 8546 while (i--) 8547 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 8548 kvfree(dev->_rx); 8549 dev->_rx = NULL; 8550 return err; 8551 } 8552 8553 static void netif_free_rx_queues(struct net_device *dev) 8554 { 8555 unsigned int i, count = dev->num_rx_queues; 8556 8557 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 8558 if (!dev->_rx) 8559 return; 8560 8561 for (i = 0; i < count; i++) 8562 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 8563 8564 kvfree(dev->_rx); 8565 } 8566 8567 static void netdev_init_one_queue(struct net_device *dev, 8568 struct netdev_queue *queue, void *_unused) 8569 { 8570 /* Initialize queue lock */ 8571 spin_lock_init(&queue->_xmit_lock); 8572 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 8573 queue->xmit_lock_owner = -1; 8574 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 8575 queue->dev = dev; 8576 #ifdef CONFIG_BQL 8577 dql_init(&queue->dql, HZ); 8578 #endif 8579 } 8580 8581 static void 
netif_free_tx_queues(struct net_device *dev) 8582 { 8583 kvfree(dev->_tx); 8584 } 8585 8586 static int netif_alloc_netdev_queues(struct net_device *dev) 8587 { 8588 unsigned int count = dev->num_tx_queues; 8589 struct netdev_queue *tx; 8590 size_t sz = count * sizeof(*tx); 8591 8592 if (count < 1 || count > 0xffff) 8593 return -EINVAL; 8594 8595 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 8596 if (!tx) 8597 return -ENOMEM; 8598 8599 dev->_tx = tx; 8600 8601 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 8602 spin_lock_init(&dev->tx_global_lock); 8603 8604 return 0; 8605 } 8606 8607 void netif_tx_stop_all_queues(struct net_device *dev) 8608 { 8609 unsigned int i; 8610 8611 for (i = 0; i < dev->num_tx_queues; i++) { 8612 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 8613 8614 netif_tx_stop_queue(txq); 8615 } 8616 } 8617 EXPORT_SYMBOL(netif_tx_stop_all_queues); 8618 8619 /** 8620 * register_netdevice - register a network device 8621 * @dev: device to register 8622 * 8623 * Take a completed network device structure and add it to the kernel 8624 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 8625 * chain. 0 is returned on success. A negative errno code is returned 8626 * on a failure to set up the device, or if the name is a duplicate. 8627 * 8628 * Callers must hold the rtnl semaphore. You may want 8629 * register_netdev() instead of this. 8630 * 8631 * BUGS: 8632 * The locking appears insufficient to guarantee two parallel registers 8633 * will not get the same name. 8634 */ 8635 8636 int register_netdevice(struct net_device *dev) 8637 { 8638 int ret; 8639 struct net *net = dev_net(dev); 8640 8641 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 8642 NETDEV_FEATURE_COUNT); 8643 BUG_ON(dev_boot_phase); 8644 ASSERT_RTNL(); 8645 8646 might_sleep(); 8647 8648 /* When net_device's are persistent, this will be fatal. */ 8649 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 8650 BUG_ON(!net); 8651 8652 spin_lock_init(&dev->addr_list_lock); 8653 netdev_set_addr_lockdep_class(dev); 8654 8655 ret = dev_get_valid_name(net, dev, dev->name); 8656 if (ret < 0) 8657 goto out; 8658 8659 /* Init, if this function is available */ 8660 if (dev->netdev_ops->ndo_init) { 8661 ret = dev->netdev_ops->ndo_init(dev); 8662 if (ret) { 8663 if (ret > 0) 8664 ret = -EIO; 8665 goto out; 8666 } 8667 } 8668 8669 if (((dev->hw_features | dev->features) & 8670 NETIF_F_HW_VLAN_CTAG_FILTER) && 8671 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 8672 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 8673 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 8674 ret = -EINVAL; 8675 goto err_uninit; 8676 } 8677 8678 ret = -EBUSY; 8679 if (!dev->ifindex) 8680 dev->ifindex = dev_new_index(net); 8681 else if (__dev_get_by_index(net, dev->ifindex)) 8682 goto err_uninit; 8683 8684 /* Transfer changeable features to wanted_features and enable 8685 * software offloads (GSO and GRO). 8686 */ 8687 dev->hw_features |= NETIF_F_SOFT_FEATURES; 8688 dev->features |= NETIF_F_SOFT_FEATURES; 8689 8690 if (dev->netdev_ops->ndo_udp_tunnel_add) { 8691 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 8692 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 8693 } 8694 8695 dev->wanted_features = dev->features & dev->hw_features; 8696 8697 if (!(dev->flags & IFF_LOOPBACK)) 8698 dev->hw_features |= NETIF_F_NOCACHE_COPY; 8699 8700 /* If IPv4 TCP segmentation offload is supported we should also 8701 * allow the device to enable segmenting the frame with the option 8702 * of ignoring a static IP ID value. 
This doesn't enable the 8703 * feature itself but allows the user to enable it later. 8704 */ 8705 if (dev->hw_features & NETIF_F_TSO) 8706 dev->hw_features |= NETIF_F_TSO_MANGLEID; 8707 if (dev->vlan_features & NETIF_F_TSO) 8708 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 8709 if (dev->mpls_features & NETIF_F_TSO) 8710 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 8711 if (dev->hw_enc_features & NETIF_F_TSO) 8712 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 8713 8714 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 8715 */ 8716 dev->vlan_features |= NETIF_F_HIGHDMA; 8717 8718 /* Make NETIF_F_SG inheritable to tunnel devices. 8719 */ 8720 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 8721 8722 /* Make NETIF_F_SG inheritable to MPLS. 8723 */ 8724 dev->mpls_features |= NETIF_F_SG; 8725 8726 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 8727 ret = notifier_to_errno(ret); 8728 if (ret) 8729 goto err_uninit; 8730 8731 ret = netdev_register_kobject(dev); 8732 if (ret) 8733 goto err_uninit; 8734 dev->reg_state = NETREG_REGISTERED; 8735 8736 __netdev_update_features(dev); 8737 8738 /* 8739 * Default initial state at registry is that the 8740 * device is present. 8741 */ 8742 8743 set_bit(__LINK_STATE_PRESENT, &dev->state); 8744 8745 linkwatch_init_dev(dev); 8746 8747 dev_init_scheduler(dev); 8748 dev_hold(dev); 8749 list_netdevice(dev); 8750 add_device_randomness(dev->dev_addr, dev->addr_len); 8751 8752 /* If the device has permanent device address, driver should 8753 * set dev_addr and also addr_assign_type should be set to 8754 * NET_ADDR_PERM (default value). 8755 */ 8756 if (dev->addr_assign_type == NET_ADDR_PERM) 8757 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 8758 8759 /* Notify protocols, that a new device appeared. */ 8760 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 8761 ret = notifier_to_errno(ret); 8762 if (ret) { 8763 rollback_registered(dev); 8764 dev->reg_state = NETREG_UNREGISTERED; 8765 } 8766 /* 8767 * Prevent userspace races by waiting until the network 8768 * device is fully setup before sending notifications. 8769 */ 8770 if (!dev->rtnl_link_ops || 8771 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 8772 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 8773 8774 out: 8775 return ret; 8776 8777 err_uninit: 8778 if (dev->netdev_ops->ndo_uninit) 8779 dev->netdev_ops->ndo_uninit(dev); 8780 if (dev->priv_destructor) 8781 dev->priv_destructor(dev); 8782 goto out; 8783 } 8784 EXPORT_SYMBOL(register_netdevice); 8785 8786 /** 8787 * init_dummy_netdev - init a dummy network device for NAPI 8788 * @dev: device to init 8789 * 8790 * This takes a network device structure and initializes the minimum 8791 * amount of fields so it can be used to schedule NAPI polls without 8792 * registering a full blown interface. This is to be used by drivers 8793 * that need to tie several hardware interfaces to a single NAPI 8794 * poll scheduler due to HW limitations. 8795 */ 8796 int init_dummy_netdev(struct net_device *dev) 8797 { 8798 /* Clear everything.
Note we don't initialize spinlocks 8799 * as they aren't supposed to be taken by any of the 8800 * NAPI code and this dummy netdev is supposed to be 8801 * only ever used for NAPI polls 8802 */ 8803 memset(dev, 0, sizeof(struct net_device)); 8804 8805 /* make sure we BUG if trying to hit standard 8806 * register/unregister code path 8807 */ 8808 dev->reg_state = NETREG_DUMMY; 8809 8810 /* NAPI wants this */ 8811 INIT_LIST_HEAD(&dev->napi_list); 8812 8813 /* a dummy interface is started by default */ 8814 set_bit(__LINK_STATE_PRESENT, &dev->state); 8815 set_bit(__LINK_STATE_START, &dev->state); 8816 8817 /* napi_busy_loop stats accounting wants this */ 8818 dev_net_set(dev, &init_net); 8819 8820 /* Note : We don't allocate pcpu_refcnt for dummy devices, 8821 * because users of this 'device' don't need to change 8822 * its refcount. 8823 */ 8824 8825 return 0; 8826 } 8827 EXPORT_SYMBOL_GPL(init_dummy_netdev); 8828 8829 8830 /** 8831 * register_netdev - register a network device 8832 * @dev: device to register 8833 * 8834 * Take a completed network device structure and add it to the kernel 8835 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 8836 * chain. 0 is returned on success. A negative errno code is returned 8837 * on a failure to set up the device, or if the name is a duplicate. 8838 * 8839 * This is a wrapper around register_netdevice that takes the rtnl semaphore 8840 * and expands the device name if you passed a format string to 8841 * alloc_netdev. 8842 */ 8843 int register_netdev(struct net_device *dev) 8844 { 8845 int err; 8846 8847 if (rtnl_lock_killable()) 8848 return -EINTR; 8849 err = register_netdevice(dev); 8850 rtnl_unlock(); 8851 return err; 8852 } 8853 EXPORT_SYMBOL(register_netdev); 8854 8855 int netdev_refcnt_read(const struct net_device *dev) 8856 { 8857 int i, refcnt = 0; 8858 8859 for_each_possible_cpu(i) 8860 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 8861 return refcnt; 8862 } 8863 EXPORT_SYMBOL(netdev_refcnt_read); 8864 8865 /** 8866 * netdev_wait_allrefs - wait until all references are gone. 8867 * @dev: target net_device 8868 * 8869 * This is called when unregistering network devices. 8870 * 8871 * Any protocol or device that holds a reference should register 8872 * for netdevice notification, and cleanup and put back the 8873 * reference if they receive an UNREGISTER event. 8874 * We can get stuck here if buggy protocols don't correctly 8875 * call dev_put. 8876 */ 8877 static void netdev_wait_allrefs(struct net_device *dev) 8878 { 8879 unsigned long rebroadcast_time, warning_time; 8880 int refcnt; 8881 8882 linkwatch_forget_dev(dev); 8883 8884 rebroadcast_time = warning_time = jiffies; 8885 refcnt = netdev_refcnt_read(dev); 8886 8887 while (refcnt != 0) { 8888 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 8889 rtnl_lock(); 8890 8891 /* Rebroadcast unregister notification */ 8892 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 8893 8894 __rtnl_unlock(); 8895 rcu_barrier(); 8896 rtnl_lock(); 8897 8898 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 8899 &dev->state)) { 8900 /* We must not have linkwatch events 8901 * pending on unregister. If this 8902 * happens, we simply run the queue 8903 * unscheduled, resulting in a noop 8904 * for this device.
8905 */ 8906 linkwatch_run_queue(); 8907 } 8908 8909 __rtnl_unlock(); 8910 8911 rebroadcast_time = jiffies; 8912 } 8913 8914 msleep(250); 8915 8916 refcnt = netdev_refcnt_read(dev); 8917 8918 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { 8919 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 8920 dev->name, refcnt); 8921 warning_time = jiffies; 8922 } 8923 } 8924 } 8925 8926 /* The sequence is: 8927 * 8928 * rtnl_lock(); 8929 * ... 8930 * register_netdevice(x1); 8931 * register_netdevice(x2); 8932 * ... 8933 * unregister_netdevice(y1); 8934 * unregister_netdevice(y2); 8935 * ... 8936 * rtnl_unlock(); 8937 * free_netdev(y1); 8938 * free_netdev(y2); 8939 * 8940 * We are invoked by rtnl_unlock(). 8941 * This allows us to deal with problems: 8942 * 1) We can delete sysfs objects which invoke hotplug 8943 * without deadlocking with linkwatch via keventd. 8944 * 2) Since we run with the RTNL semaphore not held, we can sleep 8945 * safely in order to wait for the netdev refcnt to drop to zero. 8946 * 8947 * We must not return until all unregister events added during 8948 * the interval the lock was held have been completed. 8949 */ 8950 void netdev_run_todo(void) 8951 { 8952 struct list_head list; 8953 8954 /* Snapshot list, allow later requests */ 8955 list_replace_init(&net_todo_list, &list); 8956 8957 __rtnl_unlock(); 8958 8959 8960 /* Wait for rcu callbacks to finish before next phase */ 8961 if (!list_empty(&list)) 8962 rcu_barrier(); 8963 8964 while (!list_empty(&list)) { 8965 struct net_device *dev 8966 = list_first_entry(&list, struct net_device, todo_list); 8967 list_del(&dev->todo_list); 8968 8969 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 8970 pr_err("network todo '%s' but state %d\n", 8971 dev->name, dev->reg_state); 8972 dump_stack(); 8973 continue; 8974 } 8975 8976 dev->reg_state = NETREG_UNREGISTERED; 8977 8978 netdev_wait_allrefs(dev); 8979 8980 /* paranoia */ 8981 BUG_ON(netdev_refcnt_read(dev)); 8982 BUG_ON(!list_empty(&dev->ptype_all)); 8983 BUG_ON(!list_empty(&dev->ptype_specific)); 8984 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 8985 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 8986 #if IS_ENABLED(CONFIG_DECNET) 8987 WARN_ON(dev->dn_ptr); 8988 #endif 8989 if (dev->priv_destructor) 8990 dev->priv_destructor(dev); 8991 if (dev->needs_free_netdev) 8992 free_netdev(dev); 8993 8994 /* Report a network device has been unregistered */ 8995 rtnl_lock(); 8996 dev_net(dev)->dev_unreg_count--; 8997 __rtnl_unlock(); 8998 wake_up(&netdev_unregistering_wq); 8999 9000 /* Free network device */ 9001 kobject_put(&dev->dev.kobj); 9002 } 9003 } 9004 9005 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 9006 * all the same fields in the same order as net_device_stats, with only 9007 * the type differing, but rtnl_link_stats64 may have additional fields 9008 * at the end for newer counters. 
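 *
 * Illustrative use (a sketch under the assumption of a driver that still
 * keeps its counters in the legacy struct net_device_stats; foo_get_stats64
 * is a hypothetical name):
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *stats)
 *	{
 *		netdev_stats_to_stats64(stats, &dev->stats);
 *	}
 *
 * dev_get_stats() below performs the same conversion for drivers that
 * provide neither ndo_get_stats64 nor ndo_get_stats.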
9009 */ 9010 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 9011 const struct net_device_stats *netdev_stats) 9012 { 9013 #if BITS_PER_LONG == 64 9014 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 9015 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 9016 /* zero out counters that only exist in rtnl_link_stats64 */ 9017 memset((char *)stats64 + sizeof(*netdev_stats), 0, 9018 sizeof(*stats64) - sizeof(*netdev_stats)); 9019 #else 9020 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 9021 const unsigned long *src = (const unsigned long *)netdev_stats; 9022 u64 *dst = (u64 *)stats64; 9023 9024 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 9025 for (i = 0; i < n; i++) 9026 dst[i] = src[i]; 9027 /* zero out counters that only exist in rtnl_link_stats64 */ 9028 memset((char *)stats64 + n * sizeof(u64), 0, 9029 sizeof(*stats64) - n * sizeof(u64)); 9030 #endif 9031 } 9032 EXPORT_SYMBOL(netdev_stats_to_stats64); 9033 9034 /** 9035 * dev_get_stats - get network device statistics 9036 * @dev: device to get statistics from 9037 * @storage: place to store stats 9038 * 9039 * Get network statistics from device. Return @storage. 9040 * The device driver may provide its own method by setting 9041 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 9042 * otherwise the internal statistics structure is used. 9043 */ 9044 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 9045 struct rtnl_link_stats64 *storage) 9046 { 9047 const struct net_device_ops *ops = dev->netdev_ops; 9048 9049 if (ops->ndo_get_stats64) { 9050 memset(storage, 0, sizeof(*storage)); 9051 ops->ndo_get_stats64(dev, storage); 9052 } else if (ops->ndo_get_stats) { 9053 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 9054 } else { 9055 netdev_stats_to_stats64(storage, &dev->stats); 9056 } 9057 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 9058 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 9059 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 9060 return storage; 9061 } 9062 EXPORT_SYMBOL(dev_get_stats); 9063 9064 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 9065 { 9066 struct netdev_queue *queue = dev_ingress_queue(dev); 9067 9068 #ifdef CONFIG_NET_CLS_ACT 9069 if (queue) 9070 return queue; 9071 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 9072 if (!queue) 9073 return NULL; 9074 netdev_init_one_queue(dev, queue, NULL); 9075 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 9076 queue->qdisc_sleeping = &noop_qdisc; 9077 rcu_assign_pointer(dev->ingress_queue, queue); 9078 #endif 9079 return queue; 9080 } 9081 9082 static const struct ethtool_ops default_ethtool_ops; 9083 9084 void netdev_set_default_ethtool_ops(struct net_device *dev, 9085 const struct ethtool_ops *ops) 9086 { 9087 if (dev->ethtool_ops == &default_ethtool_ops) 9088 dev->ethtool_ops = ops; 9089 } 9090 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 9091 9092 void netdev_freemem(struct net_device *dev) 9093 { 9094 char *addr = (char *)dev - dev->padded; 9095 9096 kvfree(addr); 9097 } 9098 9099 /** 9100 * alloc_netdev_mqs - allocate network device 9101 * @sizeof_priv: size of private data to allocate space for 9102 * @name: device name format string 9103 * @name_assign_type: origin of device name 9104 * @setup: callback to initialize device 9105 * @txqs: the number of TX subqueues to allocate 9106 * @rxqs: the number of RX subqueues to allocate 9107 * 9108 * Allocates a struct net_device with 
private data area for driver use 9109 * and performs basic initialization. Also allocates subqueue structs 9110 * for each queue on the device. 9111 */ 9112 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 9113 unsigned char name_assign_type, 9114 void (*setup)(struct net_device *), 9115 unsigned int txqs, unsigned int rxqs) 9116 { 9117 struct net_device *dev; 9118 unsigned int alloc_size; 9119 struct net_device *p; 9120 9121 BUG_ON(strlen(name) >= sizeof(dev->name)); 9122 9123 if (txqs < 1) { 9124 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 9125 return NULL; 9126 } 9127 9128 if (rxqs < 1) { 9129 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 9130 return NULL; 9131 } 9132 9133 alloc_size = sizeof(struct net_device); 9134 if (sizeof_priv) { 9135 /* ensure 32-byte alignment of private area */ 9136 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 9137 alloc_size += sizeof_priv; 9138 } 9139 /* ensure 32-byte alignment of whole construct */ 9140 alloc_size += NETDEV_ALIGN - 1; 9141 9142 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 9143 if (!p) 9144 return NULL; 9145 9146 dev = PTR_ALIGN(p, NETDEV_ALIGN); 9147 dev->padded = (char *)dev - (char *)p; 9148 9149 dev->pcpu_refcnt = alloc_percpu(int); 9150 if (!dev->pcpu_refcnt) 9151 goto free_dev; 9152 9153 if (dev_addr_init(dev)) 9154 goto free_pcpu; 9155 9156 dev_mc_init(dev); 9157 dev_uc_init(dev); 9158 9159 dev_net_set(dev, &init_net); 9160 9161 dev->gso_max_size = GSO_MAX_SIZE; 9162 dev->gso_max_segs = GSO_MAX_SEGS; 9163 9164 INIT_LIST_HEAD(&dev->napi_list); 9165 INIT_LIST_HEAD(&dev->unreg_list); 9166 INIT_LIST_HEAD(&dev->close_list); 9167 INIT_LIST_HEAD(&dev->link_watch_list); 9168 INIT_LIST_HEAD(&dev->adj_list.upper); 9169 INIT_LIST_HEAD(&dev->adj_list.lower); 9170 INIT_LIST_HEAD(&dev->ptype_all); 9171 INIT_LIST_HEAD(&dev->ptype_specific); 9172 #ifdef CONFIG_NET_SCHED 9173 hash_init(dev->qdisc_hash); 9174 #endif 9175 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 9176 setup(dev); 9177 9178 if (!dev->tx_queue_len) { 9179 dev->priv_flags |= IFF_NO_QUEUE; 9180 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 9181 } 9182 9183 dev->num_tx_queues = txqs; 9184 dev->real_num_tx_queues = txqs; 9185 if (netif_alloc_netdev_queues(dev)) 9186 goto free_all; 9187 9188 dev->num_rx_queues = rxqs; 9189 dev->real_num_rx_queues = rxqs; 9190 if (netif_alloc_rx_queues(dev)) 9191 goto free_all; 9192 9193 strcpy(dev->name, name); 9194 dev->name_assign_type = name_assign_type; 9195 dev->group = INIT_NETDEV_GROUP; 9196 if (!dev->ethtool_ops) 9197 dev->ethtool_ops = &default_ethtool_ops; 9198 9199 nf_hook_ingress_init(dev); 9200 9201 return dev; 9202 9203 free_all: 9204 free_netdev(dev); 9205 return NULL; 9206 9207 free_pcpu: 9208 free_percpu(dev->pcpu_refcnt); 9209 free_dev: 9210 netdev_freemem(dev); 9211 return NULL; 9212 } 9213 EXPORT_SYMBOL(alloc_netdev_mqs); 9214 9215 /** 9216 * free_netdev - free network device 9217 * @dev: device 9218 * 9219 * This function does the last stage of destroying an allocated device 9220 * interface. The reference to the device object is released. If this 9221 * is the last reference then it will be freed. Must be called in process 9222 * context.
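 *
 * A minimal illustrative teardown sequence in a driver's remove path
 * (a sketch, not a quote from any driver) is:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * unless the driver set dev->needs_free_netdev, in which case the core
 * calls free_netdev() itself from netdev_run_todo().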
9223 */ 9224 void free_netdev(struct net_device *dev) 9225 { 9226 struct napi_struct *p, *n; 9227 9228 might_sleep(); 9229 netif_free_tx_queues(dev); 9230 netif_free_rx_queues(dev); 9231 9232 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 9233 9234 /* Flush device addresses */ 9235 dev_addr_flush(dev); 9236 9237 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 9238 netif_napi_del(p); 9239 9240 free_percpu(dev->pcpu_refcnt); 9241 dev->pcpu_refcnt = NULL; 9242 9243 /* Compatibility with error handling in drivers */ 9244 if (dev->reg_state == NETREG_UNINITIALIZED) { 9245 netdev_freemem(dev); 9246 return; 9247 } 9248 9249 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 9250 dev->reg_state = NETREG_RELEASED; 9251 9252 /* will free via device release */ 9253 put_device(&dev->dev); 9254 } 9255 EXPORT_SYMBOL(free_netdev); 9256 9257 /** 9258 * synchronize_net - Synchronize with packet receive processing 9259 * 9260 * Wait for packets currently being received to be done. 9261 * Does not block later packets from starting. 9262 */ 9263 void synchronize_net(void) 9264 { 9265 might_sleep(); 9266 if (rtnl_is_locked()) 9267 synchronize_rcu_expedited(); 9268 else 9269 synchronize_rcu(); 9270 } 9271 EXPORT_SYMBOL(synchronize_net); 9272 9273 /** 9274 * unregister_netdevice_queue - remove device from the kernel 9275 * @dev: device 9276 * @head: list 9277 * 9278 * This function shuts down a device interface and removes it 9279 * from the kernel tables. 9280 * If head is not NULL, the device is queued to be unregistered later. 9281 * 9282 * Callers must hold the rtnl semaphore. You may want 9283 * unregister_netdev() instead of this. 9284 */ 9285 9286 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 9287 { 9288 ASSERT_RTNL(); 9289 9290 if (head) { 9291 list_move_tail(&dev->unreg_list, head); 9292 } else { 9293 rollback_registered(dev); 9294 /* Finish processing unregister after unlock */ 9295 net_set_todo(dev); 9296 } 9297 } 9298 EXPORT_SYMBOL(unregister_netdevice_queue); 9299 9300 /** 9301 * unregister_netdevice_many - unregister many devices 9302 * @head: list of devices 9303 * 9304 * Note: As most callers use a stack allocated list_head, 9305 * we force a list_del() to make sure stack won't be corrupted later. 9306 */ 9307 void unregister_netdevice_many(struct list_head *head) 9308 { 9309 struct net_device *dev; 9310 9311 if (!list_empty(head)) { 9312 rollback_registered_many(head); 9313 list_for_each_entry(dev, head, unreg_list) 9314 net_set_todo(dev); 9315 list_del(head); 9316 } 9317 } 9318 EXPORT_SYMBOL(unregister_netdevice_many); 9319 9320 /** 9321 * unregister_netdev - remove device from the kernel 9322 * @dev: device 9323 * 9324 * This function shuts down a device interface and removes it 9325 * from the kernel tables. 9326 * 9327 * This is just a wrapper for unregister_netdevice that takes 9328 * the rtnl semaphore. In general you want to use this and not 9329 * unregister_netdevice. 9330 */ 9331 void unregister_netdev(struct net_device *dev) 9332 { 9333 rtnl_lock(); 9334 unregister_netdevice(dev); 9335 rtnl_unlock(); 9336 } 9337 EXPORT_SYMBOL(unregister_netdev); 9338 9339 /** 9340 * dev_change_net_namespace - move device to a different network namespace 9341 * @dev: device 9342 * @net: network namespace 9343 * @pat: If not NULL name pattern to try if the current device name 9344 * is already taken in the destination network namespace. 9345 * 9346 * This function shuts down a device interface and moves it 9347 * to a new network namespace.
On success 0 is returned, on 9348 * a failure a negative errno code is returned. 9349 * 9350 * Callers must hold the rtnl semaphore. 9351 */ 9352 9353 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 9354 { 9355 int err, new_nsid, new_ifindex; 9356 9357 ASSERT_RTNL(); 9358 9359 /* Don't allow namespace local devices to be moved. */ 9360 err = -EINVAL; 9361 if (dev->features & NETIF_F_NETNS_LOCAL) 9362 goto out; 9363 9364 /* Ensure the device has been registered */ 9365 if (dev->reg_state != NETREG_REGISTERED) 9366 goto out; 9367 9368 /* Get out if there is nothing to do */ 9369 err = 0; 9370 if (net_eq(dev_net(dev), net)) 9371 goto out; 9372 9373 /* Pick the destination device name, and ensure 9374 * we can use it in the destination network namespace. 9375 */ 9376 err = -EEXIST; 9377 if (__dev_get_by_name(net, dev->name)) { 9378 /* We get here if we can't use the current device name */ 9379 if (!pat) 9380 goto out; 9381 err = dev_get_valid_name(net, dev, pat); 9382 if (err < 0) 9383 goto out; 9384 } 9385 9386 /* 9387 * And now a mini version of register_netdevice and unregister_netdevice. 9388 */ 9389 9390 /* If device is running close it first. */ 9391 dev_close(dev); 9392 9393 /* And unlink it from device chain */ 9394 unlist_netdevice(dev); 9395 9396 synchronize_net(); 9397 9398 /* Shutdown queueing discipline. */ 9399 dev_shutdown(dev); 9400 9401 /* Notify protocols, that we are about to destroy 9402 * this device. They should clean all the things. 9403 * 9404 * Note that dev->reg_state stays at NETREG_REGISTERED. 9405 * This is wanted because this way 8021q and macvlan know 9406 * the device is just moving and can keep their slaves up. 9407 */ 9408 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 9409 rcu_barrier(); 9410 9411 new_nsid = peernet2id_alloc(dev_net(dev), net); 9412 /* If there is an ifindex conflict assign a new one */ 9413 if (__dev_get_by_index(net, dev->ifindex)) 9414 new_ifindex = dev_new_index(net); 9415 else 9416 new_ifindex = dev->ifindex; 9417 9418 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 9419 new_ifindex); 9420 9421 /* 9422 * Flush the unicast and multicast chains 9423 */ 9424 dev_uc_flush(dev); 9425 dev_mc_flush(dev); 9426 9427 /* Send a netdev-removed uevent to the old namespace */ 9428 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 9429 netdev_adjacent_del_links(dev); 9430 9431 /* Actually switch the network namespace */ 9432 dev_net_set(dev, net); 9433 dev->ifindex = new_ifindex; 9434 9435 /* Send a netdev-add uevent to the new namespace */ 9436 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 9437 netdev_adjacent_add_links(dev); 9438 9439 /* Fixup kobjects */ 9440 err = device_rename(&dev->dev, dev->name); 9441 WARN_ON(err); 9442 9443 /* Add the device back in the hashes */ 9444 list_netdevice(dev); 9445 9446 /* Notify protocols, that a new device appeared. */ 9447 call_netdevice_notifiers(NETDEV_REGISTER, dev); 9448 9449 /* 9450 * Prevent userspace races by waiting until the network 9451 * device is fully setup before sending notifications.
9452 */ 9453 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 9454 9455 synchronize_net(); 9456 err = 0; 9457 out: 9458 return err; 9459 } 9460 EXPORT_SYMBOL_GPL(dev_change_net_namespace); 9461 9462 static int dev_cpu_dead(unsigned int oldcpu) 9463 { 9464 struct sk_buff **list_skb; 9465 struct sk_buff *skb; 9466 unsigned int cpu; 9467 struct softnet_data *sd, *oldsd, *remsd = NULL; 9468 9469 local_irq_disable(); 9470 cpu = smp_processor_id(); 9471 sd = &per_cpu(softnet_data, cpu); 9472 oldsd = &per_cpu(softnet_data, oldcpu); 9473 9474 /* Find end of our completion_queue. */ 9475 list_skb = &sd->completion_queue; 9476 while (*list_skb) 9477 list_skb = &(*list_skb)->next; 9478 /* Append completion queue from offline CPU. */ 9479 *list_skb = oldsd->completion_queue; 9480 oldsd->completion_queue = NULL; 9481 9482 /* Append output queue from offline CPU. */ 9483 if (oldsd->output_queue) { 9484 *sd->output_queue_tailp = oldsd->output_queue; 9485 sd->output_queue_tailp = oldsd->output_queue_tailp; 9486 oldsd->output_queue = NULL; 9487 oldsd->output_queue_tailp = &oldsd->output_queue; 9488 } 9489 /* Append NAPI poll list from offline CPU, with one exception : 9490 * process_backlog() must be called by cpu owning percpu backlog. 9491 * We properly handle process_queue & input_pkt_queue later. 9492 */ 9493 while (!list_empty(&oldsd->poll_list)) { 9494 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 9495 struct napi_struct, 9496 poll_list); 9497 9498 list_del_init(&napi->poll_list); 9499 if (napi->poll == process_backlog) 9500 napi->state = 0; 9501 else 9502 ____napi_schedule(sd, napi); 9503 } 9504 9505 raise_softirq_irqoff(NET_TX_SOFTIRQ); 9506 local_irq_enable(); 9507 9508 #ifdef CONFIG_RPS 9509 remsd = oldsd->rps_ipi_list; 9510 oldsd->rps_ipi_list = NULL; 9511 #endif 9512 /* send out pending IPI's on offline CPU */ 9513 net_rps_send_ipi(remsd); 9514 9515 /* Process offline CPU's input_pkt_queue */ 9516 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 9517 netif_rx_ni(skb); 9518 input_queue_head_incr(oldsd); 9519 } 9520 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 9521 netif_rx_ni(skb); 9522 input_queue_head_incr(oldsd); 9523 } 9524 9525 return 0; 9526 } 9527 9528 /** 9529 * netdev_increment_features - increment feature set by one 9530 * @all: current feature set 9531 * @one: new feature set 9532 * @mask: mask feature set 9533 * 9534 * Computes a new feature set after adding a device with feature set 9535 * @one to the master device with current feature set @all. Will not 9536 * enable anything that is off in @mask. Returns the new feature set. 9537 */ 9538 netdev_features_t netdev_increment_features(netdev_features_t all, 9539 netdev_features_t one, netdev_features_t mask) 9540 { 9541 if (mask & NETIF_F_HW_CSUM) 9542 mask |= NETIF_F_CSUM_MASK; 9543 mask |= NETIF_F_VLAN_CHALLENGED; 9544 9545 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 9546 all &= one | ~NETIF_F_ALL_FOR_ALL; 9547 9548 /* If one device supports hw checksumming, set for all. 
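 * NETIF_F_HW_CSUM means the device can checksum packets of any protocol,
 * so the narrower NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM bits are redundant
 * and are cleared below.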
*/ 9549 if (all & NETIF_F_HW_CSUM) 9550 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 9551 9552 return all; 9553 } 9554 EXPORT_SYMBOL(netdev_increment_features); 9555 9556 static struct hlist_head * __net_init netdev_create_hash(void) 9557 { 9558 int i; 9559 struct hlist_head *hash; 9560 9561 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 9562 if (hash != NULL) 9563 for (i = 0; i < NETDEV_HASHENTRIES; i++) 9564 INIT_HLIST_HEAD(&hash[i]); 9565 9566 return hash; 9567 } 9568 9569 /* Initialize per network namespace state */ 9570 static int __net_init netdev_init(struct net *net) 9571 { 9572 BUILD_BUG_ON(GRO_HASH_BUCKETS > 9573 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask)); 9574 9575 if (net != &init_net) 9576 INIT_LIST_HEAD(&net->dev_base_head); 9577 9578 net->dev_name_head = netdev_create_hash(); 9579 if (net->dev_name_head == NULL) 9580 goto err_name; 9581 9582 net->dev_index_head = netdev_create_hash(); 9583 if (net->dev_index_head == NULL) 9584 goto err_idx; 9585 9586 return 0; 9587 9588 err_idx: 9589 kfree(net->dev_name_head); 9590 err_name: 9591 return -ENOMEM; 9592 } 9593 9594 /** 9595 * netdev_drivername - network driver for the device 9596 * @dev: network device 9597 * 9598 * Determine network driver for device. 9599 */ 9600 const char *netdev_drivername(const struct net_device *dev) 9601 { 9602 const struct device_driver *driver; 9603 const struct device *parent; 9604 const char *empty = ""; 9605 9606 parent = dev->dev.parent; 9607 if (!parent) 9608 return empty; 9609 9610 driver = parent->driver; 9611 if (driver && driver->name) 9612 return driver->name; 9613 return empty; 9614 } 9615 9616 static void __netdev_printk(const char *level, const struct net_device *dev, 9617 struct va_format *vaf) 9618 { 9619 if (dev && dev->dev.parent) { 9620 dev_printk_emit(level[1] - '0', 9621 dev->dev.parent, 9622 "%s %s %s%s: %pV", 9623 dev_driver_string(dev->dev.parent), 9624 dev_name(dev->dev.parent), 9625 netdev_name(dev), netdev_reg_state(dev), 9626 vaf); 9627 } else if (dev) { 9628 printk("%s%s%s: %pV", 9629 level, netdev_name(dev), netdev_reg_state(dev), vaf); 9630 } else { 9631 printk("%s(NULL net_device): %pV", level, vaf); 9632 } 9633 } 9634 9635 void netdev_printk(const char *level, const struct net_device *dev, 9636 const char *format, ...) 9637 { 9638 struct va_format vaf; 9639 va_list args; 9640 9641 va_start(args, format); 9642 9643 vaf.fmt = format; 9644 vaf.va = &args; 9645 9646 __netdev_printk(level, dev, &vaf); 9647 9648 va_end(args); 9649 } 9650 EXPORT_SYMBOL(netdev_printk); 9651 9652 #define define_netdev_printk_level(func, level) \ 9653 void func(const struct net_device *dev, const char *fmt, ...) 
\ 9654 { \ 9655 struct va_format vaf; \ 9656 va_list args; \ 9657 \ 9658 va_start(args, fmt); \ 9659 \ 9660 vaf.fmt = fmt; \ 9661 vaf.va = &args; \ 9662 \ 9663 __netdev_printk(level, dev, &vaf); \ 9664 \ 9665 va_end(args); \ 9666 } \ 9667 EXPORT_SYMBOL(func); 9668 9669 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 9670 define_netdev_printk_level(netdev_alert, KERN_ALERT); 9671 define_netdev_printk_level(netdev_crit, KERN_CRIT); 9672 define_netdev_printk_level(netdev_err, KERN_ERR); 9673 define_netdev_printk_level(netdev_warn, KERN_WARNING); 9674 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 9675 define_netdev_printk_level(netdev_info, KERN_INFO); 9676 9677 static void __net_exit netdev_exit(struct net *net) 9678 { 9679 kfree(net->dev_name_head); 9680 kfree(net->dev_index_head); 9681 if (net != &init_net) 9682 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 9683 } 9684 9685 static struct pernet_operations __net_initdata netdev_net_ops = { 9686 .init = netdev_init, 9687 .exit = netdev_exit, 9688 }; 9689 9690 static void __net_exit default_device_exit(struct net *net) 9691 { 9692 struct net_device *dev, *aux; 9693 /* 9694 * Push all migratable network devices back to the 9695 * initial network namespace 9696 */ 9697 rtnl_lock(); 9698 for_each_netdev_safe(net, dev, aux) { 9699 int err; 9700 char fb_name[IFNAMSIZ]; 9701 9702 /* Ignore unmoveable devices (i.e. loopback) */ 9703 if (dev->features & NETIF_F_NETNS_LOCAL) 9704 continue; 9705 9706 /* Leave virtual devices for the generic cleanup */ 9707 if (dev->rtnl_link_ops) 9708 continue; 9709 9710 /* Push remaining network devices to init_net */ 9711 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 9712 err = dev_change_net_namespace(dev, &init_net, fb_name); 9713 if (err) { 9714 pr_emerg("%s: failed to move %s to init_net: %d\n", 9715 __func__, dev->name, err); 9716 BUG(); 9717 } 9718 } 9719 rtnl_unlock(); 9720 } 9721 9722 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list) 9723 { 9724 /* Return with the rtnl_lock held when there are no network 9725 * devices unregistering in any network namespace in net_list. 9726 */ 9727 struct net *net; 9728 bool unregistering; 9729 DEFINE_WAIT_FUNC(wait, woken_wake_function); 9730 9731 add_wait_queue(&netdev_unregistering_wq, &wait); 9732 for (;;) { 9733 unregistering = false; 9734 rtnl_lock(); 9735 list_for_each_entry(net, net_list, exit_list) { 9736 if (net->dev_unreg_count > 0) { 9737 unregistering = true; 9738 break; 9739 } 9740 } 9741 if (!unregistering) 9742 break; 9743 __rtnl_unlock(); 9744 9745 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 9746 } 9747 remove_wait_queue(&netdev_unregistering_wq, &wait); 9748 } 9749 9750 static void __net_exit default_device_exit_batch(struct list_head *net_list) 9751 { 9752 /* At exit all network devices must be removed from a network 9753 * namespace. Do this in the reverse order of registration. 9754 * Do this across as many network namespaces as possible to 9755 * improve batching efficiency. 9756 */ 9757 struct net_device *dev; 9758 struct net *net; 9759 LIST_HEAD(dev_kill_list); 9760 9761 /* To prevent network device cleanup code from dereferencing 9762 * loopback devices or network devices that have been freed 9763 * wait here for all pending unregistrations to complete, 9764 * before unregistering the loopback device and allowing the 9765 * network namespace to be freed.
9766 * 9767 * The netdev todo list containing all network device 9768 * unregistrations that happen in default_device_exit_batch 9769 * will run in the rtnl_unlock() at the end of 9770 * default_device_exit_batch. 9771 */ 9772 rtnl_lock_unregistering(net_list); 9773 list_for_each_entry(net, net_list, exit_list) { 9774 for_each_netdev_reverse(net, dev) { 9775 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 9776 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 9777 else 9778 unregister_netdevice_queue(dev, &dev_kill_list); 9779 } 9780 } 9781 unregister_netdevice_many(&dev_kill_list); 9782 rtnl_unlock(); 9783 } 9784 9785 static struct pernet_operations __net_initdata default_device_ops = { 9786 .exit = default_device_exit, 9787 .exit_batch = default_device_exit_batch, 9788 }; 9789 9790 /* 9791 * Initialize the DEV module. At boot time this walks the device list and 9792 * unhooks any devices that fail to initialise (normally hardware not 9793 * present) and leaves us with a valid list of present and active devices. 9794 * 9795 */ 9796 9797 /* 9798 * This is called single threaded during boot, so no need 9799 * to take the rtnl semaphore. 9800 */ 9801 static int __init net_dev_init(void) 9802 { 9803 int i, rc = -ENOMEM; 9804 9805 BUG_ON(!dev_boot_phase); 9806 9807 if (dev_proc_init()) 9808 goto out; 9809 9810 if (netdev_kobject_init()) 9811 goto out; 9812 9813 INIT_LIST_HEAD(&ptype_all); 9814 for (i = 0; i < PTYPE_HASH_SIZE; i++) 9815 INIT_LIST_HEAD(&ptype_base[i]); 9816 9817 INIT_LIST_HEAD(&offload_base); 9818 9819 if (register_pernet_subsys(&netdev_net_ops)) 9820 goto out; 9821 9822 /* 9823 * Initialise the packet receive queues. 9824 */ 9825 9826 for_each_possible_cpu(i) { 9827 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 9828 struct softnet_data *sd = &per_cpu(softnet_data, i); 9829 9830 INIT_WORK(flush, flush_backlog); 9831 9832 skb_queue_head_init(&sd->input_pkt_queue); 9833 skb_queue_head_init(&sd->process_queue); 9834 #ifdef CONFIG_XFRM_OFFLOAD 9835 skb_queue_head_init(&sd->xfrm_backlog); 9836 #endif 9837 INIT_LIST_HEAD(&sd->poll_list); 9838 sd->output_queue_tailp = &sd->output_queue; 9839 #ifdef CONFIG_RPS 9840 sd->csd.func = rps_trigger_softirq; 9841 sd->csd.info = sd; 9842 sd->cpu = i; 9843 #endif 9844 9845 init_gro_hash(&sd->backlog); 9846 sd->backlog.poll = process_backlog; 9847 sd->backlog.weight = weight_p; 9848 } 9849 9850 dev_boot_phase = 0; 9851 9852 /* The loopback device is special. If any other network device 9853 * is present in a network namespace the loopback device must 9854 * be present. Since we now dynamically allocate and free the 9855 * loopback device, ensure this invariant is maintained by 9856 * keeping the loopback device as the first device on the 9857 * list of network devices, ensuring the loopback device 9858 * is the first device that appears and the last network device 9859 * that disappears. 9860 */ 9861 if (register_pernet_device(&loopback_net_ops)) 9862 goto out; 9863 9864 if (register_pernet_device(&default_device_ops)) 9865 goto out; 9866 9867 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 9868 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 9869 9870 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 9871 NULL, dev_cpu_dead); 9872 WARN_ON(rc < 0); 9873 rc = 0; 9874 out: 9875 return rc; 9876 } 9877 9878 subsys_initcall(net_dev_init); 9879