// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>

#include "dev.h"
#include "net-sysfs.h"

static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock_irqsave(struct softnet_data *sd,
				    unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
					  unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	netdev_name_node_del(name_node);
	synchronize_rcu();
	__netdev_name_node_alt_destroy(name_node);

	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock(&dev_base_lock);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_add(net, name_node);

	/* We reserved the ifindex, this can't fail */
	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev, bool lock)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	xa_erase(&net->dev_by_index, dev->ifindex);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_del(name_node);

	/* Unlink dev from the device chain */
	if (lock)
		write_lock(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	if (lock)
		write_unlock(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
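
/*
 * Usage sketch (illustrative only, not part of this file): a module that
 * taps a local-experimental EtherType with dev_add_pack()/dev_remove_pack().
 * The handler and structure names below are made up for the example.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		// consume the skb; the handler owns it and must free it
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),	// IEEE local experimental
 *		.func = example_rcv,		// NULL .dev taps all devices
 *	};
 *
 *	dev_add_pack(&example_pt);
 *	...
 *	dev_remove_pack(&example_pt);	// sleeps until no CPU sees the handler
 */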

/*******************************************************************************
 *
 *			Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 * netdev_get_by_name() - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use netdev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, name);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_name);
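
/*
 * Usage sketch (illustrative only, not part of this file): taking and
 * releasing a tracked reference on a device looked up by name. The variable
 * names and the "eth0" interface are made up for the example.
 *
 *	netdevice_tracker tracker;
 *	struct net_device *dev;
 *
 *	dev = netdev_get_by_name(&init_net, "eth0", &tracker, GFP_KERNEL);
 *	if (dev) {
 *		// use dev; the reference is accounted against 'tracker'
 *		netdev_put(dev, &tracker);
 *	}
 */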

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * netdev_get_by_index() - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Search for an interface by index. Returns NULL if the device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@res: result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *res)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;
	char buf[IFNAMSIZ];

	/* Verify the string as this thing may have come from the user.
	 * There must be one "%d" and no other "%" characters.
	 */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	/* Use one page as a bit array of possible slots */
	inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
	if (!inuse)
		return -ENOMEM;

	for_each_netdev(net, d) {
		struct netdev_name_node *name_node;

		netdev_for_each_altname(d, name_node) {
			if (!sscanf(name_node->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, name_node->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}
		if (!sscanf(d->name, name, &i))
			continue;
		if (i < 0 || i >= max_netdevices)
			continue;

		/* avoid cases where sscanf is not exact inverse of printf */
		snprintf(buf, IFNAMSIZ, name, i);
		if (!strncmp(buf, d->name, IFNAMSIZ))
			__set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, max_netdevices);
	bitmap_free(inuse);
	if (i == max_netdevices)
		return -ENFILE;

	/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
	strscpy(buf, name, IFNAMSIZ);
	snprintf(res, IFNAMSIZ, buf, i);
	return i;
}

/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name,
			       int dup_errno)
{
	if (!dev_valid_name(want_name))
		return -EINVAL;

	if (strchr(want_name, '%'))
		return __dev_alloc_name(net, want_name, out_name);

	if (netdev_name_in_use(net, want_name))
		return -dup_errno;
	if (out_name != want_name)
		strscpy(out_name, want_name, IFNAMSIZ);
	return 0;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	int ret;

	ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
	return ret < 0 ? ret : 0;
}
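
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants a kernel-assigned unit number passes a "%d" format string before
 * registration. "mydev%d" and the error label are made up for the example.
 *
 *	rtnl_lock();
 *	err = dev_alloc_name(dev, "mydev%d");	// fills dev->name, e.g. "mydev0"
 *	if (err < 0)				// -EINVAL, -ENFILE, ...
 *		goto out_unlock;
 *	err = register_netdevice(dev);
 * out_unlock:
 *	rtnl_unlock();
 */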

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock(&dev_base_lock);

	synchronize_rcu();

	write_lock(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away,  e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
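
/*
 * Usage sketch (illustrative only, not part of this file): bringing an
 * interface up from kernel code. dev_open() and dev_close() expect the
 * caller to hold the RTNL lock; "eth0" is a made-up interface name.
 *
 *	struct net_device *dev;
 *	int err = -ENODEV;
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		err = dev_open(dev, NULL);	// no-op if already IFF_UP
 *	rtnl_unlock();
 */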

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	N(XDP_FEAT_CHANGE)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
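
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem
 * reacting to device up/down events. The callback and structure names are
 * made up for the example.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			// device came up; UP is also replayed for existing
 *			// devices when the notifier is registered
 *			break;
 *		case NETDEV_GOING_DOWN:
 *		case NETDEV_DOWN:
 *			// stop using 'dev'
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 *	...
 *	unregister_netdevice_notifier(&example_nb);
 */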

static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);

static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
{
	__unregister_netdevice_notifier_net(src_net, nb);
	__register_netdevice_notifier_net(dst_net, nb, true);
}

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list)
		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
1963 */ 1964 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1965 if (ret & NOTIFY_STOP_MASK) 1966 return ret; 1967 return raw_notifier_call_chain(&netdev_chain, val, info); 1968 } 1969 1970 /** 1971 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks 1972 * for and rollback on error 1973 * @val_up: value passed unmodified to notifier function 1974 * @val_down: value passed unmodified to the notifier function when 1975 * recovering from an error on @val_up 1976 * @info: notifier information data 1977 * 1978 * Call all per-netns network notifier blocks, but not notifier blocks on 1979 * the global notifier chain. Parameters and return value are as for 1980 * raw_notifier_call_chain_robust(). 1981 */ 1982 1983 static int 1984 call_netdevice_notifiers_info_robust(unsigned long val_up, 1985 unsigned long val_down, 1986 struct netdev_notifier_info *info) 1987 { 1988 struct net *net = dev_net(info->dev); 1989 1990 ASSERT_RTNL(); 1991 1992 return raw_notifier_call_chain_robust(&net->netdev_chain, 1993 val_up, val_down, info); 1994 } 1995 1996 static int call_netdevice_notifiers_extack(unsigned long val, 1997 struct net_device *dev, 1998 struct netlink_ext_ack *extack) 1999 { 2000 struct netdev_notifier_info info = { 2001 .dev = dev, 2002 .extack = extack, 2003 }; 2004 2005 return call_netdevice_notifiers_info(val, &info); 2006 } 2007 2008 /** 2009 * call_netdevice_notifiers - call all network notifier blocks 2010 * @val: value passed unmodified to notifier function 2011 * @dev: net_device pointer passed unmodified to notifier function 2012 * 2013 * Call all network notifier blocks. Parameters and return value 2014 * are as for raw_notifier_call_chain(). 2015 */ 2016 2017 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 2018 { 2019 return call_netdevice_notifiers_extack(val, dev, NULL); 2020 } 2021 EXPORT_SYMBOL(call_netdevice_notifiers); 2022 2023 /** 2024 * call_netdevice_notifiers_mtu - call all network notifier blocks 2025 * @val: value passed unmodified to notifier function 2026 * @dev: net_device pointer passed unmodified to notifier function 2027 * @arg: additional u32 argument passed to the notifier function 2028 * 2029 * Call all network notifier blocks. Parameters and return value 2030 * are as for raw_notifier_call_chain(). 
2031 */ 2032 static int call_netdevice_notifiers_mtu(unsigned long val, 2033 struct net_device *dev, u32 arg) 2034 { 2035 struct netdev_notifier_info_ext info = { 2036 .info.dev = dev, 2037 .ext.mtu = arg, 2038 }; 2039 2040 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2041 2042 return call_netdevice_notifiers_info(val, &info.info); 2043 } 2044 2045 #ifdef CONFIG_NET_INGRESS 2046 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2047 2048 void net_inc_ingress_queue(void) 2049 { 2050 static_branch_inc(&ingress_needed_key); 2051 } 2052 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2053 2054 void net_dec_ingress_queue(void) 2055 { 2056 static_branch_dec(&ingress_needed_key); 2057 } 2058 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2059 #endif 2060 2061 #ifdef CONFIG_NET_EGRESS 2062 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2063 2064 void net_inc_egress_queue(void) 2065 { 2066 static_branch_inc(&egress_needed_key); 2067 } 2068 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2069 2070 void net_dec_egress_queue(void) 2071 { 2072 static_branch_dec(&egress_needed_key); 2073 } 2074 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2075 #endif 2076 2077 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2078 EXPORT_SYMBOL(netstamp_needed_key); 2079 #ifdef CONFIG_JUMP_LABEL 2080 static atomic_t netstamp_needed_deferred; 2081 static atomic_t netstamp_wanted; 2082 static void netstamp_clear(struct work_struct *work) 2083 { 2084 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2085 int wanted; 2086 2087 wanted = atomic_add_return(deferred, &netstamp_wanted); 2088 if (wanted > 0) 2089 static_branch_enable(&netstamp_needed_key); 2090 else 2091 static_branch_disable(&netstamp_needed_key); 2092 } 2093 static DECLARE_WORK(netstamp_work, netstamp_clear); 2094 #endif 2095 2096 void net_enable_timestamp(void) 2097 { 2098 #ifdef CONFIG_JUMP_LABEL 2099 int wanted = atomic_read(&netstamp_wanted); 2100 2101 while (wanted > 0) { 2102 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1)) 2103 return; 2104 } 2105 atomic_inc(&netstamp_needed_deferred); 2106 schedule_work(&netstamp_work); 2107 #else 2108 static_branch_inc(&netstamp_needed_key); 2109 #endif 2110 } 2111 EXPORT_SYMBOL(net_enable_timestamp); 2112 2113 void net_disable_timestamp(void) 2114 { 2115 #ifdef CONFIG_JUMP_LABEL 2116 int wanted = atomic_read(&netstamp_wanted); 2117 2118 while (wanted > 1) { 2119 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1)) 2120 return; 2121 } 2122 atomic_dec(&netstamp_needed_deferred); 2123 schedule_work(&netstamp_work); 2124 #else 2125 static_branch_dec(&netstamp_needed_key); 2126 #endif 2127 } 2128 EXPORT_SYMBOL(net_disable_timestamp); 2129 2130 static inline void net_timestamp_set(struct sk_buff *skb) 2131 { 2132 skb->tstamp = 0; 2133 skb->mono_delivery_time = 0; 2134 if (static_branch_unlikely(&netstamp_needed_key)) 2135 skb->tstamp = ktime_get_real(); 2136 } 2137 2138 #define net_timestamp_check(COND, SKB) \ 2139 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2140 if ((COND) && !(SKB)->tstamp) \ 2141 (SKB)->tstamp = ktime_get_real(); \ 2142 } \ 2143 2144 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2145 { 2146 return __is_skb_forwardable(dev, skb, true); 2147 } 2148 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2149 2150 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2151 bool check_mtu) 2152 { 2153 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2154 2155 if (likely(!ret)) { 2156 skb->protocol = eth_type_trans(skb, 
dev); 2157 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2158 } 2159 2160 return ret; 2161 } 2162 2163 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2164 { 2165 return __dev_forward_skb2(dev, skb, true); 2166 } 2167 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2168 2169 /** 2170 * dev_forward_skb - loopback an skb to another netif 2171 * 2172 * @dev: destination network device 2173 * @skb: buffer to forward 2174 * 2175 * return values: 2176 * NET_RX_SUCCESS (no congestion) 2177 * NET_RX_DROP (packet was dropped, but freed) 2178 * 2179 * dev_forward_skb can be used for injecting an skb from the 2180 * start_xmit function of one device into the receive queue 2181 * of another device. 2182 * 2183 * The receiving device may be in another namespace, so 2184 * we have to clear all information in the skb that could 2185 * impact namespace isolation. 2186 */ 2187 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2188 { 2189 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2190 } 2191 EXPORT_SYMBOL_GPL(dev_forward_skb); 2192 2193 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2194 { 2195 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2196 } 2197 2198 static inline int deliver_skb(struct sk_buff *skb, 2199 struct packet_type *pt_prev, 2200 struct net_device *orig_dev) 2201 { 2202 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2203 return -ENOMEM; 2204 refcount_inc(&skb->users); 2205 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2206 } 2207 2208 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2209 struct packet_type **pt, 2210 struct net_device *orig_dev, 2211 __be16 type, 2212 struct list_head *ptype_list) 2213 { 2214 struct packet_type *ptype, *pt_prev = *pt; 2215 2216 list_for_each_entry_rcu(ptype, ptype_list, list) { 2217 if (ptype->type != type) 2218 continue; 2219 if (pt_prev) 2220 deliver_skb(skb, pt_prev, orig_dev); 2221 pt_prev = ptype; 2222 } 2223 *pt = pt_prev; 2224 } 2225 2226 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2227 { 2228 if (!ptype->af_packet_priv || !skb->sk) 2229 return false; 2230 2231 if (ptype->id_match) 2232 return ptype->id_match(ptype, skb->sk); 2233 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2234 return true; 2235 2236 return false; 2237 } 2238 2239 /** 2240 * dev_nit_active - return true if any network interface taps are in use 2241 * 2242 * @dev: network device to check for the presence of taps 2243 */ 2244 bool dev_nit_active(struct net_device *dev) 2245 { 2246 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2247 } 2248 EXPORT_SYMBOL_GPL(dev_nit_active); 2249 2250 /* 2251 * Support routine. Sends outgoing frames to any network 2252 * taps currently in use. 
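 * Typical call site (see xmit_one() further down in this file):
 *
 *	if (dev_nit_active(dev))
 *		dev_queue_xmit_nit(skb, dev);
 *
 * i.e. the clone-and-deliver work below only runs while at least one
 * tap (for example a packet socket) is listening.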
2253 */ 2254 2255 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2256 { 2257 struct packet_type *ptype; 2258 struct sk_buff *skb2 = NULL; 2259 struct packet_type *pt_prev = NULL; 2260 struct list_head *ptype_list = &ptype_all; 2261 2262 rcu_read_lock(); 2263 again: 2264 list_for_each_entry_rcu(ptype, ptype_list, list) { 2265 if (ptype->ignore_outgoing) 2266 continue; 2267 2268 /* Never send packets back to the socket 2269 * they originated from - MvS (miquels@drinkel.ow.org) 2270 */ 2271 if (skb_loop_sk(ptype, skb)) 2272 continue; 2273 2274 if (pt_prev) { 2275 deliver_skb(skb2, pt_prev, skb->dev); 2276 pt_prev = ptype; 2277 continue; 2278 } 2279 2280 /* need to clone skb, done only once */ 2281 skb2 = skb_clone(skb, GFP_ATOMIC); 2282 if (!skb2) 2283 goto out_unlock; 2284 2285 net_timestamp_set(skb2); 2286 2287 /* skb->nh should be correctly 2288 * set by sender, so that the second statement is 2289 * just protection against buggy protocols. 2290 */ 2291 skb_reset_mac_header(skb2); 2292 2293 if (skb_network_header(skb2) < skb2->data || 2294 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2295 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2296 ntohs(skb2->protocol), 2297 dev->name); 2298 skb_reset_network_header(skb2); 2299 } 2300 2301 skb2->transport_header = skb2->network_header; 2302 skb2->pkt_type = PACKET_OUTGOING; 2303 pt_prev = ptype; 2304 } 2305 2306 if (ptype_list == &ptype_all) { 2307 ptype_list = &dev->ptype_all; 2308 goto again; 2309 } 2310 out_unlock: 2311 if (pt_prev) { 2312 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2313 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2314 else 2315 kfree_skb(skb2); 2316 } 2317 rcu_read_unlock(); 2318 } 2319 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2320 2321 /** 2322 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2323 * @dev: Network device 2324 * @txq: number of queues available 2325 * 2326 * If real_num_tx_queues is changed the tc mappings may no longer be 2327 * valid. To resolve this verify the tc mapping remains valid and if 2328 * not NULL the mapping. With no priorities mapping to this 2329 * offset/count pair it will no longer be used. In the worst case TC0 2330 * is invalid nothing can be done so disable priority mappings. If is 2331 * expected that drivers will fix this mapping if they can before 2332 * calling netif_set_real_num_tx_queues. 2333 */ 2334 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2335 { 2336 int i; 2337 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2338 2339 /* If TC0 is invalidated disable TC mapping */ 2340 if (tc->offset + tc->count > txq) { 2341 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2342 dev->num_tc = 0; 2343 return; 2344 } 2345 2346 /* Invalidated prio to tc mappings set to TC0 */ 2347 for (i = 1; i < TC_BITMASK + 1; i++) { 2348 int q = netdev_get_prio_tc_map(dev, i); 2349 2350 tc = &dev->tc_to_txq[q]; 2351 if (tc->offset + tc->count > txq) { 2352 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2353 i, q); 2354 netdev_set_prio_tc_map(dev, i, 0); 2355 } 2356 } 2357 } 2358 2359 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2360 { 2361 if (dev->num_tc) { 2362 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2363 int i; 2364 2365 /* walk through the TCs and see if it falls into any of them */ 2366 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2367 if ((txq - tc->offset) < tc->count) 2368 return i; 2369 } 2370 2371 /* didn't find it, just return -1 to indicate no match */ 2372 return -1; 2373 } 2374 2375 return 0; 2376 } 2377 EXPORT_SYMBOL(netdev_txq_to_tc); 2378 2379 #ifdef CONFIG_XPS 2380 static struct static_key xps_needed __read_mostly; 2381 static struct static_key xps_rxqs_needed __read_mostly; 2382 static DEFINE_MUTEX(xps_map_mutex); 2383 #define xmap_dereference(P) \ 2384 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2385 2386 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2387 struct xps_dev_maps *old_maps, int tci, u16 index) 2388 { 2389 struct xps_map *map = NULL; 2390 int pos; 2391 2392 map = xmap_dereference(dev_maps->attr_map[tci]); 2393 if (!map) 2394 return false; 2395 2396 for (pos = map->len; pos--;) { 2397 if (map->queues[pos] != index) 2398 continue; 2399 2400 if (map->len > 1) { 2401 map->queues[pos] = map->queues[--map->len]; 2402 break; 2403 } 2404 2405 if (old_maps) 2406 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2407 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2408 kfree_rcu(map, rcu); 2409 return false; 2410 } 2411 2412 return true; 2413 } 2414 2415 static bool remove_xps_queue_cpu(struct net_device *dev, 2416 struct xps_dev_maps *dev_maps, 2417 int cpu, u16 offset, u16 count) 2418 { 2419 int num_tc = dev_maps->num_tc; 2420 bool active = false; 2421 int tci; 2422 2423 for (tci = cpu * num_tc; num_tc--; tci++) { 2424 int i, j; 2425 2426 for (i = count, j = offset; i--; j++) { 2427 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2428 break; 2429 } 2430 2431 active |= i < 0; 2432 } 2433 2434 return active; 2435 } 2436 2437 static void reset_xps_maps(struct net_device *dev, 2438 struct xps_dev_maps *dev_maps, 2439 enum xps_map_type type) 2440 { 2441 static_key_slow_dec_cpuslocked(&xps_needed); 2442 if (type == XPS_RXQS) 2443 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2444 2445 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2446 2447 kfree_rcu(dev_maps, rcu); 2448 } 2449 2450 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2451 u16 offset, u16 count) 2452 { 2453 struct xps_dev_maps *dev_maps; 2454 bool active = false; 2455 int i, j; 2456 2457 dev_maps = xmap_dereference(dev->xps_maps[type]); 2458 if (!dev_maps) 2459 return; 2460 2461 for (j = 0; j < dev_maps->nr_ids; j++) 2462 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2463 if (!active) 2464 reset_xps_maps(dev, dev_maps, type); 2465 2466 if (type == XPS_CPUS) { 2467 for (i = offset + (count - 1); count--; i--) 2468 netdev_queue_numa_node_write( 2469 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2470 } 2471 } 2472 2473 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2474 u16 count) 2475 { 2476 if (!static_key_false(&xps_needed)) 2477 return; 2478 2479 cpus_read_lock(); 2480 mutex_lock(&xps_map_mutex); 2481 2482 if (static_key_false(&xps_rxqs_needed)) 2483 clean_xps_maps(dev, XPS_RXQS, offset, count); 2484 2485 clean_xps_maps(dev, XPS_CPUS, offset, count); 2486 2487 mutex_unlock(&xps_map_mutex); 2488 cpus_read_unlock(); 2489 } 2490 2491 static void 
netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2492 { 2493 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2494 } 2495 2496 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2497 u16 index, bool is_rxqs_map) 2498 { 2499 struct xps_map *new_map; 2500 int alloc_len = XPS_MIN_MAP_ALLOC; 2501 int i, pos; 2502 2503 for (pos = 0; map && pos < map->len; pos++) { 2504 if (map->queues[pos] != index) 2505 continue; 2506 return map; 2507 } 2508 2509 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2510 if (map) { 2511 if (pos < map->alloc_len) 2512 return map; 2513 2514 alloc_len = map->alloc_len * 2; 2515 } 2516 2517 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2518 * map 2519 */ 2520 if (is_rxqs_map) 2521 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2522 else 2523 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2524 cpu_to_node(attr_index)); 2525 if (!new_map) 2526 return NULL; 2527 2528 for (i = 0; i < pos; i++) 2529 new_map->queues[i] = map->queues[i]; 2530 new_map->alloc_len = alloc_len; 2531 new_map->len = pos; 2532 2533 return new_map; 2534 } 2535 2536 /* Copy xps maps at a given index */ 2537 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2538 struct xps_dev_maps *new_dev_maps, int index, 2539 int tc, bool skip_tc) 2540 { 2541 int i, tci = index * dev_maps->num_tc; 2542 struct xps_map *map; 2543 2544 /* copy maps belonging to foreign traffic classes */ 2545 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2546 if (i == tc && skip_tc) 2547 continue; 2548 2549 /* fill in the new device map from the old device map */ 2550 map = xmap_dereference(dev_maps->attr_map[tci]); 2551 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2552 } 2553 } 2554 2555 /* Must be called under cpus_read_lock */ 2556 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2557 u16 index, enum xps_map_type type) 2558 { 2559 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2560 const unsigned long *online_mask = NULL; 2561 bool active = false, copy = false; 2562 int i, j, tci, numa_node_id = -2; 2563 int maps_sz, num_tc = 1, tc = 0; 2564 struct xps_map *map, *new_map; 2565 unsigned int nr_ids; 2566 2567 WARN_ON_ONCE(index >= dev->num_tx_queues); 2568 2569 if (dev->num_tc) { 2570 /* Do not allow XPS on subordinate device directly */ 2571 num_tc = dev->num_tc; 2572 if (num_tc < 0) 2573 return -EINVAL; 2574 2575 /* If queue belongs to subordinate dev use its map */ 2576 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2577 2578 tc = netdev_txq_to_tc(dev, index); 2579 if (tc < 0) 2580 return -EINVAL; 2581 } 2582 2583 mutex_lock(&xps_map_mutex); 2584 2585 dev_maps = xmap_dereference(dev->xps_maps[type]); 2586 if (type == XPS_RXQS) { 2587 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2588 nr_ids = dev->num_rx_queues; 2589 } else { 2590 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2591 if (num_possible_cpus() > 1) 2592 online_mask = cpumask_bits(cpu_online_mask); 2593 nr_ids = nr_cpu_ids; 2594 } 2595 2596 if (maps_sz < L1_CACHE_BYTES) 2597 maps_sz = L1_CACHE_BYTES; 2598 2599 /* The old dev_maps could be larger or smaller than the one we're 2600 * setting up now, as dev->num_tc or nr_ids could have been updated in 2601 * between. We could try to be smart, but let's be safe instead and only 2602 * copy foreign traffic classes if the two map sizes match. 
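	 * For example, if dev->num_tc went from 2 to 4 between two calls, the
	 * old per-CPU/per-rx-queue entries cannot be reused one-for-one; in
	 * that case "copy" stays false and the old maps are torn down further
	 * below instead of being partially inherited.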
2603 */ 2604 if (dev_maps && 2605 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2606 copy = true; 2607 2608 /* allocate memory for queue storage */ 2609 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2610 j < nr_ids;) { 2611 if (!new_dev_maps) { 2612 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2613 if (!new_dev_maps) { 2614 mutex_unlock(&xps_map_mutex); 2615 return -ENOMEM; 2616 } 2617 2618 new_dev_maps->nr_ids = nr_ids; 2619 new_dev_maps->num_tc = num_tc; 2620 } 2621 2622 tci = j * num_tc + tc; 2623 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2624 2625 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2626 if (!map) 2627 goto error; 2628 2629 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2630 } 2631 2632 if (!new_dev_maps) 2633 goto out_no_new_maps; 2634 2635 if (!dev_maps) { 2636 /* Increment static keys at most once per type */ 2637 static_key_slow_inc_cpuslocked(&xps_needed); 2638 if (type == XPS_RXQS) 2639 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2640 } 2641 2642 for (j = 0; j < nr_ids; j++) { 2643 bool skip_tc = false; 2644 2645 tci = j * num_tc + tc; 2646 if (netif_attr_test_mask(j, mask, nr_ids) && 2647 netif_attr_test_online(j, online_mask, nr_ids)) { 2648 /* add tx-queue to CPU/rx-queue maps */ 2649 int pos = 0; 2650 2651 skip_tc = true; 2652 2653 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2654 while ((pos < map->len) && (map->queues[pos] != index)) 2655 pos++; 2656 2657 if (pos == map->len) 2658 map->queues[map->len++] = index; 2659 #ifdef CONFIG_NUMA 2660 if (type == XPS_CPUS) { 2661 if (numa_node_id == -2) 2662 numa_node_id = cpu_to_node(j); 2663 else if (numa_node_id != cpu_to_node(j)) 2664 numa_node_id = -1; 2665 } 2666 #endif 2667 } 2668 2669 if (copy) 2670 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2671 skip_tc); 2672 } 2673 2674 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2675 2676 /* Cleanup old maps */ 2677 if (!dev_maps) 2678 goto out_no_old_maps; 2679 2680 for (j = 0; j < dev_maps->nr_ids; j++) { 2681 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2682 map = xmap_dereference(dev_maps->attr_map[tci]); 2683 if (!map) 2684 continue; 2685 2686 if (copy) { 2687 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2688 if (map == new_map) 2689 continue; 2690 } 2691 2692 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2693 kfree_rcu(map, rcu); 2694 } 2695 } 2696 2697 old_dev_maps = dev_maps; 2698 2699 out_no_old_maps: 2700 dev_maps = new_dev_maps; 2701 active = true; 2702 2703 out_no_new_maps: 2704 if (type == XPS_CPUS) 2705 /* update Tx queue numa node */ 2706 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2707 (numa_node_id >= 0) ? 2708 numa_node_id : NUMA_NO_NODE); 2709 2710 if (!dev_maps) 2711 goto out_no_maps; 2712 2713 /* removes tx-queue from unused CPUs/rx-queues */ 2714 for (j = 0; j < dev_maps->nr_ids; j++) { 2715 tci = j * dev_maps->num_tc; 2716 2717 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2718 if (i == tc && 2719 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2720 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2721 continue; 2722 2723 active |= remove_xps_queue(dev_maps, 2724 copy ? 
old_dev_maps : NULL, 2725 tci, index); 2726 } 2727 } 2728 2729 if (old_dev_maps) 2730 kfree_rcu(old_dev_maps, rcu); 2731 2732 /* free map if not active */ 2733 if (!active) 2734 reset_xps_maps(dev, dev_maps, type); 2735 2736 out_no_maps: 2737 mutex_unlock(&xps_map_mutex); 2738 2739 return 0; 2740 error: 2741 /* remove any maps that we added */ 2742 for (j = 0; j < nr_ids; j++) { 2743 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2744 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2745 map = copy ? 2746 xmap_dereference(dev_maps->attr_map[tci]) : 2747 NULL; 2748 if (new_map && new_map != map) 2749 kfree(new_map); 2750 } 2751 } 2752 2753 mutex_unlock(&xps_map_mutex); 2754 2755 kfree(new_dev_maps); 2756 return -ENOMEM; 2757 } 2758 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2759 2760 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2761 u16 index) 2762 { 2763 int ret; 2764 2765 cpus_read_lock(); 2766 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2767 cpus_read_unlock(); 2768 2769 return ret; 2770 } 2771 EXPORT_SYMBOL(netif_set_xps_queue); 2772 2773 #endif 2774 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2775 { 2776 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2777 2778 /* Unbind any subordinate channels */ 2779 while (txq-- != &dev->_tx[0]) { 2780 if (txq->sb_dev) 2781 netdev_unbind_sb_channel(dev, txq->sb_dev); 2782 } 2783 } 2784 2785 void netdev_reset_tc(struct net_device *dev) 2786 { 2787 #ifdef CONFIG_XPS 2788 netif_reset_xps_queues_gt(dev, 0); 2789 #endif 2790 netdev_unbind_all_sb_channels(dev); 2791 2792 /* Reset TC configuration of device */ 2793 dev->num_tc = 0; 2794 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2795 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2796 } 2797 EXPORT_SYMBOL(netdev_reset_tc); 2798 2799 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2800 { 2801 if (tc >= dev->num_tc) 2802 return -EINVAL; 2803 2804 #ifdef CONFIG_XPS 2805 netif_reset_xps_queues(dev, offset, count); 2806 #endif 2807 dev->tc_to_txq[tc].count = count; 2808 dev->tc_to_txq[tc].offset = offset; 2809 return 0; 2810 } 2811 EXPORT_SYMBOL(netdev_set_tc_queue); 2812 2813 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2814 { 2815 if (num_tc > TC_MAX_QUEUE) 2816 return -EINVAL; 2817 2818 #ifdef CONFIG_XPS 2819 netif_reset_xps_queues_gt(dev, 0); 2820 #endif 2821 netdev_unbind_all_sb_channels(dev); 2822 2823 dev->num_tc = num_tc; 2824 return 0; 2825 } 2826 EXPORT_SYMBOL(netdev_set_num_tc); 2827 2828 void netdev_unbind_sb_channel(struct net_device *dev, 2829 struct net_device *sb_dev) 2830 { 2831 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2832 2833 #ifdef CONFIG_XPS 2834 netif_reset_xps_queues_gt(sb_dev, 0); 2835 #endif 2836 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2837 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2838 2839 while (txq-- != &dev->_tx[0]) { 2840 if (txq->sb_dev == sb_dev) 2841 txq->sb_dev = NULL; 2842 } 2843 } 2844 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2845 2846 int netdev_bind_sb_channel_queue(struct net_device *dev, 2847 struct net_device *sb_dev, 2848 u8 tc, u16 count, u16 offset) 2849 { 2850 /* Make certain the sb_dev and dev are already configured */ 2851 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2852 return -EINVAL; 2853 2854 /* We cannot hand out queues we don't have */ 2855 if ((offset + count) > dev->real_num_tx_queues) 2856 return -EINVAL; 2857 2858 /* Record the mapping */ 2859 
sb_dev->tc_to_txq[tc].count = count; 2860 sb_dev->tc_to_txq[tc].offset = offset; 2861 2862 /* Provide a way for Tx queue to find the tc_to_txq map or 2863 * XPS map for itself. 2864 */ 2865 while (count--) 2866 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2867 2868 return 0; 2869 } 2870 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2871 2872 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2873 { 2874 /* Do not use a multiqueue device to represent a subordinate channel */ 2875 if (netif_is_multiqueue(dev)) 2876 return -ENODEV; 2877 2878 /* We allow channels 1 - 32767 to be used for subordinate channels. 2879 * Channel 0 is meant to be "native" mode and used only to represent 2880 * the main root device. We allow writing 0 to reset the device back 2881 * to normal mode after being used as a subordinate channel. 2882 */ 2883 if (channel > S16_MAX) 2884 return -EINVAL; 2885 2886 dev->num_tc = -channel; 2887 2888 return 0; 2889 } 2890 EXPORT_SYMBOL(netdev_set_sb_channel); 2891 2892 /* 2893 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2894 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2895 */ 2896 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2897 { 2898 bool disabling; 2899 int rc; 2900 2901 disabling = txq < dev->real_num_tx_queues; 2902 2903 if (txq < 1 || txq > dev->num_tx_queues) 2904 return -EINVAL; 2905 2906 if (dev->reg_state == NETREG_REGISTERED || 2907 dev->reg_state == NETREG_UNREGISTERING) { 2908 ASSERT_RTNL(); 2909 2910 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2911 txq); 2912 if (rc) 2913 return rc; 2914 2915 if (dev->num_tc) 2916 netif_setup_tc(dev, txq); 2917 2918 dev_qdisc_change_real_num_tx(dev, txq); 2919 2920 dev->real_num_tx_queues = txq; 2921 2922 if (disabling) { 2923 synchronize_net(); 2924 qdisc_reset_all_tx_gt(dev, txq); 2925 #ifdef CONFIG_XPS 2926 netif_reset_xps_queues_gt(dev, txq); 2927 #endif 2928 } 2929 } else { 2930 dev->real_num_tx_queues = txq; 2931 } 2932 2933 return 0; 2934 } 2935 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2936 2937 #ifdef CONFIG_SYSFS 2938 /** 2939 * netif_set_real_num_rx_queues - set actual number of RX queues used 2940 * @dev: Network device 2941 * @rxq: Actual number of RX queues 2942 * 2943 * This must be called either with the rtnl_lock held or before 2944 * registration of the net device. Returns 0 on success, or a 2945 * negative error code. If called before registration, it always 2946 * succeeds. 2947 */ 2948 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2949 { 2950 int rc; 2951 2952 if (rxq < 1 || rxq > dev->num_rx_queues) 2953 return -EINVAL; 2954 2955 if (dev->reg_state == NETREG_REGISTERED) { 2956 ASSERT_RTNL(); 2957 2958 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2959 rxq); 2960 if (rc) 2961 return rc; 2962 } 2963 2964 dev->real_num_rx_queues = rxq; 2965 return 0; 2966 } 2967 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2968 #endif 2969 2970 /** 2971 * netif_set_real_num_queues - set actual number of RX and TX queues used 2972 * @dev: Network device 2973 * @txq: Actual number of TX queues 2974 * @rxq: Actual number of RX queues 2975 * 2976 * Set the real number of both TX and RX queues. 2977 * Does nothing if the number of queues is already correct. 
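 * Illustrative use from a driver reconfiguring its channels (sketch only;
 * new_txq and new_rxq are hypothetical values, and rtnl_lock is required
 * for a registered device as noted for netif_set_real_num_rx_queues()
 * above):
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_queues(dev, new_txq, new_rxq);
 *	rtnl_unlock();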
2978 */ 2979 int netif_set_real_num_queues(struct net_device *dev, 2980 unsigned int txq, unsigned int rxq) 2981 { 2982 unsigned int old_rxq = dev->real_num_rx_queues; 2983 int err; 2984 2985 if (txq < 1 || txq > dev->num_tx_queues || 2986 rxq < 1 || rxq > dev->num_rx_queues) 2987 return -EINVAL; 2988 2989 /* Start from increases, so the error path only does decreases - 2990 * decreases can't fail. 2991 */ 2992 if (rxq > dev->real_num_rx_queues) { 2993 err = netif_set_real_num_rx_queues(dev, rxq); 2994 if (err) 2995 return err; 2996 } 2997 if (txq > dev->real_num_tx_queues) { 2998 err = netif_set_real_num_tx_queues(dev, txq); 2999 if (err) 3000 goto undo_rx; 3001 } 3002 if (rxq < dev->real_num_rx_queues) 3003 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 3004 if (txq < dev->real_num_tx_queues) 3005 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 3006 3007 return 0; 3008 undo_rx: 3009 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 3010 return err; 3011 } 3012 EXPORT_SYMBOL(netif_set_real_num_queues); 3013 3014 /** 3015 * netif_set_tso_max_size() - set the max size of TSO frames supported 3016 * @dev: netdev to update 3017 * @size: max skb->len of a TSO frame 3018 * 3019 * Set the limit on the size of TSO super-frames the device can handle. 3020 * Unless explicitly set the stack will assume the value of 3021 * %GSO_LEGACY_MAX_SIZE. 3022 */ 3023 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) 3024 { 3025 dev->tso_max_size = min(GSO_MAX_SIZE, size); 3026 if (size < READ_ONCE(dev->gso_max_size)) 3027 netif_set_gso_max_size(dev, size); 3028 if (size < READ_ONCE(dev->gso_ipv4_max_size)) 3029 netif_set_gso_ipv4_max_size(dev, size); 3030 } 3031 EXPORT_SYMBOL(netif_set_tso_max_size); 3032 3033 /** 3034 * netif_set_tso_max_segs() - set the max number of segs supported for TSO 3035 * @dev: netdev to update 3036 * @segs: max number of TCP segments 3037 * 3038 * Set the limit on the number of TCP segments the device can generate from 3039 * a single TSO super-frame. 3040 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. 3041 */ 3042 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) 3043 { 3044 dev->tso_max_segs = segs; 3045 if (segs < READ_ONCE(dev->gso_max_segs)) 3046 netif_set_gso_max_segs(dev, segs); 3047 } 3048 EXPORT_SYMBOL(netif_set_tso_max_segs); 3049 3050 /** 3051 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper 3052 * @to: netdev to update 3053 * @from: netdev from which to copy the limits 3054 */ 3055 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) 3056 { 3057 netif_set_tso_max_size(to, from->tso_max_size); 3058 netif_set_tso_max_segs(to, from->tso_max_segs); 3059 } 3060 EXPORT_SYMBOL(netif_inherit_tso_max); 3061 3062 /** 3063 * netif_get_num_default_rss_queues - default number of RSS queues 3064 * 3065 * Default value is the number of physical cores if there are only 1 or 2, or 3066 * divided by 2 if there are more. 3067 */ 3068 int netif_get_num_default_rss_queues(void) 3069 { 3070 cpumask_var_t cpus; 3071 int cpu, count = 0; 3072 3073 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) 3074 return 1; 3075 3076 cpumask_copy(cpus, cpu_online_mask); 3077 for_each_cpu(cpu, cpus) { 3078 ++count; 3079 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); 3080 } 3081 free_cpumask_var(cpus); 3082 3083 return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; 3084 } 3085 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3086 3087 static void __netif_reschedule(struct Qdisc *q) 3088 { 3089 struct softnet_data *sd; 3090 unsigned long flags; 3091 3092 local_irq_save(flags); 3093 sd = this_cpu_ptr(&softnet_data); 3094 q->next_sched = NULL; 3095 *sd->output_queue_tailp = q; 3096 sd->output_queue_tailp = &q->next_sched; 3097 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3098 local_irq_restore(flags); 3099 } 3100 3101 void __netif_schedule(struct Qdisc *q) 3102 { 3103 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3104 __netif_reschedule(q); 3105 } 3106 EXPORT_SYMBOL(__netif_schedule); 3107 3108 struct dev_kfree_skb_cb { 3109 enum skb_drop_reason reason; 3110 }; 3111 3112 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3113 { 3114 return (struct dev_kfree_skb_cb *)skb->cb; 3115 } 3116 3117 void netif_schedule_queue(struct netdev_queue *txq) 3118 { 3119 rcu_read_lock(); 3120 if (!netif_xmit_stopped(txq)) { 3121 struct Qdisc *q = rcu_dereference(txq->qdisc); 3122 3123 __netif_schedule(q); 3124 } 3125 rcu_read_unlock(); 3126 } 3127 EXPORT_SYMBOL(netif_schedule_queue); 3128 3129 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3130 { 3131 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3132 struct Qdisc *q; 3133 3134 rcu_read_lock(); 3135 q = rcu_dereference(dev_queue->qdisc); 3136 __netif_schedule(q); 3137 rcu_read_unlock(); 3138 } 3139 } 3140 EXPORT_SYMBOL(netif_tx_wake_queue); 3141 3142 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3143 { 3144 unsigned long flags; 3145 3146 if (unlikely(!skb)) 3147 return; 3148 3149 if (likely(refcount_read(&skb->users) == 1)) { 3150 smp_rmb(); 3151 refcount_set(&skb->users, 0); 3152 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3153 return; 3154 } 3155 get_kfree_skb_cb(skb)->reason = reason; 3156 local_irq_save(flags); 3157 skb->next = __this_cpu_read(softnet_data.completion_queue); 3158 __this_cpu_write(softnet_data.completion_queue, skb); 3159 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3160 local_irq_restore(flags); 3161 } 3162 EXPORT_SYMBOL(dev_kfree_skb_irq_reason); 3163 3164 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3165 { 3166 if (in_hardirq() || irqs_disabled()) 3167 dev_kfree_skb_irq_reason(skb, reason); 3168 else 3169 kfree_skb_reason(skb, reason); 3170 } 3171 EXPORT_SYMBOL(dev_kfree_skb_any_reason); 3172 3173 3174 /** 3175 * netif_device_detach - mark device as removed 3176 * @dev: network device 3177 * 3178 * Mark device as removed from system and therefore no longer available. 3179 */ 3180 void netif_device_detach(struct net_device *dev) 3181 { 3182 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3183 netif_running(dev)) { 3184 netif_tx_stop_all_queues(dev); 3185 } 3186 } 3187 EXPORT_SYMBOL(netif_device_detach); 3188 3189 /** 3190 * netif_device_attach - mark device as attached 3191 * @dev: network device 3192 * 3193 * Mark device as attached from system and restart if needed. 3194 */ 3195 void netif_device_attach(struct net_device *dev) 3196 { 3197 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3198 netif_running(dev)) { 3199 netif_tx_wake_all_queues(dev); 3200 __netdev_watchdog_up(dev); 3201 } 3202 } 3203 EXPORT_SYMBOL(netif_device_attach); 3204 3205 /* 3206 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3207 * to be used as a distribution range. 
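 * For example, with no traffic classes configured and
 * real_num_tx_queues == 8, the flow hash is scaled into [0, 8) via
 * reciprocal_scale(skb_get_hash(skb), 8); when traffic classes are
 * configured, the offset/count pair of the skb's TC bounds the range
 * instead.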
3208 */ 3209 static u16 skb_tx_hash(const struct net_device *dev, 3210 const struct net_device *sb_dev, 3211 struct sk_buff *skb) 3212 { 3213 u32 hash; 3214 u16 qoffset = 0; 3215 u16 qcount = dev->real_num_tx_queues; 3216 3217 if (dev->num_tc) { 3218 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3219 3220 qoffset = sb_dev->tc_to_txq[tc].offset; 3221 qcount = sb_dev->tc_to_txq[tc].count; 3222 if (unlikely(!qcount)) { 3223 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3224 sb_dev->name, qoffset, tc); 3225 qoffset = 0; 3226 qcount = dev->real_num_tx_queues; 3227 } 3228 } 3229 3230 if (skb_rx_queue_recorded(skb)) { 3231 DEBUG_NET_WARN_ON_ONCE(qcount == 0); 3232 hash = skb_get_rx_queue(skb); 3233 if (hash >= qoffset) 3234 hash -= qoffset; 3235 while (unlikely(hash >= qcount)) 3236 hash -= qcount; 3237 return hash + qoffset; 3238 } 3239 3240 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3241 } 3242 3243 void skb_warn_bad_offload(const struct sk_buff *skb) 3244 { 3245 static const netdev_features_t null_features; 3246 struct net_device *dev = skb->dev; 3247 const char *name = ""; 3248 3249 if (!net_ratelimit()) 3250 return; 3251 3252 if (dev) { 3253 if (dev->dev.parent) 3254 name = dev_driver_string(dev->dev.parent); 3255 else 3256 name = netdev_name(dev); 3257 } 3258 skb_dump(KERN_WARNING, skb, false); 3259 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3260 name, dev ? &dev->features : &null_features, 3261 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3262 } 3263 3264 /* 3265 * Invalidate hardware checksum when packet is to be mangled, and 3266 * complete checksum manually on outgoing path. 3267 */ 3268 int skb_checksum_help(struct sk_buff *skb) 3269 { 3270 __wsum csum; 3271 int ret = 0, offset; 3272 3273 if (skb->ip_summed == CHECKSUM_COMPLETE) 3274 goto out_set_summed; 3275 3276 if (unlikely(skb_is_gso(skb))) { 3277 skb_warn_bad_offload(skb); 3278 return -EINVAL; 3279 } 3280 3281 /* Before computing a checksum, we should make sure no frag could 3282 * be modified by an external entity : checksum could be wrong. 3283 */ 3284 if (skb_has_shared_frag(skb)) { 3285 ret = __skb_linearize(skb); 3286 if (ret) 3287 goto out; 3288 } 3289 3290 offset = skb_checksum_start_offset(skb); 3291 ret = -EINVAL; 3292 if (unlikely(offset >= skb_headlen(skb))) { 3293 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3294 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n", 3295 offset, skb_headlen(skb)); 3296 goto out; 3297 } 3298 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3299 3300 offset += skb->csum_offset; 3301 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) { 3302 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3303 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n", 3304 offset + sizeof(__sum16), skb_headlen(skb)); 3305 goto out; 3306 } 3307 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3308 if (ret) 3309 goto out; 3310 3311 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3312 out_set_summed: 3313 skb->ip_summed = CHECKSUM_NONE; 3314 out: 3315 return ret; 3316 } 3317 EXPORT_SYMBOL(skb_checksum_help); 3318 3319 int skb_crc32c_csum_help(struct sk_buff *skb) 3320 { 3321 __le32 crc32c_csum; 3322 int ret = 0, offset, start; 3323 3324 if (skb->ip_summed != CHECKSUM_PARTIAL) 3325 goto out; 3326 3327 if (unlikely(skb_is_gso(skb))) 3328 goto out; 3329 3330 /* Before computing a checksum, we should make sure no frag could 3331 * be modified by an external entity : checksum could be wrong. 
3332 */ 3333 if (unlikely(skb_has_shared_frag(skb))) { 3334 ret = __skb_linearize(skb); 3335 if (ret) 3336 goto out; 3337 } 3338 start = skb_checksum_start_offset(skb); 3339 offset = start + offsetof(struct sctphdr, checksum); 3340 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3341 ret = -EINVAL; 3342 goto out; 3343 } 3344 3345 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3346 if (ret) 3347 goto out; 3348 3349 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3350 skb->len - start, ~(__u32)0, 3351 crc32c_csum_stub)); 3352 *(__le32 *)(skb->data + offset) = crc32c_csum; 3353 skb_reset_csum_not_inet(skb); 3354 out: 3355 return ret; 3356 } 3357 3358 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3359 { 3360 __be16 type = skb->protocol; 3361 3362 /* Tunnel gso handlers can set protocol to ethernet. */ 3363 if (type == htons(ETH_P_TEB)) { 3364 struct ethhdr *eth; 3365 3366 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3367 return 0; 3368 3369 eth = (struct ethhdr *)skb->data; 3370 type = eth->h_proto; 3371 } 3372 3373 return vlan_get_protocol_and_depth(skb, type, depth); 3374 } 3375 3376 3377 /* Take action when hardware reception checksum errors are detected. */ 3378 #ifdef CONFIG_BUG 3379 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3380 { 3381 netdev_err(dev, "hw csum failure\n"); 3382 skb_dump(KERN_ERR, skb, true); 3383 dump_stack(); 3384 } 3385 3386 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3387 { 3388 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3389 } 3390 EXPORT_SYMBOL(netdev_rx_csum_fault); 3391 #endif 3392 3393 /* XXX: check that highmem exists at all on the given machine. */ 3394 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3395 { 3396 #ifdef CONFIG_HIGHMEM 3397 int i; 3398 3399 if (!(dev->features & NETIF_F_HIGHDMA)) { 3400 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3401 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3402 3403 if (PageHighMem(skb_frag_page(frag))) 3404 return 1; 3405 } 3406 } 3407 #endif 3408 return 0; 3409 } 3410 3411 /* If MPLS offload request, verify we are testing hardware MPLS features 3412 * instead of standard features for the netdev. 
3413 */ 3414 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3415 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3416 netdev_features_t features, 3417 __be16 type) 3418 { 3419 if (eth_p_mpls(type)) 3420 features &= skb->dev->mpls_features; 3421 3422 return features; 3423 } 3424 #else 3425 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3426 netdev_features_t features, 3427 __be16 type) 3428 { 3429 return features; 3430 } 3431 #endif 3432 3433 static netdev_features_t harmonize_features(struct sk_buff *skb, 3434 netdev_features_t features) 3435 { 3436 __be16 type; 3437 3438 type = skb_network_protocol(skb, NULL); 3439 features = net_mpls_features(skb, features, type); 3440 3441 if (skb->ip_summed != CHECKSUM_NONE && 3442 !can_checksum_protocol(features, type)) { 3443 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3444 } 3445 if (illegal_highdma(skb->dev, skb)) 3446 features &= ~NETIF_F_SG; 3447 3448 return features; 3449 } 3450 3451 netdev_features_t passthru_features_check(struct sk_buff *skb, 3452 struct net_device *dev, 3453 netdev_features_t features) 3454 { 3455 return features; 3456 } 3457 EXPORT_SYMBOL(passthru_features_check); 3458 3459 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3460 struct net_device *dev, 3461 netdev_features_t features) 3462 { 3463 return vlan_features_check(skb, features); 3464 } 3465 3466 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3467 struct net_device *dev, 3468 netdev_features_t features) 3469 { 3470 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3471 3472 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3473 return features & ~NETIF_F_GSO_MASK; 3474 3475 if (!skb_shinfo(skb)->gso_type) { 3476 skb_warn_bad_offload(skb); 3477 return features & ~NETIF_F_GSO_MASK; 3478 } 3479 3480 /* Support for GSO partial features requires software 3481 * intervention before we can actually process the packets 3482 * so we need to strip support for any partial features now 3483 * and we can pull them back in after we have partially 3484 * segmented the frame. 3485 */ 3486 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3487 features &= ~dev->gso_partial_features; 3488 3489 /* Make sure to clear the IPv4 ID mangling feature if the 3490 * IPv4 header has the potential to be fragmented. 3491 */ 3492 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3493 struct iphdr *iph = skb->encapsulation ? 
3494 inner_ip_hdr(skb) : ip_hdr(skb); 3495 3496 if (!(iph->frag_off & htons(IP_DF))) 3497 features &= ~NETIF_F_TSO_MANGLEID; 3498 } 3499 3500 return features; 3501 } 3502 3503 netdev_features_t netif_skb_features(struct sk_buff *skb) 3504 { 3505 struct net_device *dev = skb->dev; 3506 netdev_features_t features = dev->features; 3507 3508 if (skb_is_gso(skb)) 3509 features = gso_features_check(skb, dev, features); 3510 3511 /* If encapsulation offload request, verify we are testing 3512 * hardware encapsulation features instead of standard 3513 * features for the netdev 3514 */ 3515 if (skb->encapsulation) 3516 features &= dev->hw_enc_features; 3517 3518 if (skb_vlan_tagged(skb)) 3519 features = netdev_intersect_features(features, 3520 dev->vlan_features | 3521 NETIF_F_HW_VLAN_CTAG_TX | 3522 NETIF_F_HW_VLAN_STAG_TX); 3523 3524 if (dev->netdev_ops->ndo_features_check) 3525 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3526 features); 3527 else 3528 features &= dflt_features_check(skb, dev, features); 3529 3530 return harmonize_features(skb, features); 3531 } 3532 EXPORT_SYMBOL(netif_skb_features); 3533 3534 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3535 struct netdev_queue *txq, bool more) 3536 { 3537 unsigned int len; 3538 int rc; 3539 3540 if (dev_nit_active(dev)) 3541 dev_queue_xmit_nit(skb, dev); 3542 3543 len = skb->len; 3544 trace_net_dev_start_xmit(skb, dev); 3545 rc = netdev_start_xmit(skb, dev, txq, more); 3546 trace_net_dev_xmit(skb, rc, dev, len); 3547 3548 return rc; 3549 } 3550 3551 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3552 struct netdev_queue *txq, int *ret) 3553 { 3554 struct sk_buff *skb = first; 3555 int rc = NETDEV_TX_OK; 3556 3557 while (skb) { 3558 struct sk_buff *next = skb->next; 3559 3560 skb_mark_not_on_list(skb); 3561 rc = xmit_one(skb, dev, txq, next != NULL); 3562 if (unlikely(!dev_xmit_complete(rc))) { 3563 skb->next = next; 3564 goto out; 3565 } 3566 3567 skb = next; 3568 if (netif_tx_queue_stopped(txq) && skb) { 3569 rc = NETDEV_TX_BUSY; 3570 break; 3571 } 3572 } 3573 3574 out: 3575 *ret = rc; 3576 return skb; 3577 } 3578 3579 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3580 netdev_features_t features) 3581 { 3582 if (skb_vlan_tag_present(skb) && 3583 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3584 skb = __vlan_hwaccel_push_inside(skb); 3585 return skb; 3586 } 3587 3588 int skb_csum_hwoffload_help(struct sk_buff *skb, 3589 const netdev_features_t features) 3590 { 3591 if (unlikely(skb_csum_is_sctp(skb))) 3592 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3593 skb_crc32c_csum_help(skb); 3594 3595 if (features & NETIF_F_HW_CSUM) 3596 return 0; 3597 3598 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3599 switch (skb->csum_offset) { 3600 case offsetof(struct tcphdr, check): 3601 case offsetof(struct udphdr, check): 3602 return 0; 3603 } 3604 } 3605 3606 return skb_checksum_help(skb); 3607 } 3608 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3609 3610 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3611 { 3612 netdev_features_t features; 3613 3614 features = netif_skb_features(skb); 3615 skb = validate_xmit_vlan(skb, features); 3616 if (unlikely(!skb)) 3617 goto out_null; 3618 3619 skb = sk_validate_xmit_skb(skb, dev); 3620 if (unlikely(!skb)) 3621 goto out_null; 3622 3623 if (netif_needs_gso(skb, features)) { 3624 struct sk_buff *segs; 3625 3626 segs = skb_gso_segment(skb, features); 3627 if (IS_ERR(segs)) { 3628 goto out_kfree_skb; 3629 } else if (segs) { 3630 consume_skb(skb); 3631 skb = segs; 3632 } 3633 } else { 3634 if (skb_needs_linearize(skb, features) && 3635 __skb_linearize(skb)) 3636 goto out_kfree_skb; 3637 3638 /* If packet is not checksummed and device does not 3639 * support checksumming for this protocol, complete 3640 * checksumming here. 3641 */ 3642 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3643 if (skb->encapsulation) 3644 skb_set_inner_transport_header(skb, 3645 skb_checksum_start_offset(skb)); 3646 else 3647 skb_set_transport_header(skb, 3648 skb_checksum_start_offset(skb)); 3649 if (skb_csum_hwoffload_help(skb, features)) 3650 goto out_kfree_skb; 3651 } 3652 } 3653 3654 skb = validate_xmit_xfrm(skb, features, again); 3655 3656 return skb; 3657 3658 out_kfree_skb: 3659 kfree_skb(skb); 3660 out_null: 3661 dev_core_stats_tx_dropped_inc(dev); 3662 return NULL; 3663 } 3664 3665 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3666 { 3667 struct sk_buff *next, *head = NULL, *tail; 3668 3669 for (; skb != NULL; skb = next) { 3670 next = skb->next; 3671 skb_mark_not_on_list(skb); 3672 3673 /* in case skb wont be segmented, point to itself */ 3674 skb->prev = skb; 3675 3676 skb = validate_xmit_skb(skb, dev, again); 3677 if (!skb) 3678 continue; 3679 3680 if (!head) 3681 head = skb; 3682 else 3683 tail->next = skb; 3684 /* If skb was segmented, skb->prev points to 3685 * the last segment. If not, it still contains skb. 
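		 * Either way the tail update that follows ends up pointing at
		 * the last skb appended, so GSO segments stay in their
		 * original order on the returned list.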
3686 */ 3687 tail = skb->prev; 3688 } 3689 return head; 3690 } 3691 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3692 3693 static void qdisc_pkt_len_init(struct sk_buff *skb) 3694 { 3695 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3696 3697 qdisc_skb_cb(skb)->pkt_len = skb->len; 3698 3699 /* To get more precise estimation of bytes sent on wire, 3700 * we add to pkt_len the headers size of all segments 3701 */ 3702 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3703 u16 gso_segs = shinfo->gso_segs; 3704 unsigned int hdr_len; 3705 3706 /* mac layer + network layer */ 3707 hdr_len = skb_transport_offset(skb); 3708 3709 /* + transport layer */ 3710 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3711 const struct tcphdr *th; 3712 struct tcphdr _tcphdr; 3713 3714 th = skb_header_pointer(skb, hdr_len, 3715 sizeof(_tcphdr), &_tcphdr); 3716 if (likely(th)) 3717 hdr_len += __tcp_hdrlen(th); 3718 } else { 3719 struct udphdr _udphdr; 3720 3721 if (skb_header_pointer(skb, hdr_len, 3722 sizeof(_udphdr), &_udphdr)) 3723 hdr_len += sizeof(struct udphdr); 3724 } 3725 3726 if (shinfo->gso_type & SKB_GSO_DODGY) 3727 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3728 shinfo->gso_size); 3729 3730 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3731 } 3732 } 3733 3734 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3735 struct sk_buff **to_free, 3736 struct netdev_queue *txq) 3737 { 3738 int rc; 3739 3740 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3741 if (rc == NET_XMIT_SUCCESS) 3742 trace_qdisc_enqueue(q, txq, skb); 3743 return rc; 3744 } 3745 3746 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3747 struct net_device *dev, 3748 struct netdev_queue *txq) 3749 { 3750 spinlock_t *root_lock = qdisc_lock(q); 3751 struct sk_buff *to_free = NULL; 3752 bool contended; 3753 int rc; 3754 3755 qdisc_calculate_pkt_len(skb, q); 3756 3757 if (q->flags & TCQ_F_NOLOCK) { 3758 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3759 qdisc_run_begin(q)) { 3760 /* Retest nolock_qdisc_is_empty() within the protection 3761 * of q->seqlock to protect from racing with requeuing. 3762 */ 3763 if (unlikely(!nolock_qdisc_is_empty(q))) { 3764 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3765 __qdisc_run(q); 3766 qdisc_run_end(q); 3767 3768 goto no_lock_out; 3769 } 3770 3771 qdisc_bstats_cpu_update(q, skb); 3772 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3773 !nolock_qdisc_is_empty(q)) 3774 __qdisc_run(q); 3775 3776 qdisc_run_end(q); 3777 return NET_XMIT_SUCCESS; 3778 } 3779 3780 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3781 qdisc_run(q); 3782 3783 no_lock_out: 3784 if (unlikely(to_free)) 3785 kfree_skb_list_reason(to_free, 3786 SKB_DROP_REASON_QDISC_DROP); 3787 return rc; 3788 } 3789 3790 /* 3791 * Heuristic to force contended enqueues to serialize on a 3792 * separate lock before trying to get qdisc main lock. 3793 * This permits qdisc->running owner to get the lock more 3794 * often and dequeue packets faster. 3795 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 3796 * and then other tasks will only enqueue packets. The packets will be 3797 * sent after the qdisc owner is scheduled again. To prevent this 3798 * scenario the task always serialize on the lock. 
3799 */ 3800 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 3801 if (unlikely(contended)) 3802 spin_lock(&q->busylock); 3803 3804 spin_lock(root_lock); 3805 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3806 __qdisc_drop(skb, &to_free); 3807 rc = NET_XMIT_DROP; 3808 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3809 qdisc_run_begin(q)) { 3810 /* 3811 * This is a work-conserving queue; there are no old skbs 3812 * waiting to be sent out; and the qdisc is not running - 3813 * xmit the skb directly. 3814 */ 3815 3816 qdisc_bstats_update(q, skb); 3817 3818 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3819 if (unlikely(contended)) { 3820 spin_unlock(&q->busylock); 3821 contended = false; 3822 } 3823 __qdisc_run(q); 3824 } 3825 3826 qdisc_run_end(q); 3827 rc = NET_XMIT_SUCCESS; 3828 } else { 3829 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3830 if (qdisc_run_begin(q)) { 3831 if (unlikely(contended)) { 3832 spin_unlock(&q->busylock); 3833 contended = false; 3834 } 3835 __qdisc_run(q); 3836 qdisc_run_end(q); 3837 } 3838 } 3839 spin_unlock(root_lock); 3840 if (unlikely(to_free)) 3841 kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP); 3842 if (unlikely(contended)) 3843 spin_unlock(&q->busylock); 3844 return rc; 3845 } 3846 3847 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3848 static void skb_update_prio(struct sk_buff *skb) 3849 { 3850 const struct netprio_map *map; 3851 const struct sock *sk; 3852 unsigned int prioidx; 3853 3854 if (skb->priority) 3855 return; 3856 map = rcu_dereference_bh(skb->dev->priomap); 3857 if (!map) 3858 return; 3859 sk = skb_to_full_sk(skb); 3860 if (!sk) 3861 return; 3862 3863 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3864 3865 if (prioidx < map->priomap_len) 3866 skb->priority = map->priomap[prioidx]; 3867 } 3868 #else 3869 #define skb_update_prio(skb) 3870 #endif 3871 3872 /** 3873 * dev_loopback_xmit - loop back @skb 3874 * @net: network namespace this loopback is happening in 3875 * @sk: sk needed to be a netfilter okfn 3876 * @skb: buffer to transmit 3877 */ 3878 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3879 { 3880 skb_reset_mac_header(skb); 3881 __skb_pull(skb, skb_network_offset(skb)); 3882 skb->pkt_type = PACKET_LOOPBACK; 3883 if (skb->ip_summed == CHECKSUM_NONE) 3884 skb->ip_summed = CHECKSUM_UNNECESSARY; 3885 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb)); 3886 skb_dst_force(skb); 3887 netif_rx(skb); 3888 return 0; 3889 } 3890 EXPORT_SYMBOL(dev_loopback_xmit); 3891 3892 #ifdef CONFIG_NET_EGRESS 3893 static struct netdev_queue * 3894 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) 3895 { 3896 int qm = skb_get_queue_mapping(skb); 3897 3898 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 3899 } 3900 3901 static bool netdev_xmit_txqueue_skipped(void) 3902 { 3903 return __this_cpu_read(softnet_data.xmit.skip_txqueue); 3904 } 3905 3906 void netdev_xmit_skip_txqueue(bool skip) 3907 { 3908 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 3909 } 3910 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3911 #endif /* CONFIG_NET_EGRESS */ 3912 3913 #ifdef CONFIG_NET_XGRESS 3914 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb, 3915 enum skb_drop_reason *drop_reason) 3916 { 3917 int ret = TC_ACT_UNSPEC; 3918 #ifdef CONFIG_NET_CLS_ACT 3919 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq); 3920 struct tcf_result res; 3921 3922 if (!miniq) 3923 return ret; 3924 3925 tc_skb_cb(skb)->mru = 0; 3926 
tc_skb_cb(skb)->post_ct = false; 3927 res.drop_reason = *drop_reason; 3928 3929 mini_qdisc_bstats_cpu_update(miniq, skb); 3930 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false); 3931 /* Only tcf related quirks below. */ 3932 switch (ret) { 3933 case TC_ACT_SHOT: 3934 *drop_reason = res.drop_reason; 3935 mini_qdisc_qstats_cpu_drop(miniq); 3936 break; 3937 case TC_ACT_OK: 3938 case TC_ACT_RECLASSIFY: 3939 skb->tc_index = TC_H_MIN(res.classid); 3940 break; 3941 } 3942 #endif /* CONFIG_NET_CLS_ACT */ 3943 return ret; 3944 } 3945 3946 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key); 3947 3948 void tcx_inc(void) 3949 { 3950 static_branch_inc(&tcx_needed_key); 3951 } 3952 3953 void tcx_dec(void) 3954 { 3955 static_branch_dec(&tcx_needed_key); 3956 } 3957 3958 static __always_inline enum tcx_action_base 3959 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, 3960 const bool needs_mac) 3961 { 3962 const struct bpf_mprog_fp *fp; 3963 const struct bpf_prog *prog; 3964 int ret = TCX_NEXT; 3965 3966 if (needs_mac) 3967 __skb_push(skb, skb->mac_len); 3968 bpf_mprog_foreach_prog(entry, fp, prog) { 3969 bpf_compute_data_pointers(skb); 3970 ret = bpf_prog_run(prog, skb); 3971 if (ret != TCX_NEXT) 3972 break; 3973 } 3974 if (needs_mac) 3975 __skb_pull(skb, skb->mac_len); 3976 return tcx_action_code(skb, ret); 3977 } 3978 3979 static __always_inline struct sk_buff * 3980 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 3981 struct net_device *orig_dev, bool *another) 3982 { 3983 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress); 3984 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS; 3985 int sch_ret; 3986 3987 if (!entry) 3988 return skb; 3989 if (*pt_prev) { 3990 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3991 *pt_prev = NULL; 3992 } 3993 3994 qdisc_skb_cb(skb)->pkt_len = skb->len; 3995 tcx_set_ingress(skb, true); 3996 3997 if (static_branch_unlikely(&tcx_needed_key)) { 3998 sch_ret = tcx_run(entry, skb, true); 3999 if (sch_ret != TC_ACT_UNSPEC) 4000 goto ingress_verdict; 4001 } 4002 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason); 4003 ingress_verdict: 4004 switch (sch_ret) { 4005 case TC_ACT_REDIRECT: 4006 /* skb_mac_header check was done by BPF, so we can safely 4007 * push the L2 header back before redirecting to another 4008 * netdev. 4009 */ 4010 __skb_push(skb, skb->mac_len); 4011 if (skb_do_redirect(skb) == -EAGAIN) { 4012 __skb_pull(skb, skb->mac_len); 4013 *another = true; 4014 break; 4015 } 4016 *ret = NET_RX_SUCCESS; 4017 return NULL; 4018 case TC_ACT_SHOT: 4019 kfree_skb_reason(skb, drop_reason); 4020 *ret = NET_RX_DROP; 4021 return NULL; 4022 /* used by tc_run */ 4023 case TC_ACT_STOLEN: 4024 case TC_ACT_QUEUED: 4025 case TC_ACT_TRAP: 4026 consume_skb(skb); 4027 fallthrough; 4028 case TC_ACT_CONSUMED: 4029 *ret = NET_RX_SUCCESS; 4030 return NULL; 4031 } 4032 4033 return skb; 4034 } 4035 4036 static __always_inline struct sk_buff * 4037 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 4038 { 4039 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress); 4040 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS; 4041 int sch_ret; 4042 4043 if (!entry) 4044 return skb; 4045 4046 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was 4047 * already set by the caller. 
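	 * (In this file that caller is the egress hook in __dev_queue_xmit(),
	 * further down, which runs qdisc_pkt_len_init() and
	 * tcx_set_ingress(skb, false) before handing the skb here.)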
4048 */ 4049 if (static_branch_unlikely(&tcx_needed_key)) { 4050 sch_ret = tcx_run(entry, skb, false); 4051 if (sch_ret != TC_ACT_UNSPEC) 4052 goto egress_verdict; 4053 } 4054 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason); 4055 egress_verdict: 4056 switch (sch_ret) { 4057 case TC_ACT_REDIRECT: 4058 /* No need to push/pop skb's mac_header here on egress! */ 4059 skb_do_redirect(skb); 4060 *ret = NET_XMIT_SUCCESS; 4061 return NULL; 4062 case TC_ACT_SHOT: 4063 kfree_skb_reason(skb, drop_reason); 4064 *ret = NET_XMIT_DROP; 4065 return NULL; 4066 /* used by tc_run */ 4067 case TC_ACT_STOLEN: 4068 case TC_ACT_QUEUED: 4069 case TC_ACT_TRAP: 4070 consume_skb(skb); 4071 fallthrough; 4072 case TC_ACT_CONSUMED: 4073 *ret = NET_XMIT_SUCCESS; 4074 return NULL; 4075 } 4076 4077 return skb; 4078 } 4079 #else 4080 static __always_inline struct sk_buff * 4081 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4082 struct net_device *orig_dev, bool *another) 4083 { 4084 return skb; 4085 } 4086 4087 static __always_inline struct sk_buff * 4088 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 4089 { 4090 return skb; 4091 } 4092 #endif /* CONFIG_NET_XGRESS */ 4093 4094 #ifdef CONFIG_XPS 4095 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 4096 struct xps_dev_maps *dev_maps, unsigned int tci) 4097 { 4098 int tc = netdev_get_prio_tc_map(dev, skb->priority); 4099 struct xps_map *map; 4100 int queue_index = -1; 4101 4102 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 4103 return queue_index; 4104 4105 tci *= dev_maps->num_tc; 4106 tci += tc; 4107 4108 map = rcu_dereference(dev_maps->attr_map[tci]); 4109 if (map) { 4110 if (map->len == 1) 4111 queue_index = map->queues[0]; 4112 else 4113 queue_index = map->queues[reciprocal_scale( 4114 skb_get_hash(skb), map->len)]; 4115 if (unlikely(queue_index >= dev->real_num_tx_queues)) 4116 queue_index = -1; 4117 } 4118 return queue_index; 4119 } 4120 #endif 4121 4122 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 4123 struct sk_buff *skb) 4124 { 4125 #ifdef CONFIG_XPS 4126 struct xps_dev_maps *dev_maps; 4127 struct sock *sk = skb->sk; 4128 int queue_index = -1; 4129 4130 if (!static_key_false(&xps_needed)) 4131 return -1; 4132 4133 rcu_read_lock(); 4134 if (!static_key_false(&xps_rxqs_needed)) 4135 goto get_cpus_map; 4136 4137 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4138 if (dev_maps) { 4139 int tci = sk_rx_queue_get(sk); 4140 4141 if (tci >= 0) 4142 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4143 tci); 4144 } 4145 4146 get_cpus_map: 4147 if (queue_index < 0) { 4148 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4149 if (dev_maps) { 4150 unsigned int tci = skb->sender_cpu - 1; 4151 4152 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4153 tci); 4154 } 4155 } 4156 rcu_read_unlock(); 4157 4158 return queue_index; 4159 #else 4160 return -1; 4161 #endif 4162 } 4163 4164 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4165 struct net_device *sb_dev) 4166 { 4167 return 0; 4168 } 4169 EXPORT_SYMBOL(dev_pick_tx_zero); 4170 4171 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 4172 struct net_device *sb_dev) 4173 { 4174 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 4175 } 4176 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 4177 4178 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4179 struct net_device *sb_dev) 4180 { 4181 struct sock *sk = skb->sk; 4182 int 
queue_index = sk_tx_queue_get(sk);

	sb_dev = sb_dev ? : dev;

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, sb_dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, sb_dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);

struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
		else
			queue_index = netdev_pick_tx(dev, skb, sb_dev);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 * __dev_queue_xmit() - transmit a buffer
 * @skb: buffer to transmit
 * @sb_dev: subordinate device used for L2 forwarding offload
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *
 * Regardless of the return value, the skb is consumed, so it is not
 * possible to simply retry a failed send by calling this function again.
 * (You can bump the refcount before sending to hold a reference for a
 * retry if you are careful.)
 *
 * Return:
 * * 0 - buffer successfully transmitted
 * * positive qdisc return code - NET_XMIT_DROP etc.
 * * negative errno - other errors
 */
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq = NULL;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);
	skb_assert_len(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. This also
	 * stops preemption for RCU.
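	 * Keeping BHs off also keeps us on this CPU, which the per-CPU
	 * xmit recursion accounting (dev_xmit_recursion()) further down
	 * depends on.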
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
	tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
		if (nf_hook_egress_active()) {
			skb = nf_hook_egress(skb, &rc, dev);
			if (!skb)
				goto out;
		}

		netdev_xmit_skip_txqueue(false);

		nf_skip_egress(skb, true);
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
		nf_skip_egress(skb, false);

		if (netdev_xmit_txqueue_skipped())
			txq = netdev_tx_queue_mapping(dev, skb);
	}
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's still hot in this CPU's cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	if (!txq)
		txq = netdev_core_pick_tx(dev, skb, sb_dev);

	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. This is the common case for software
	 * devices: loopback, all sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible that they rely on the protection
	 * we provide here.
	 *
	 * Check this and take the lock. It is not prone to deadlocks.
	 * Either way, bypassing the noqueue qdisc like this is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		/* Other cpus might concurrently change txq->xmit_lock_owner
		 * to -1 or to their cpu id, but not to our id.
		 */
		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
			if (dev_xmit_recursion())
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				dev_xmit_recursion_inc();
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				dev_xmit_recursion_dec();
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected!
It is possible, 4358 * unfortunately 4359 */ 4360 recursion_alert: 4361 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4362 dev->name); 4363 } 4364 } 4365 4366 rc = -ENETDOWN; 4367 rcu_read_unlock_bh(); 4368 4369 dev_core_stats_tx_dropped_inc(dev); 4370 kfree_skb_list(skb); 4371 return rc; 4372 out: 4373 rcu_read_unlock_bh(); 4374 return rc; 4375 } 4376 EXPORT_SYMBOL(__dev_queue_xmit); 4377 4378 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4379 { 4380 struct net_device *dev = skb->dev; 4381 struct sk_buff *orig_skb = skb; 4382 struct netdev_queue *txq; 4383 int ret = NETDEV_TX_BUSY; 4384 bool again = false; 4385 4386 if (unlikely(!netif_running(dev) || 4387 !netif_carrier_ok(dev))) 4388 goto drop; 4389 4390 skb = validate_xmit_skb_list(skb, dev, &again); 4391 if (skb != orig_skb) 4392 goto drop; 4393 4394 skb_set_queue_mapping(skb, queue_id); 4395 txq = skb_get_tx_queue(dev, skb); 4396 4397 local_bh_disable(); 4398 4399 dev_xmit_recursion_inc(); 4400 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4401 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4402 ret = netdev_start_xmit(skb, dev, txq, false); 4403 HARD_TX_UNLOCK(dev, txq); 4404 dev_xmit_recursion_dec(); 4405 4406 local_bh_enable(); 4407 return ret; 4408 drop: 4409 dev_core_stats_tx_dropped_inc(dev); 4410 kfree_skb_list(skb); 4411 return NET_XMIT_DROP; 4412 } 4413 EXPORT_SYMBOL(__dev_direct_xmit); 4414 4415 /************************************************************************* 4416 * Receiver routines 4417 *************************************************************************/ 4418 4419 int netdev_max_backlog __read_mostly = 1000; 4420 EXPORT_SYMBOL(netdev_max_backlog); 4421 4422 int netdev_tstamp_prequeue __read_mostly = 1; 4423 unsigned int sysctl_skb_defer_max __read_mostly = 64; 4424 int netdev_budget __read_mostly = 300; 4425 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4426 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4427 int weight_p __read_mostly = 64; /* old backlog weight */ 4428 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4429 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4430 int dev_rx_weight __read_mostly = 64; 4431 int dev_tx_weight __read_mostly = 64; 4432 4433 /* Called with irq disabled */ 4434 static inline void ____napi_schedule(struct softnet_data *sd, 4435 struct napi_struct *napi) 4436 { 4437 struct task_struct *thread; 4438 4439 lockdep_assert_irqs_disabled(); 4440 4441 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4442 /* Paired with smp_mb__before_atomic() in 4443 * napi_enable()/dev_set_threaded(). 4444 * Use READ_ONCE() to guarantee a complete 4445 * read on napi->thread. Only call 4446 * wake_up_process() when it's not NULL. 4447 */ 4448 thread = READ_ONCE(napi->thread); 4449 if (thread) { 4450 /* Avoid doing set_bit() if the thread is in 4451 * INTERRUPTIBLE state, cause napi_thread_wait() 4452 * makes sure to proceed with napi polling 4453 * if the thread is explicitly woken from here. 4454 */ 4455 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE) 4456 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4457 wake_up_process(thread); 4458 return; 4459 } 4460 } 4461 4462 list_add_tail(&napi->poll_list, &sd->poll_list); 4463 WRITE_ONCE(napi->list_owner, smp_processor_id()); 4464 /* If not called from net_rx_action() 4465 * we have to raise NET_RX_SOFTIRQ. 
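	 * When we are already inside net_rx_action() the poll_list we just
	 * appended to is looked at again before softirqs are re-enabled,
	 * so raising NET_RX_SOFTIRQ once more would be redundant.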
4466 */ 4467 if (!sd->in_net_rx_action) 4468 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4469 } 4470 4471 #ifdef CONFIG_RPS 4472 4473 /* One global table that all flow-based protocols share. */ 4474 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4475 EXPORT_SYMBOL(rps_sock_flow_table); 4476 u32 rps_cpu_mask __read_mostly; 4477 EXPORT_SYMBOL(rps_cpu_mask); 4478 4479 struct static_key_false rps_needed __read_mostly; 4480 EXPORT_SYMBOL(rps_needed); 4481 struct static_key_false rfs_needed __read_mostly; 4482 EXPORT_SYMBOL(rfs_needed); 4483 4484 static struct rps_dev_flow * 4485 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4486 struct rps_dev_flow *rflow, u16 next_cpu) 4487 { 4488 if (next_cpu < nr_cpu_ids) { 4489 #ifdef CONFIG_RFS_ACCEL 4490 struct netdev_rx_queue *rxqueue; 4491 struct rps_dev_flow_table *flow_table; 4492 struct rps_dev_flow *old_rflow; 4493 u32 flow_id; 4494 u16 rxq_index; 4495 int rc; 4496 4497 /* Should we steer this flow to a different hardware queue? */ 4498 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4499 !(dev->features & NETIF_F_NTUPLE)) 4500 goto out; 4501 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4502 if (rxq_index == skb_get_rx_queue(skb)) 4503 goto out; 4504 4505 rxqueue = dev->_rx + rxq_index; 4506 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4507 if (!flow_table) 4508 goto out; 4509 flow_id = skb_get_hash(skb) & flow_table->mask; 4510 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4511 rxq_index, flow_id); 4512 if (rc < 0) 4513 goto out; 4514 old_rflow = rflow; 4515 rflow = &flow_table->flows[flow_id]; 4516 rflow->filter = rc; 4517 if (old_rflow->filter == rflow->filter) 4518 old_rflow->filter = RPS_NO_FILTER; 4519 out: 4520 #endif 4521 rflow->last_qtail = 4522 per_cpu(softnet_data, next_cpu).input_queue_head; 4523 } 4524 4525 rflow->cpu = next_cpu; 4526 return rflow; 4527 } 4528 4529 /* 4530 * get_rps_cpu is called from netif_receive_skb and returns the target 4531 * CPU from the RPS map of the receiving queue for a given skb. 4532 * rcu_read_lock must be held on entry. 4533 */ 4534 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4535 struct rps_dev_flow **rflowp) 4536 { 4537 const struct rps_sock_flow_table *sock_flow_table; 4538 struct netdev_rx_queue *rxqueue = dev->_rx; 4539 struct rps_dev_flow_table *flow_table; 4540 struct rps_map *map; 4541 int cpu = -1; 4542 u32 tcpu; 4543 u32 hash; 4544 4545 if (skb_rx_queue_recorded(skb)) { 4546 u16 index = skb_get_rx_queue(skb); 4547 4548 if (unlikely(index >= dev->real_num_rx_queues)) { 4549 WARN_ONCE(dev->real_num_rx_queues > 1, 4550 "%s received packet on queue %u, but number " 4551 "of RX queues is %u\n", 4552 dev->name, index, dev->real_num_rx_queues); 4553 goto done; 4554 } 4555 rxqueue += index; 4556 } 4557 4558 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4559 4560 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4561 map = rcu_dereference(rxqueue->rps_map); 4562 if (!flow_table && !map) 4563 goto done; 4564 4565 skb_reset_network_header(skb); 4566 hash = skb_get_hash(skb); 4567 if (!hash) 4568 goto done; 4569 4570 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4571 if (flow_table && sock_flow_table) { 4572 struct rps_dev_flow *rflow; 4573 u32 next_cpu; 4574 u32 ident; 4575 4576 /* First check into global flow table if there is a match. 4577 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). 
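	 * Illustrative sketch of the recorded layout: the writer side stores
	 * roughly
	 *
	 *	ents[hash & mask] = (hash & ~rps_cpu_mask) | raw_smp_processor_id();
	 *
	 * so the upper bits identify the flow and the low bits name the CPU
	 * that last ran recvmsg() for it; that is what the ident checks
	 * below unpack.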
4578 */ 4579 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); 4580 if ((ident ^ hash) & ~rps_cpu_mask) 4581 goto try_rps; 4582 4583 next_cpu = ident & rps_cpu_mask; 4584 4585 /* OK, now we know there is a match, 4586 * we can look at the local (per receive queue) flow table 4587 */ 4588 rflow = &flow_table->flows[hash & flow_table->mask]; 4589 tcpu = rflow->cpu; 4590 4591 /* 4592 * If the desired CPU (where last recvmsg was done) is 4593 * different from current CPU (one in the rx-queue flow 4594 * table entry), switch if one of the following holds: 4595 * - Current CPU is unset (>= nr_cpu_ids). 4596 * - Current CPU is offline. 4597 * - The current CPU's queue tail has advanced beyond the 4598 * last packet that was enqueued using this table entry. 4599 * This guarantees that all previous packets for the flow 4600 * have been dequeued, thus preserving in order delivery. 4601 */ 4602 if (unlikely(tcpu != next_cpu) && 4603 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4604 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4605 rflow->last_qtail)) >= 0)) { 4606 tcpu = next_cpu; 4607 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4608 } 4609 4610 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4611 *rflowp = rflow; 4612 cpu = tcpu; 4613 goto done; 4614 } 4615 } 4616 4617 try_rps: 4618 4619 if (map) { 4620 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4621 if (cpu_online(tcpu)) { 4622 cpu = tcpu; 4623 goto done; 4624 } 4625 } 4626 4627 done: 4628 return cpu; 4629 } 4630 4631 #ifdef CONFIG_RFS_ACCEL 4632 4633 /** 4634 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4635 * @dev: Device on which the filter was set 4636 * @rxq_index: RX queue index 4637 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4638 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4639 * 4640 * Drivers that implement ndo_rx_flow_steer() should periodically call 4641 * this function for each installed filter and remove the filters for 4642 * which it returns %true. 4643 */ 4644 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4645 u32 flow_id, u16 filter_id) 4646 { 4647 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4648 struct rps_dev_flow_table *flow_table; 4649 struct rps_dev_flow *rflow; 4650 bool expire = true; 4651 unsigned int cpu; 4652 4653 rcu_read_lock(); 4654 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4655 if (flow_table && flow_id <= flow_table->mask) { 4656 rflow = &flow_table->flows[flow_id]; 4657 cpu = READ_ONCE(rflow->cpu); 4658 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4659 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4660 rflow->last_qtail) < 4661 (int)(10 * flow_table->mask))) 4662 expire = false; 4663 } 4664 rcu_read_unlock(); 4665 return expire; 4666 } 4667 EXPORT_SYMBOL(rps_may_expire_flow); 4668 4669 #endif /* CONFIG_RFS_ACCEL */ 4670 4671 /* Called from hardirq (IPI) context */ 4672 static void rps_trigger_softirq(void *data) 4673 { 4674 struct softnet_data *sd = data; 4675 4676 ____napi_schedule(sd, &sd->backlog); 4677 sd->received_rps++; 4678 } 4679 4680 #endif /* CONFIG_RPS */ 4681 4682 /* Called from hardirq (IPI) context */ 4683 static void trigger_rx_softirq(void *data) 4684 { 4685 struct softnet_data *sd = data; 4686 4687 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4688 smp_store_release(&sd->defer_ipi_scheduled, 0); 4689 } 4690 4691 /* 4692 * After we queued a packet into sd->input_pkt_queue, 4693 * we need to make sure this queue is serviced soon. 
4694 * 4695 * - If this is another cpu queue, link it to our rps_ipi_list, 4696 * and make sure we will process rps_ipi_list from net_rx_action(). 4697 * 4698 * - If this is our own queue, NAPI schedule our backlog. 4699 * Note that this also raises NET_RX_SOFTIRQ. 4700 */ 4701 static void napi_schedule_rps(struct softnet_data *sd) 4702 { 4703 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4704 4705 #ifdef CONFIG_RPS 4706 if (sd != mysd) { 4707 sd->rps_ipi_next = mysd->rps_ipi_list; 4708 mysd->rps_ipi_list = sd; 4709 4710 /* If not called from net_rx_action() or napi_threaded_poll() 4711 * we have to raise NET_RX_SOFTIRQ. 4712 */ 4713 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll) 4714 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4715 return; 4716 } 4717 #endif /* CONFIG_RPS */ 4718 __napi_schedule_irqoff(&mysd->backlog); 4719 } 4720 4721 #ifdef CONFIG_NET_FLOW_LIMIT 4722 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4723 #endif 4724 4725 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4726 { 4727 #ifdef CONFIG_NET_FLOW_LIMIT 4728 struct sd_flow_limit *fl; 4729 struct softnet_data *sd; 4730 unsigned int old_flow, new_flow; 4731 4732 if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) 4733 return false; 4734 4735 sd = this_cpu_ptr(&softnet_data); 4736 4737 rcu_read_lock(); 4738 fl = rcu_dereference(sd->flow_limit); 4739 if (fl) { 4740 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4741 old_flow = fl->history[fl->history_head]; 4742 fl->history[fl->history_head] = new_flow; 4743 4744 fl->history_head++; 4745 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4746 4747 if (likely(fl->buckets[old_flow])) 4748 fl->buckets[old_flow]--; 4749 4750 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4751 fl->count++; 4752 rcu_read_unlock(); 4753 return true; 4754 } 4755 } 4756 rcu_read_unlock(); 4757 #endif 4758 return false; 4759 } 4760 4761 /* 4762 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4763 * queue (may be a remote CPU queue). 
4764 */ 4765 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4766 unsigned int *qtail) 4767 { 4768 enum skb_drop_reason reason; 4769 struct softnet_data *sd; 4770 unsigned long flags; 4771 unsigned int qlen; 4772 4773 reason = SKB_DROP_REASON_NOT_SPECIFIED; 4774 sd = &per_cpu(softnet_data, cpu); 4775 4776 rps_lock_irqsave(sd, &flags); 4777 if (!netif_running(skb->dev)) 4778 goto drop; 4779 qlen = skb_queue_len(&sd->input_pkt_queue); 4780 if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { 4781 if (qlen) { 4782 enqueue: 4783 __skb_queue_tail(&sd->input_pkt_queue, skb); 4784 input_queue_tail_incr_save(sd, qtail); 4785 rps_unlock_irq_restore(sd, &flags); 4786 return NET_RX_SUCCESS; 4787 } 4788 4789 /* Schedule NAPI for backlog device 4790 * We can use non atomic operation since we own the queue lock 4791 */ 4792 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 4793 napi_schedule_rps(sd); 4794 goto enqueue; 4795 } 4796 reason = SKB_DROP_REASON_CPU_BACKLOG; 4797 4798 drop: 4799 sd->dropped++; 4800 rps_unlock_irq_restore(sd, &flags); 4801 4802 dev_core_stats_rx_dropped_inc(skb->dev); 4803 kfree_skb_reason(skb, reason); 4804 return NET_RX_DROP; 4805 } 4806 4807 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4808 { 4809 struct net_device *dev = skb->dev; 4810 struct netdev_rx_queue *rxqueue; 4811 4812 rxqueue = dev->_rx; 4813 4814 if (skb_rx_queue_recorded(skb)) { 4815 u16 index = skb_get_rx_queue(skb); 4816 4817 if (unlikely(index >= dev->real_num_rx_queues)) { 4818 WARN_ONCE(dev->real_num_rx_queues > 1, 4819 "%s received packet on queue %u, but number " 4820 "of RX queues is %u\n", 4821 dev->name, index, dev->real_num_rx_queues); 4822 4823 return rxqueue; /* Return first rxqueue */ 4824 } 4825 rxqueue += index; 4826 } 4827 return rxqueue; 4828 } 4829 4830 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4831 struct bpf_prog *xdp_prog) 4832 { 4833 void *orig_data, *orig_data_end, *hard_start; 4834 struct netdev_rx_queue *rxqueue; 4835 bool orig_bcast, orig_host; 4836 u32 mac_len, frame_sz; 4837 __be16 orig_eth_type; 4838 struct ethhdr *eth; 4839 u32 metalen, act; 4840 int off; 4841 4842 /* The XDP program wants to see the packet starting at the MAC 4843 * header. 
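	 * mac_len computed below is how far skb->data currently sits past
	 * the MAC header; the buffer handed to the program is rewound by
	 * that amount and pushed back again for XDP_TX/XDP_REDIRECT.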
4844 */ 4845 mac_len = skb->data - skb_mac_header(skb); 4846 hard_start = skb->data - skb_headroom(skb); 4847 4848 /* SKB "head" area always have tailroom for skb_shared_info */ 4849 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4850 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4851 4852 rxqueue = netif_get_rxqueue(skb); 4853 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4854 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4855 skb_headlen(skb) + mac_len, true); 4856 4857 orig_data_end = xdp->data_end; 4858 orig_data = xdp->data; 4859 eth = (struct ethhdr *)xdp->data; 4860 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4861 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4862 orig_eth_type = eth->h_proto; 4863 4864 act = bpf_prog_run_xdp(xdp_prog, xdp); 4865 4866 /* check if bpf_xdp_adjust_head was used */ 4867 off = xdp->data - orig_data; 4868 if (off) { 4869 if (off > 0) 4870 __skb_pull(skb, off); 4871 else if (off < 0) 4872 __skb_push(skb, -off); 4873 4874 skb->mac_header += off; 4875 skb_reset_network_header(skb); 4876 } 4877 4878 /* check if bpf_xdp_adjust_tail was used */ 4879 off = xdp->data_end - orig_data_end; 4880 if (off != 0) { 4881 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4882 skb->len += off; /* positive on grow, negative on shrink */ 4883 } 4884 4885 /* check if XDP changed eth hdr such SKB needs update */ 4886 eth = (struct ethhdr *)xdp->data; 4887 if ((orig_eth_type != eth->h_proto) || 4888 (orig_host != ether_addr_equal_64bits(eth->h_dest, 4889 skb->dev->dev_addr)) || 4890 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4891 __skb_push(skb, ETH_HLEN); 4892 skb->pkt_type = PACKET_HOST; 4893 skb->protocol = eth_type_trans(skb, skb->dev); 4894 } 4895 4896 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4897 * before calling us again on redirect path. We do not call do_redirect 4898 * as we leave that up to the caller. 4899 * 4900 * Caller is responsible for managing lifetime of skb (i.e. calling 4901 * kfree_skb in response to actions it cannot handle/XDP_DROP). 4902 */ 4903 switch (act) { 4904 case XDP_REDIRECT: 4905 case XDP_TX: 4906 __skb_push(skb, mac_len); 4907 break; 4908 case XDP_PASS: 4909 metalen = xdp->data - xdp->data_meta; 4910 if (metalen) 4911 skb_metadata_set(skb, metalen); 4912 break; 4913 } 4914 4915 return act; 4916 } 4917 4918 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4919 struct xdp_buff *xdp, 4920 struct bpf_prog *xdp_prog) 4921 { 4922 u32 act = XDP_DROP; 4923 4924 /* Reinjected packets coming from act_mirred or similar should 4925 * not get XDP generic processing. 4926 */ 4927 if (skb_is_redirected(skb)) 4928 return XDP_PASS; 4929 4930 /* XDP packets must be linear and must have sufficient headroom 4931 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4932 * native XDP provides, thus we need to do it here as well. 4933 */ 4934 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4935 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4936 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4937 int troom = skb->tail + skb->data_len - skb->end; 4938 4939 /* In case we have to go down the path and also linearize, 4940 * then lets do the pskb_expand_head() work just once here. 4941 */ 4942 if (pskb_expand_head(skb, 4943 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4944 troom > 0 ? 
troom + 128 : 0, GFP_ATOMIC)) 4945 goto do_drop; 4946 if (skb_linearize(skb)) 4947 goto do_drop; 4948 } 4949 4950 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4951 switch (act) { 4952 case XDP_REDIRECT: 4953 case XDP_TX: 4954 case XDP_PASS: 4955 break; 4956 default: 4957 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act); 4958 fallthrough; 4959 case XDP_ABORTED: 4960 trace_xdp_exception(skb->dev, xdp_prog, act); 4961 fallthrough; 4962 case XDP_DROP: 4963 do_drop: 4964 kfree_skb(skb); 4965 break; 4966 } 4967 4968 return act; 4969 } 4970 4971 /* When doing generic XDP we have to bypass the qdisc layer and the 4972 * network taps in order to match in-driver-XDP behavior. This also means 4973 * that XDP packets are able to starve other packets going through a qdisc, 4974 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX 4975 * queues, so they do not have this starvation issue. 4976 */ 4977 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4978 { 4979 struct net_device *dev = skb->dev; 4980 struct netdev_queue *txq; 4981 bool free_skb = true; 4982 int cpu, rc; 4983 4984 txq = netdev_core_pick_tx(dev, skb, NULL); 4985 cpu = smp_processor_id(); 4986 HARD_TX_LOCK(dev, txq, cpu); 4987 if (!netif_xmit_frozen_or_drv_stopped(txq)) { 4988 rc = netdev_start_xmit(skb, dev, txq, 0); 4989 if (dev_xmit_complete(rc)) 4990 free_skb = false; 4991 } 4992 HARD_TX_UNLOCK(dev, txq); 4993 if (free_skb) { 4994 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4995 dev_core_stats_tx_dropped_inc(dev); 4996 kfree_skb(skb); 4997 } 4998 } 4999 5000 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 5001 5002 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 5003 { 5004 if (xdp_prog) { 5005 struct xdp_buff xdp; 5006 u32 act; 5007 int err; 5008 5009 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 5010 if (act != XDP_PASS) { 5011 switch (act) { 5012 case XDP_REDIRECT: 5013 err = xdp_do_generic_redirect(skb->dev, skb, 5014 &xdp, xdp_prog); 5015 if (err) 5016 goto out_redir; 5017 break; 5018 case XDP_TX: 5019 generic_xdp_tx(skb, xdp_prog); 5020 break; 5021 } 5022 return XDP_DROP; 5023 } 5024 } 5025 return XDP_PASS; 5026 out_redir: 5027 kfree_skb_reason(skb, SKB_DROP_REASON_XDP); 5028 return XDP_DROP; 5029 } 5030 EXPORT_SYMBOL_GPL(do_xdp_generic); 5031 5032 static int netif_rx_internal(struct sk_buff *skb) 5033 { 5034 int ret; 5035 5036 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5037 5038 trace_netif_rx(skb); 5039 5040 #ifdef CONFIG_RPS 5041 if (static_branch_unlikely(&rps_needed)) { 5042 struct rps_dev_flow voidflow, *rflow = &voidflow; 5043 int cpu; 5044 5045 rcu_read_lock(); 5046 5047 cpu = get_rps_cpu(skb->dev, skb, &rflow); 5048 if (cpu < 0) 5049 cpu = smp_processor_id(); 5050 5051 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5052 5053 rcu_read_unlock(); 5054 } else 5055 #endif 5056 { 5057 unsigned int qtail; 5058 5059 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 5060 } 5061 return ret; 5062 } 5063 5064 /** 5065 * __netif_rx - Slightly optimized version of netif_rx 5066 * @skb: buffer to post 5067 * 5068 * This behaves as netif_rx except that it does not disable bottom halves. 5069 * As a result this function may only be invoked from the interrupt context 5070 * (either hard or soft interrupt). 
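 *
 * Illustrative sketch only (the netdev and skb setup are assumed to have
 * happened already): a driver running in its hard interrupt handler can do
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	__netif_rx(skb);
 *
 * Code that may also run in process context should use netif_rx() instead.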
5071 */ 5072 int __netif_rx(struct sk_buff *skb) 5073 { 5074 int ret; 5075 5076 lockdep_assert_once(hardirq_count() | softirq_count()); 5077 5078 trace_netif_rx_entry(skb); 5079 ret = netif_rx_internal(skb); 5080 trace_netif_rx_exit(ret); 5081 return ret; 5082 } 5083 EXPORT_SYMBOL(__netif_rx); 5084 5085 /** 5086 * netif_rx - post buffer to the network code 5087 * @skb: buffer to post 5088 * 5089 * This function receives a packet from a device driver and queues it for 5090 * the upper (protocol) levels to process via the backlog NAPI device. It 5091 * always succeeds. The buffer may be dropped during processing for 5092 * congestion control or by the protocol layers. 5093 * The network buffer is passed via the backlog NAPI device. Modern NIC 5094 * driver should use NAPI and GRO. 5095 * This function can used from interrupt and from process context. The 5096 * caller from process context must not disable interrupts before invoking 5097 * this function. 5098 * 5099 * return values: 5100 * NET_RX_SUCCESS (no congestion) 5101 * NET_RX_DROP (packet was dropped) 5102 * 5103 */ 5104 int netif_rx(struct sk_buff *skb) 5105 { 5106 bool need_bh_off = !(hardirq_count() | softirq_count()); 5107 int ret; 5108 5109 if (need_bh_off) 5110 local_bh_disable(); 5111 trace_netif_rx_entry(skb); 5112 ret = netif_rx_internal(skb); 5113 trace_netif_rx_exit(ret); 5114 if (need_bh_off) 5115 local_bh_enable(); 5116 return ret; 5117 } 5118 EXPORT_SYMBOL(netif_rx); 5119 5120 static __latent_entropy void net_tx_action(struct softirq_action *h) 5121 { 5122 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 5123 5124 if (sd->completion_queue) { 5125 struct sk_buff *clist; 5126 5127 local_irq_disable(); 5128 clist = sd->completion_queue; 5129 sd->completion_queue = NULL; 5130 local_irq_enable(); 5131 5132 while (clist) { 5133 struct sk_buff *skb = clist; 5134 5135 clist = clist->next; 5136 5137 WARN_ON(refcount_read(&skb->users)); 5138 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED)) 5139 trace_consume_skb(skb, net_tx_action); 5140 else 5141 trace_kfree_skb(skb, net_tx_action, 5142 get_kfree_skb_cb(skb)->reason); 5143 5144 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 5145 __kfree_skb(skb); 5146 else 5147 __napi_kfree_skb(skb, 5148 get_kfree_skb_cb(skb)->reason); 5149 } 5150 } 5151 5152 if (sd->output_queue) { 5153 struct Qdisc *head; 5154 5155 local_irq_disable(); 5156 head = sd->output_queue; 5157 sd->output_queue = NULL; 5158 sd->output_queue_tailp = &sd->output_queue; 5159 local_irq_enable(); 5160 5161 rcu_read_lock(); 5162 5163 while (head) { 5164 struct Qdisc *q = head; 5165 spinlock_t *root_lock = NULL; 5166 5167 head = head->next_sched; 5168 5169 /* We need to make sure head->next_sched is read 5170 * before clearing __QDISC_STATE_SCHED 5171 */ 5172 smp_mb__before_atomic(); 5173 5174 if (!(q->flags & TCQ_F_NOLOCK)) { 5175 root_lock = qdisc_lock(q); 5176 spin_lock(root_lock); 5177 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, 5178 &q->state))) { 5179 /* There is a synchronize_net() between 5180 * STATE_DEACTIVATED flag being set and 5181 * qdisc_reset()/some_qdisc_is_busy() in 5182 * dev_deactivate(), so we can safely bail out 5183 * early here to avoid data race between 5184 * qdisc_deactivate() and some_qdisc_is_busy() 5185 * for lockless qdisc. 
5186 */ 5187 clear_bit(__QDISC_STATE_SCHED, &q->state); 5188 continue; 5189 } 5190 5191 clear_bit(__QDISC_STATE_SCHED, &q->state); 5192 qdisc_run(q); 5193 if (root_lock) 5194 spin_unlock(root_lock); 5195 } 5196 5197 rcu_read_unlock(); 5198 } 5199 5200 xfrm_dev_backlog(sd); 5201 } 5202 5203 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 5204 /* This hook is defined here for ATM LANE */ 5205 int (*br_fdb_test_addr_hook)(struct net_device *dev, 5206 unsigned char *addr) __read_mostly; 5207 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 5208 #endif 5209 5210 /** 5211 * netdev_is_rx_handler_busy - check if receive handler is registered 5212 * @dev: device to check 5213 * 5214 * Check if a receive handler is already registered for a given device. 5215 * Return true if there one. 5216 * 5217 * The caller must hold the rtnl_mutex. 5218 */ 5219 bool netdev_is_rx_handler_busy(struct net_device *dev) 5220 { 5221 ASSERT_RTNL(); 5222 return dev && rtnl_dereference(dev->rx_handler); 5223 } 5224 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5225 5226 /** 5227 * netdev_rx_handler_register - register receive handler 5228 * @dev: device to register a handler for 5229 * @rx_handler: receive handler to register 5230 * @rx_handler_data: data pointer that is used by rx handler 5231 * 5232 * Register a receive handler for a device. This handler will then be 5233 * called from __netif_receive_skb. A negative errno code is returned 5234 * on a failure. 5235 * 5236 * The caller must hold the rtnl_mutex. 5237 * 5238 * For a general description of rx_handler, see enum rx_handler_result. 5239 */ 5240 int netdev_rx_handler_register(struct net_device *dev, 5241 rx_handler_func_t *rx_handler, 5242 void *rx_handler_data) 5243 { 5244 if (netdev_is_rx_handler_busy(dev)) 5245 return -EBUSY; 5246 5247 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5248 return -EINVAL; 5249 5250 /* Note: rx_handler_data must be set before rx_handler */ 5251 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5252 rcu_assign_pointer(dev->rx_handler, rx_handler); 5253 5254 return 0; 5255 } 5256 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5257 5258 /** 5259 * netdev_rx_handler_unregister - unregister receive handler 5260 * @dev: device to unregister a handler from 5261 * 5262 * Unregister a receive handler from a device. 5263 * 5264 * The caller must hold the rtnl_mutex. 5265 */ 5266 void netdev_rx_handler_unregister(struct net_device *dev) 5267 { 5268 5269 ASSERT_RTNL(); 5270 RCU_INIT_POINTER(dev->rx_handler, NULL); 5271 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5272 * section has a guarantee to see a non NULL rx_handler_data 5273 * as well. 5274 */ 5275 synchronize_net(); 5276 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5277 } 5278 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5279 5280 /* 5281 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5282 * the special handling of PFMEMALLOC skbs. 
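 * PFMEMALLOC skbs are allocated from memory reserves and should only reach
 * SOCK_MEMALLOC sockets (e.g. swap over NFS/NBD); packets of any other
 * protocol are dropped in __netif_receive_skb_core().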
5283 */ 5284 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5285 { 5286 switch (skb->protocol) { 5287 case htons(ETH_P_ARP): 5288 case htons(ETH_P_IP): 5289 case htons(ETH_P_IPV6): 5290 case htons(ETH_P_8021Q): 5291 case htons(ETH_P_8021AD): 5292 return true; 5293 default: 5294 return false; 5295 } 5296 } 5297 5298 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5299 int *ret, struct net_device *orig_dev) 5300 { 5301 if (nf_hook_ingress_active(skb)) { 5302 int ingress_retval; 5303 5304 if (*pt_prev) { 5305 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5306 *pt_prev = NULL; 5307 } 5308 5309 rcu_read_lock(); 5310 ingress_retval = nf_hook_ingress(skb); 5311 rcu_read_unlock(); 5312 return ingress_retval; 5313 } 5314 return 0; 5315 } 5316 5317 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5318 struct packet_type **ppt_prev) 5319 { 5320 struct packet_type *ptype, *pt_prev; 5321 rx_handler_func_t *rx_handler; 5322 struct sk_buff *skb = *pskb; 5323 struct net_device *orig_dev; 5324 bool deliver_exact = false; 5325 int ret = NET_RX_DROP; 5326 __be16 type; 5327 5328 net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb); 5329 5330 trace_netif_receive_skb(skb); 5331 5332 orig_dev = skb->dev; 5333 5334 skb_reset_network_header(skb); 5335 if (!skb_transport_header_was_set(skb)) 5336 skb_reset_transport_header(skb); 5337 skb_reset_mac_len(skb); 5338 5339 pt_prev = NULL; 5340 5341 another_round: 5342 skb->skb_iif = skb->dev->ifindex; 5343 5344 __this_cpu_inc(softnet_data.processed); 5345 5346 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5347 int ret2; 5348 5349 migrate_disable(); 5350 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5351 migrate_enable(); 5352 5353 if (ret2 != XDP_PASS) { 5354 ret = NET_RX_DROP; 5355 goto out; 5356 } 5357 } 5358 5359 if (eth_type_vlan(skb->protocol)) { 5360 skb = skb_vlan_untag(skb); 5361 if (unlikely(!skb)) 5362 goto out; 5363 } 5364 5365 if (skb_skip_tc_classify(skb)) 5366 goto skip_classify; 5367 5368 if (pfmemalloc) 5369 goto skip_taps; 5370 5371 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5372 if (pt_prev) 5373 ret = deliver_skb(skb, pt_prev, orig_dev); 5374 pt_prev = ptype; 5375 } 5376 5377 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5378 if (pt_prev) 5379 ret = deliver_skb(skb, pt_prev, orig_dev); 5380 pt_prev = ptype; 5381 } 5382 5383 skip_taps: 5384 #ifdef CONFIG_NET_INGRESS 5385 if (static_branch_unlikely(&ingress_needed_key)) { 5386 bool another = false; 5387 5388 nf_skip_egress(skb, true); 5389 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5390 &another); 5391 if (another) 5392 goto another_round; 5393 if (!skb) 5394 goto out; 5395 5396 nf_skip_egress(skb, false); 5397 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5398 goto out; 5399 } 5400 #endif 5401 skb_reset_redirect(skb); 5402 skip_classify: 5403 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5404 goto drop; 5405 5406 if (skb_vlan_tag_present(skb)) { 5407 if (pt_prev) { 5408 ret = deliver_skb(skb, pt_prev, orig_dev); 5409 pt_prev = NULL; 5410 } 5411 if (vlan_do_receive(&skb)) 5412 goto another_round; 5413 else if (unlikely(!skb)) 5414 goto out; 5415 } 5416 5417 rx_handler = rcu_dereference(skb->dev->rx_handler); 5418 if (rx_handler) { 5419 if (pt_prev) { 5420 ret = deliver_skb(skb, pt_prev, orig_dev); 5421 pt_prev = NULL; 5422 } 5423 switch (rx_handler(&skb)) { 5424 case RX_HANDLER_CONSUMED: 5425 ret = NET_RX_SUCCESS; 5426 goto out; 5427 case RX_HANDLER_ANOTHER: 
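			/* The handler (e.g. bonding) switched skb->dev;
			 * take another pass through the receive path on
			 * the new device.
			 */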
5428 goto another_round; 5429 case RX_HANDLER_EXACT: 5430 deliver_exact = true; 5431 break; 5432 case RX_HANDLER_PASS: 5433 break; 5434 default: 5435 BUG(); 5436 } 5437 } 5438 5439 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5440 check_vlan_id: 5441 if (skb_vlan_tag_get_id(skb)) { 5442 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5443 * find vlan device. 5444 */ 5445 skb->pkt_type = PACKET_OTHERHOST; 5446 } else if (eth_type_vlan(skb->protocol)) { 5447 /* Outer header is 802.1P with vlan 0, inner header is 5448 * 802.1Q or 802.1AD and vlan_do_receive() above could 5449 * not find vlan dev for vlan id 0. 5450 */ 5451 __vlan_hwaccel_clear_tag(skb); 5452 skb = skb_vlan_untag(skb); 5453 if (unlikely(!skb)) 5454 goto out; 5455 if (vlan_do_receive(&skb)) 5456 /* After stripping off 802.1P header with vlan 0 5457 * vlan dev is found for inner header. 5458 */ 5459 goto another_round; 5460 else if (unlikely(!skb)) 5461 goto out; 5462 else 5463 /* We have stripped outer 802.1P vlan 0 header. 5464 * But could not find vlan dev. 5465 * check again for vlan id to set OTHERHOST. 5466 */ 5467 goto check_vlan_id; 5468 } 5469 /* Note: we might in the future use prio bits 5470 * and set skb->priority like in vlan_do_receive() 5471 * For the time being, just ignore Priority Code Point 5472 */ 5473 __vlan_hwaccel_clear_tag(skb); 5474 } 5475 5476 type = skb->protocol; 5477 5478 /* deliver only exact match when indicated */ 5479 if (likely(!deliver_exact)) { 5480 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5481 &ptype_base[ntohs(type) & 5482 PTYPE_HASH_MASK]); 5483 } 5484 5485 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5486 &orig_dev->ptype_specific); 5487 5488 if (unlikely(skb->dev != orig_dev)) { 5489 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5490 &skb->dev->ptype_specific); 5491 } 5492 5493 if (pt_prev) { 5494 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5495 goto drop; 5496 *ppt_prev = pt_prev; 5497 } else { 5498 drop: 5499 if (!deliver_exact) 5500 dev_core_stats_rx_dropped_inc(skb->dev); 5501 else 5502 dev_core_stats_rx_nohandler_inc(skb->dev); 5503 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); 5504 /* Jamal, now you will not able to escape explaining 5505 * me how you were going to use this. :-) 5506 */ 5507 ret = NET_RX_DROP; 5508 } 5509 5510 out: 5511 /* The invariant here is that if *ppt_prev is not NULL 5512 * then skb should also be non-NULL. 5513 * 5514 * Apparently *ppt_prev assignment above holds this invariant due to 5515 * skb dereferencing near it. 5516 */ 5517 *pskb = skb; 5518 return ret; 5519 } 5520 5521 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5522 { 5523 struct net_device *orig_dev = skb->dev; 5524 struct packet_type *pt_prev = NULL; 5525 int ret; 5526 5527 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5528 if (pt_prev) 5529 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5530 skb->dev, pt_prev, orig_dev); 5531 return ret; 5532 } 5533 5534 /** 5535 * netif_receive_skb_core - special purpose version of netif_receive_skb 5536 * @skb: buffer to process 5537 * 5538 * More direct receive version of netif_receive_skb(). It should 5539 * only be used by callers that have a need to skip RPS and Generic XDP. 5540 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5541 * 5542 * This function may only be called from softirq context and interrupts 5543 * should be enabled. 
5544 * 5545 * Return values (usually ignored): 5546 * NET_RX_SUCCESS: no congestion 5547 * NET_RX_DROP: packet was dropped 5548 */ 5549 int netif_receive_skb_core(struct sk_buff *skb) 5550 { 5551 int ret; 5552 5553 rcu_read_lock(); 5554 ret = __netif_receive_skb_one_core(skb, false); 5555 rcu_read_unlock(); 5556 5557 return ret; 5558 } 5559 EXPORT_SYMBOL(netif_receive_skb_core); 5560 5561 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5562 struct packet_type *pt_prev, 5563 struct net_device *orig_dev) 5564 { 5565 struct sk_buff *skb, *next; 5566 5567 if (!pt_prev) 5568 return; 5569 if (list_empty(head)) 5570 return; 5571 if (pt_prev->list_func != NULL) 5572 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5573 ip_list_rcv, head, pt_prev, orig_dev); 5574 else 5575 list_for_each_entry_safe(skb, next, head, list) { 5576 skb_list_del_init(skb); 5577 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5578 } 5579 } 5580 5581 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5582 { 5583 /* Fast-path assumptions: 5584 * - There is no RX handler. 5585 * - Only one packet_type matches. 5586 * If either of these fails, we will end up doing some per-packet 5587 * processing in-line, then handling the 'last ptype' for the whole 5588 * sublist. This can't cause out-of-order delivery to any single ptype, 5589 * because the 'last ptype' must be constant across the sublist, and all 5590 * other ptypes are handled per-packet. 5591 */ 5592 /* Current (common) ptype of sublist */ 5593 struct packet_type *pt_curr = NULL; 5594 /* Current (common) orig_dev of sublist */ 5595 struct net_device *od_curr = NULL; 5596 struct list_head sublist; 5597 struct sk_buff *skb, *next; 5598 5599 INIT_LIST_HEAD(&sublist); 5600 list_for_each_entry_safe(skb, next, head, list) { 5601 struct net_device *orig_dev = skb->dev; 5602 struct packet_type *pt_prev = NULL; 5603 5604 skb_list_del_init(skb); 5605 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5606 if (!pt_prev) 5607 continue; 5608 if (pt_curr != pt_prev || od_curr != orig_dev) { 5609 /* dispatch old sublist */ 5610 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5611 /* start new sublist */ 5612 INIT_LIST_HEAD(&sublist); 5613 pt_curr = pt_prev; 5614 od_curr = orig_dev; 5615 } 5616 list_add_tail(&skb->list, &sublist); 5617 } 5618 5619 /* dispatch final sublist */ 5620 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5621 } 5622 5623 static int __netif_receive_skb(struct sk_buff *skb) 5624 { 5625 int ret; 5626 5627 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5628 unsigned int noreclaim_flag; 5629 5630 /* 5631 * PFMEMALLOC skbs are special, they should 5632 * - be delivered to SOCK_MEMALLOC sockets only 5633 * - stay away from userspace 5634 * - have bounded memory usage 5635 * 5636 * Use PF_MEMALLOC as this saves us from propagating the allocation 5637 * context down to all allocation sites. 5638 */ 5639 noreclaim_flag = memalloc_noreclaim_save(); 5640 ret = __netif_receive_skb_one_core(skb, true); 5641 memalloc_noreclaim_restore(noreclaim_flag); 5642 } else 5643 ret = __netif_receive_skb_one_core(skb, false); 5644 5645 return ret; 5646 } 5647 5648 static void __netif_receive_skb_list(struct list_head *head) 5649 { 5650 unsigned long noreclaim_flag = 0; 5651 struct sk_buff *skb, *next; 5652 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5653 5654 list_for_each_entry_safe(skb, next, head, list) { 5655 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5656 struct list_head sublist; 5657 5658 /* Handle the previous sublist */ 5659 list_cut_before(&sublist, head, &skb->list); 5660 if (!list_empty(&sublist)) 5661 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5662 pfmemalloc = !pfmemalloc; 5663 /* See comments in __netif_receive_skb */ 5664 if (pfmemalloc) 5665 noreclaim_flag = memalloc_noreclaim_save(); 5666 else 5667 memalloc_noreclaim_restore(noreclaim_flag); 5668 } 5669 } 5670 /* Handle the remaining sublist */ 5671 if (!list_empty(head)) 5672 __netif_receive_skb_list_core(head, pfmemalloc); 5673 /* Restore pflags */ 5674 if (pfmemalloc) 5675 memalloc_noreclaim_restore(noreclaim_flag); 5676 } 5677 5678 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5679 { 5680 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5681 struct bpf_prog *new = xdp->prog; 5682 int ret = 0; 5683 5684 switch (xdp->command) { 5685 case XDP_SETUP_PROG: 5686 rcu_assign_pointer(dev->xdp_prog, new); 5687 if (old) 5688 bpf_prog_put(old); 5689 5690 if (old && !new) { 5691 static_branch_dec(&generic_xdp_needed_key); 5692 } else if (new && !old) { 5693 static_branch_inc(&generic_xdp_needed_key); 5694 dev_disable_lro(dev); 5695 dev_disable_gro_hw(dev); 5696 } 5697 break; 5698 5699 default: 5700 ret = -EINVAL; 5701 break; 5702 } 5703 5704 return ret; 5705 } 5706 5707 static int netif_receive_skb_internal(struct sk_buff *skb) 5708 { 5709 int ret; 5710 5711 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5712 5713 if (skb_defer_rx_timestamp(skb)) 5714 return NET_RX_SUCCESS; 5715 5716 rcu_read_lock(); 5717 #ifdef CONFIG_RPS 5718 if (static_branch_unlikely(&rps_needed)) { 5719 struct rps_dev_flow voidflow, *rflow = &voidflow; 5720 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5721 5722 if (cpu >= 0) { 5723 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5724 rcu_read_unlock(); 5725 return ret; 5726 } 5727 } 5728 #endif 5729 ret = __netif_receive_skb(skb); 5730 rcu_read_unlock(); 5731 return ret; 5732 } 5733 5734 void netif_receive_skb_list_internal(struct list_head *head) 5735 { 5736 struct sk_buff *skb, *next; 5737 struct list_head sublist; 5738 5739 INIT_LIST_HEAD(&sublist); 5740 list_for_each_entry_safe(skb, next, head, list) { 5741 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5742 skb_list_del_init(skb); 5743 if (!skb_defer_rx_timestamp(skb)) 5744 list_add_tail(&skb->list, &sublist); 5745 } 5746 list_splice_init(&sublist, head); 5747 5748 rcu_read_lock(); 5749 #ifdef CONFIG_RPS 5750 if (static_branch_unlikely(&rps_needed)) { 5751 list_for_each_entry_safe(skb, next, head, list) { 5752 struct rps_dev_flow voidflow, *rflow = &voidflow; 5753 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5754 5755 if (cpu >= 0) { 5756 /* Will be handled, remove from list */ 5757 skb_list_del_init(skb); 5758 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5759 } 5760 } 5761 } 5762 #endif 5763 __netif_receive_skb_list(head); 5764 rcu_read_unlock(); 5765 } 5766 5767 /** 5768 * netif_receive_skb - process receive buffer from network 5769 * @skb: buffer to process 5770 * 5771 * netif_receive_skb() is the main receive data processing function. 5772 * It always succeeds. The buffer may be dropped during processing 5773 * for congestion control or by the protocol layers. 5774 * 5775 * This function may only be called from softirq context and interrupts 5776 * should be enabled. 
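 *
 * A minimal, illustrative call site (driver details are hypothetical):
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_receive_skb(skb);
 *
 * NAPI drivers normally prefer napi_gro_receive() so that GRO is applied.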
5777 * 5778 * Return values (usually ignored): 5779 * NET_RX_SUCCESS: no congestion 5780 * NET_RX_DROP: packet was dropped 5781 */ 5782 int netif_receive_skb(struct sk_buff *skb) 5783 { 5784 int ret; 5785 5786 trace_netif_receive_skb_entry(skb); 5787 5788 ret = netif_receive_skb_internal(skb); 5789 trace_netif_receive_skb_exit(ret); 5790 5791 return ret; 5792 } 5793 EXPORT_SYMBOL(netif_receive_skb); 5794 5795 /** 5796 * netif_receive_skb_list - process many receive buffers from network 5797 * @head: list of skbs to process. 5798 * 5799 * Since return value of netif_receive_skb() is normally ignored, and 5800 * wouldn't be meaningful for a list, this function returns void. 5801 * 5802 * This function may only be called from softirq context and interrupts 5803 * should be enabled. 5804 */ 5805 void netif_receive_skb_list(struct list_head *head) 5806 { 5807 struct sk_buff *skb; 5808 5809 if (list_empty(head)) 5810 return; 5811 if (trace_netif_receive_skb_list_entry_enabled()) { 5812 list_for_each_entry(skb, head, list) 5813 trace_netif_receive_skb_list_entry(skb); 5814 } 5815 netif_receive_skb_list_internal(head); 5816 trace_netif_receive_skb_list_exit(0); 5817 } 5818 EXPORT_SYMBOL(netif_receive_skb_list); 5819 5820 static DEFINE_PER_CPU(struct work_struct, flush_works); 5821 5822 /* Network device is going away, flush any packets still pending */ 5823 static void flush_backlog(struct work_struct *work) 5824 { 5825 struct sk_buff *skb, *tmp; 5826 struct softnet_data *sd; 5827 5828 local_bh_disable(); 5829 sd = this_cpu_ptr(&softnet_data); 5830 5831 rps_lock_irq_disable(sd); 5832 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5833 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5834 __skb_unlink(skb, &sd->input_pkt_queue); 5835 dev_kfree_skb_irq(skb); 5836 input_queue_head_incr(sd); 5837 } 5838 } 5839 rps_unlock_irq_enable(sd); 5840 5841 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5842 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5843 __skb_unlink(skb, &sd->process_queue); 5844 kfree_skb(skb); 5845 input_queue_head_incr(sd); 5846 } 5847 } 5848 local_bh_enable(); 5849 } 5850 5851 static bool flush_required(int cpu) 5852 { 5853 #if IS_ENABLED(CONFIG_RPS) 5854 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5855 bool do_flush; 5856 5857 rps_lock_irq_disable(sd); 5858 5859 /* as insertion into process_queue happens with the rps lock held, 5860 * process_queue access may race only with dequeue 5861 */ 5862 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5863 !skb_queue_empty_lockless(&sd->process_queue); 5864 rps_unlock_irq_enable(sd); 5865 5866 return do_flush; 5867 #endif 5868 /* without RPS we can't safely check input_pkt_queue: during a 5869 * concurrent remote skb_queue_splice() we can detect as empty both 5870 * input_pkt_queue and process_queue even if the latter could end-up 5871 * containing a lot of packets. 
5872 */ 5873 return true; 5874 } 5875 5876 static void flush_all_backlogs(void) 5877 { 5878 static cpumask_t flush_cpus; 5879 unsigned int cpu; 5880 5881 /* since we are under rtnl lock protection we can use static data 5882 * for the cpumask and avoid allocating on stack the possibly 5883 * large mask 5884 */ 5885 ASSERT_RTNL(); 5886 5887 cpus_read_lock(); 5888 5889 cpumask_clear(&flush_cpus); 5890 for_each_online_cpu(cpu) { 5891 if (flush_required(cpu)) { 5892 queue_work_on(cpu, system_highpri_wq, 5893 per_cpu_ptr(&flush_works, cpu)); 5894 cpumask_set_cpu(cpu, &flush_cpus); 5895 } 5896 } 5897 5898 /* we can have in flight packet[s] on the cpus we are not flushing, 5899 * synchronize_net() in unregister_netdevice_many() will take care of 5900 * them 5901 */ 5902 for_each_cpu(cpu, &flush_cpus) 5903 flush_work(per_cpu_ptr(&flush_works, cpu)); 5904 5905 cpus_read_unlock(); 5906 } 5907 5908 static void net_rps_send_ipi(struct softnet_data *remsd) 5909 { 5910 #ifdef CONFIG_RPS 5911 while (remsd) { 5912 struct softnet_data *next = remsd->rps_ipi_next; 5913 5914 if (cpu_online(remsd->cpu)) 5915 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5916 remsd = next; 5917 } 5918 #endif 5919 } 5920 5921 /* 5922 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5923 * Note: called with local irq disabled, but exits with local irq enabled. 5924 */ 5925 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5926 { 5927 #ifdef CONFIG_RPS 5928 struct softnet_data *remsd = sd->rps_ipi_list; 5929 5930 if (remsd) { 5931 sd->rps_ipi_list = NULL; 5932 5933 local_irq_enable(); 5934 5935 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5936 net_rps_send_ipi(remsd); 5937 } else 5938 #endif 5939 local_irq_enable(); 5940 } 5941 5942 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5943 { 5944 #ifdef CONFIG_RPS 5945 return sd->rps_ipi_list != NULL; 5946 #else 5947 return false; 5948 #endif 5949 } 5950 5951 static int process_backlog(struct napi_struct *napi, int quota) 5952 { 5953 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5954 bool again = true; 5955 int work = 0; 5956 5957 /* Check if we have pending ipi, its better to send them now, 5958 * not waiting net_rx_action() end. 5959 */ 5960 if (sd_has_rps_ipi_waiting(sd)) { 5961 local_irq_disable(); 5962 net_rps_action_and_irq_enable(sd); 5963 } 5964 5965 napi->weight = READ_ONCE(dev_rx_weight); 5966 while (again) { 5967 struct sk_buff *skb; 5968 5969 while ((skb = __skb_dequeue(&sd->process_queue))) { 5970 rcu_read_lock(); 5971 __netif_receive_skb(skb); 5972 rcu_read_unlock(); 5973 input_queue_head_incr(sd); 5974 if (++work >= quota) 5975 return work; 5976 5977 } 5978 5979 rps_lock_irq_disable(sd); 5980 if (skb_queue_empty(&sd->input_pkt_queue)) { 5981 /* 5982 * Inline a custom version of __napi_complete(). 5983 * only current cpu owns and manipulates this napi, 5984 * and NAPI_STATE_SCHED is the only possible flag set 5985 * on backlog. 5986 * We can use a plain write instead of clear_bit(), 5987 * and we dont need an smp_mb() memory barrier. 5988 */ 5989 napi->state = 0; 5990 again = false; 5991 } else { 5992 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5993 &sd->process_queue); 5994 } 5995 rps_unlock_irq_enable(sd); 5996 } 5997 5998 return work; 5999 } 6000 6001 /** 6002 * __napi_schedule - schedule for receive 6003 * @n: entry to schedule 6004 * 6005 * The entry's receive function will be scheduled to run. 
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than:
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new, timeout = 0;
	bool ret = true;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
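	 *
	 * For reference, a minimal sketch (not from this file) of how a driver
	 * poll() routine pairs napi_complete_done() with re-enabling its device
	 * interrupt; the foo_* names are hypothetical:
	 *
	 *	static int foo_poll(struct napi_struct *napi, int budget)
	 *	{
	 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	 *		int work_done = foo_clean_rx_ring(priv, budget);
	 *
	 *		if (work_done < budget && napi_complete_done(napi, work_done))
	 *			foo_unmask_irqs(priv);
	 *		return work_done;
	 *	}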
6079 */ 6080 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6081 NAPIF_STATE_IN_BUSY_POLL))) 6082 return false; 6083 6084 if (work_done) { 6085 if (n->gro_bitmask) 6086 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6087 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 6088 } 6089 if (n->defer_hard_irqs_count > 0) { 6090 n->defer_hard_irqs_count--; 6091 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6092 if (timeout) 6093 ret = false; 6094 } 6095 if (n->gro_bitmask) { 6096 /* When the NAPI instance uses a timeout and keeps postponing 6097 * it, we need to bound somehow the time packets are kept in 6098 * the GRO layer 6099 */ 6100 napi_gro_flush(n, !!timeout); 6101 } 6102 6103 gro_normal_list(n); 6104 6105 if (unlikely(!list_empty(&n->poll_list))) { 6106 /* If n->poll_list is not empty, we need to mask irqs */ 6107 local_irq_save(flags); 6108 list_del_init(&n->poll_list); 6109 local_irq_restore(flags); 6110 } 6111 WRITE_ONCE(n->list_owner, -1); 6112 6113 val = READ_ONCE(n->state); 6114 do { 6115 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6116 6117 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6118 NAPIF_STATE_SCHED_THREADED | 6119 NAPIF_STATE_PREFER_BUSY_POLL); 6120 6121 /* If STATE_MISSED was set, leave STATE_SCHED set, 6122 * because we will call napi->poll() one more time. 6123 * This C code was suggested by Alexander Duyck to help gcc. 6124 */ 6125 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6126 NAPIF_STATE_SCHED; 6127 } while (!try_cmpxchg(&n->state, &val, new)); 6128 6129 if (unlikely(val & NAPIF_STATE_MISSED)) { 6130 __napi_schedule(n); 6131 return false; 6132 } 6133 6134 if (timeout) 6135 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6136 HRTIMER_MODE_REL_PINNED); 6137 return ret; 6138 } 6139 EXPORT_SYMBOL(napi_complete_done); 6140 6141 /* must be called under rcu_read_lock(), as we dont take a reference */ 6142 static struct napi_struct *napi_by_id(unsigned int napi_id) 6143 { 6144 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6145 struct napi_struct *napi; 6146 6147 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6148 if (napi->napi_id == napi_id) 6149 return napi; 6150 6151 return NULL; 6152 } 6153 6154 #if defined(CONFIG_NET_RX_BUSY_POLL) 6155 6156 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6157 { 6158 if (!skip_schedule) { 6159 gro_normal_list(napi); 6160 __napi_schedule(napi); 6161 return; 6162 } 6163 6164 if (napi->gro_bitmask) { 6165 /* flush too old packets 6166 * If HZ < 1000, flush all packets. 6167 */ 6168 napi_gro_flush(napi, HZ >= 1000); 6169 } 6170 6171 gro_normal_list(napi); 6172 clear_bit(NAPI_STATE_SCHED, &napi->state); 6173 } 6174 6175 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6176 u16 budget) 6177 { 6178 bool skip_schedule = false; 6179 unsigned long timeout; 6180 int rc; 6181 6182 /* Busy polling means there is a high chance device driver hard irq 6183 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6184 * set in napi_schedule_prep(). 6185 * Since we are about to call napi->poll() once more, we can safely 6186 * clear NAPI_STATE_MISSED. 6187 * 6188 * Note: x86 could use a single "lock and ..." 
instruction 6189 * to perform these two clear_bit() 6190 */ 6191 clear_bit(NAPI_STATE_MISSED, &napi->state); 6192 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6193 6194 local_bh_disable(); 6195 6196 if (prefer_busy_poll) { 6197 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6198 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6199 if (napi->defer_hard_irqs_count && timeout) { 6200 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6201 skip_schedule = true; 6202 } 6203 } 6204 6205 /* All we really want here is to re-enable device interrupts. 6206 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6207 */ 6208 rc = napi->poll(napi, budget); 6209 /* We can't gro_normal_list() here, because napi->poll() might have 6210 * rearmed the napi (napi_complete_done()) in which case it could 6211 * already be running on another CPU. 6212 */ 6213 trace_napi_poll(napi, rc, budget); 6214 netpoll_poll_unlock(have_poll_lock); 6215 if (rc == budget) 6216 __busy_poll_stop(napi, skip_schedule); 6217 local_bh_enable(); 6218 } 6219 6220 void napi_busy_loop(unsigned int napi_id, 6221 bool (*loop_end)(void *, unsigned long), 6222 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6223 { 6224 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6225 int (*napi_poll)(struct napi_struct *napi, int budget); 6226 void *have_poll_lock = NULL; 6227 struct napi_struct *napi; 6228 6229 restart: 6230 napi_poll = NULL; 6231 6232 rcu_read_lock(); 6233 6234 napi = napi_by_id(napi_id); 6235 if (!napi) 6236 goto out; 6237 6238 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6239 preempt_disable(); 6240 for (;;) { 6241 int work = 0; 6242 6243 local_bh_disable(); 6244 if (!napi_poll) { 6245 unsigned long val = READ_ONCE(napi->state); 6246 6247 /* If multiple threads are competing for this napi, 6248 * we avoid dirtying napi->state as much as we can. 
6249 */ 6250 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6251 NAPIF_STATE_IN_BUSY_POLL)) { 6252 if (prefer_busy_poll) 6253 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6254 goto count; 6255 } 6256 if (cmpxchg(&napi->state, val, 6257 val | NAPIF_STATE_IN_BUSY_POLL | 6258 NAPIF_STATE_SCHED) != val) { 6259 if (prefer_busy_poll) 6260 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6261 goto count; 6262 } 6263 have_poll_lock = netpoll_poll_lock(napi); 6264 napi_poll = napi->poll; 6265 } 6266 work = napi_poll(napi, budget); 6267 trace_napi_poll(napi, work, budget); 6268 gro_normal_list(napi); 6269 count: 6270 if (work > 0) 6271 __NET_ADD_STATS(dev_net(napi->dev), 6272 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6273 local_bh_enable(); 6274 6275 if (!loop_end || loop_end(loop_end_arg, start_time)) 6276 break; 6277 6278 if (unlikely(need_resched())) { 6279 if (napi_poll) 6280 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6281 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6282 preempt_enable(); 6283 rcu_read_unlock(); 6284 cond_resched(); 6285 if (loop_end(loop_end_arg, start_time)) 6286 return; 6287 goto restart; 6288 } 6289 cpu_relax(); 6290 } 6291 if (napi_poll) 6292 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6293 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6294 preempt_enable(); 6295 out: 6296 rcu_read_unlock(); 6297 } 6298 EXPORT_SYMBOL(napi_busy_loop); 6299 6300 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6301 6302 static void napi_hash_add(struct napi_struct *napi) 6303 { 6304 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6305 return; 6306 6307 spin_lock(&napi_hash_lock); 6308 6309 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6310 do { 6311 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6312 napi_gen_id = MIN_NAPI_ID; 6313 } while (napi_by_id(napi_gen_id)); 6314 napi->napi_id = napi_gen_id; 6315 6316 hlist_add_head_rcu(&napi->napi_hash_node, 6317 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6318 6319 spin_unlock(&napi_hash_lock); 6320 } 6321 6322 /* Warning : caller is responsible to make sure rcu grace period 6323 * is respected before freeing memory containing @napi 6324 */ 6325 static void napi_hash_del(struct napi_struct *napi) 6326 { 6327 spin_lock(&napi_hash_lock); 6328 6329 hlist_del_init_rcu(&napi->napi_hash_node); 6330 6331 spin_unlock(&napi_hash_lock); 6332 } 6333 6334 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6335 { 6336 struct napi_struct *napi; 6337 6338 napi = container_of(timer, struct napi_struct, timer); 6339 6340 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6341 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
6342 */ 6343 if (!napi_disable_pending(napi) && 6344 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6345 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6346 __napi_schedule_irqoff(napi); 6347 } 6348 6349 return HRTIMER_NORESTART; 6350 } 6351 6352 static void init_gro_hash(struct napi_struct *napi) 6353 { 6354 int i; 6355 6356 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6357 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6358 napi->gro_hash[i].count = 0; 6359 } 6360 napi->gro_bitmask = 0; 6361 } 6362 6363 int dev_set_threaded(struct net_device *dev, bool threaded) 6364 { 6365 struct napi_struct *napi; 6366 int err = 0; 6367 6368 if (dev->threaded == threaded) 6369 return 0; 6370 6371 if (threaded) { 6372 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6373 if (!napi->thread) { 6374 err = napi_kthread_create(napi); 6375 if (err) { 6376 threaded = false; 6377 break; 6378 } 6379 } 6380 } 6381 } 6382 6383 dev->threaded = threaded; 6384 6385 /* Make sure kthread is created before THREADED bit 6386 * is set. 6387 */ 6388 smp_mb__before_atomic(); 6389 6390 /* Setting/unsetting threaded mode on a napi might not immediately 6391 * take effect, if the current napi instance is actively being 6392 * polled. In this case, the switch between threaded mode and 6393 * softirq mode will happen in the next round of napi_schedule(). 6394 * This should not cause hiccups/stalls to the live traffic. 6395 */ 6396 list_for_each_entry(napi, &dev->napi_list, dev_list) 6397 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); 6398 6399 return err; 6400 } 6401 EXPORT_SYMBOL(dev_set_threaded); 6402 6403 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 6404 int (*poll)(struct napi_struct *, int), int weight) 6405 { 6406 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6407 return; 6408 6409 INIT_LIST_HEAD(&napi->poll_list); 6410 INIT_HLIST_NODE(&napi->napi_hash_node); 6411 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6412 napi->timer.function = napi_watchdog; 6413 init_gro_hash(napi); 6414 napi->skb = NULL; 6415 INIT_LIST_HEAD(&napi->rx_list); 6416 napi->rx_count = 0; 6417 napi->poll = poll; 6418 if (weight > NAPI_POLL_WEIGHT) 6419 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6420 weight); 6421 napi->weight = weight; 6422 napi->dev = dev; 6423 #ifdef CONFIG_NETPOLL 6424 napi->poll_owner = -1; 6425 #endif 6426 napi->list_owner = -1; 6427 set_bit(NAPI_STATE_SCHED, &napi->state); 6428 set_bit(NAPI_STATE_NPSVC, &napi->state); 6429 list_add_rcu(&napi->dev_list, &dev->napi_list); 6430 napi_hash_add(napi); 6431 napi_get_frags_check(napi); 6432 /* Create kthread for this napi if dev->threaded is set. 6433 * Clear dev->threaded if kthread creation failed so that 6434 * threaded mode will not be enabled in napi_enable(). 
6435 */ 6436 if (dev->threaded && napi_kthread_create(napi)) 6437 dev->threaded = 0; 6438 } 6439 EXPORT_SYMBOL(netif_napi_add_weight); 6440 6441 void napi_disable(struct napi_struct *n) 6442 { 6443 unsigned long val, new; 6444 6445 might_sleep(); 6446 set_bit(NAPI_STATE_DISABLE, &n->state); 6447 6448 val = READ_ONCE(n->state); 6449 do { 6450 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 6451 usleep_range(20, 200); 6452 val = READ_ONCE(n->state); 6453 } 6454 6455 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 6456 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 6457 } while (!try_cmpxchg(&n->state, &val, new)); 6458 6459 hrtimer_cancel(&n->timer); 6460 6461 clear_bit(NAPI_STATE_DISABLE, &n->state); 6462 } 6463 EXPORT_SYMBOL(napi_disable); 6464 6465 /** 6466 * napi_enable - enable NAPI scheduling 6467 * @n: NAPI context 6468 * 6469 * Resume NAPI from being scheduled on this context. 6470 * Must be paired with napi_disable. 6471 */ 6472 void napi_enable(struct napi_struct *n) 6473 { 6474 unsigned long new, val = READ_ONCE(n->state); 6475 6476 do { 6477 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6478 6479 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6480 if (n->dev->threaded && n->thread) 6481 new |= NAPIF_STATE_THREADED; 6482 } while (!try_cmpxchg(&n->state, &val, new)); 6483 } 6484 EXPORT_SYMBOL(napi_enable); 6485 6486 static void flush_gro_hash(struct napi_struct *napi) 6487 { 6488 int i; 6489 6490 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6491 struct sk_buff *skb, *n; 6492 6493 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6494 kfree_skb(skb); 6495 napi->gro_hash[i].count = 0; 6496 } 6497 } 6498 6499 /* Must be called in process context */ 6500 void __netif_napi_del(struct napi_struct *napi) 6501 { 6502 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6503 return; 6504 6505 napi_hash_del(napi); 6506 list_del_rcu(&napi->dev_list); 6507 napi_free_frags(napi); 6508 6509 flush_gro_hash(napi); 6510 napi->gro_bitmask = 0; 6511 6512 if (napi->thread) { 6513 kthread_stop(napi->thread); 6514 napi->thread = NULL; 6515 } 6516 } 6517 EXPORT_SYMBOL(__netif_napi_del); 6518 6519 static int __napi_poll(struct napi_struct *n, bool *repoll) 6520 { 6521 int work, weight; 6522 6523 weight = n->weight; 6524 6525 /* This NAPI_STATE_SCHED test is for avoiding a race 6526 * with netpoll's poll_napi(). Only the entity which 6527 * obtains the lock and sees NAPI_STATE_SCHED set will 6528 * actually make the ->poll() call. Therefore we avoid 6529 * accidentally calling ->poll() when NAPI is not scheduled. 6530 */ 6531 work = 0; 6532 if (napi_is_scheduled(n)) { 6533 work = n->poll(n, weight); 6534 trace_napi_poll(n, work, weight); 6535 6536 xdp_do_check_flushed(n); 6537 } 6538 6539 if (unlikely(work > weight)) 6540 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6541 n->poll, work, weight); 6542 6543 if (likely(work < weight)) 6544 return work; 6545 6546 /* Drivers must not modify the NAPI state if they 6547 * consume the entire weight. In such cases this code 6548 * still "owns" the NAPI instance and therefore can 6549 * move the instance around on the list at-will. 6550 */ 6551 if (unlikely(napi_disable_pending(n))) { 6552 napi_complete(n); 6553 return work; 6554 } 6555 6556 /* The NAPI context has more processing work, but busy-polling 6557 * is preferred. Exit early. 
6558 */ 6559 if (napi_prefer_busy_poll(n)) { 6560 if (napi_complete_done(n, work)) { 6561 /* If timeout is not set, we need to make sure 6562 * that the NAPI is re-scheduled. 6563 */ 6564 napi_schedule(n); 6565 } 6566 return work; 6567 } 6568 6569 if (n->gro_bitmask) { 6570 /* flush too old packets 6571 * If HZ < 1000, flush all packets. 6572 */ 6573 napi_gro_flush(n, HZ >= 1000); 6574 } 6575 6576 gro_normal_list(n); 6577 6578 /* Some drivers may have called napi_schedule 6579 * prior to exhausting their budget. 6580 */ 6581 if (unlikely(!list_empty(&n->poll_list))) { 6582 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6583 n->dev ? n->dev->name : "backlog"); 6584 return work; 6585 } 6586 6587 *repoll = true; 6588 6589 return work; 6590 } 6591 6592 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6593 { 6594 bool do_repoll = false; 6595 void *have; 6596 int work; 6597 6598 list_del_init(&n->poll_list); 6599 6600 have = netpoll_poll_lock(n); 6601 6602 work = __napi_poll(n, &do_repoll); 6603 6604 if (do_repoll) 6605 list_add_tail(&n->poll_list, repoll); 6606 6607 netpoll_poll_unlock(have); 6608 6609 return work; 6610 } 6611 6612 static int napi_thread_wait(struct napi_struct *napi) 6613 { 6614 bool woken = false; 6615 6616 set_current_state(TASK_INTERRUPTIBLE); 6617 6618 while (!kthread_should_stop()) { 6619 /* Testing SCHED_THREADED bit here to make sure the current 6620 * kthread owns this napi and could poll on this napi. 6621 * Testing SCHED bit is not enough because SCHED bit might be 6622 * set by some other busy poll thread or by napi_disable(). 6623 */ 6624 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 6625 WARN_ON(!list_empty(&napi->poll_list)); 6626 __set_current_state(TASK_RUNNING); 6627 return 0; 6628 } 6629 6630 schedule(); 6631 /* woken being true indicates this thread owns this napi. 
*/ 6632 woken = true; 6633 set_current_state(TASK_INTERRUPTIBLE); 6634 } 6635 __set_current_state(TASK_RUNNING); 6636 6637 return -1; 6638 } 6639 6640 static void skb_defer_free_flush(struct softnet_data *sd) 6641 { 6642 struct sk_buff *skb, *next; 6643 6644 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ 6645 if (!READ_ONCE(sd->defer_list)) 6646 return; 6647 6648 spin_lock(&sd->defer_lock); 6649 skb = sd->defer_list; 6650 sd->defer_list = NULL; 6651 sd->defer_count = 0; 6652 spin_unlock(&sd->defer_lock); 6653 6654 while (skb != NULL) { 6655 next = skb->next; 6656 napi_consume_skb(skb, 1); 6657 skb = next; 6658 } 6659 } 6660 6661 static int napi_threaded_poll(void *data) 6662 { 6663 struct napi_struct *napi = data; 6664 struct softnet_data *sd; 6665 void *have; 6666 6667 while (!napi_thread_wait(napi)) { 6668 for (;;) { 6669 bool repoll = false; 6670 6671 local_bh_disable(); 6672 sd = this_cpu_ptr(&softnet_data); 6673 sd->in_napi_threaded_poll = true; 6674 6675 have = netpoll_poll_lock(napi); 6676 __napi_poll(napi, &repoll); 6677 netpoll_poll_unlock(have); 6678 6679 sd->in_napi_threaded_poll = false; 6680 barrier(); 6681 6682 if (sd_has_rps_ipi_waiting(sd)) { 6683 local_irq_disable(); 6684 net_rps_action_and_irq_enable(sd); 6685 } 6686 skb_defer_free_flush(sd); 6687 local_bh_enable(); 6688 6689 if (!repoll) 6690 break; 6691 6692 cond_resched(); 6693 } 6694 } 6695 return 0; 6696 } 6697 6698 static __latent_entropy void net_rx_action(struct softirq_action *h) 6699 { 6700 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6701 unsigned long time_limit = jiffies + 6702 usecs_to_jiffies(READ_ONCE(netdev_budget_usecs)); 6703 int budget = READ_ONCE(netdev_budget); 6704 LIST_HEAD(list); 6705 LIST_HEAD(repoll); 6706 6707 start: 6708 sd->in_net_rx_action = true; 6709 local_irq_disable(); 6710 list_splice_init(&sd->poll_list, &list); 6711 local_irq_enable(); 6712 6713 for (;;) { 6714 struct napi_struct *n; 6715 6716 skb_defer_free_flush(sd); 6717 6718 if (list_empty(&list)) { 6719 if (list_empty(&repoll)) { 6720 sd->in_net_rx_action = false; 6721 barrier(); 6722 /* We need to check if ____napi_schedule() 6723 * had refilled poll_list while 6724 * sd->in_net_rx_action was true. 6725 */ 6726 if (!list_empty(&sd->poll_list)) 6727 goto start; 6728 if (!sd_has_rps_ipi_waiting(sd)) 6729 goto end; 6730 } 6731 break; 6732 } 6733 6734 n = list_first_entry(&list, struct napi_struct, poll_list); 6735 budget -= napi_poll(n, &repoll); 6736 6737 /* If softirq window is exhausted then punt. 6738 * Allow this to run for 2 jiffies since which will allow 6739 * an average latency of 1.5/HZ. 
6740 */ 6741 if (unlikely(budget <= 0 || 6742 time_after_eq(jiffies, time_limit))) { 6743 sd->time_squeeze++; 6744 break; 6745 } 6746 } 6747 6748 local_irq_disable(); 6749 6750 list_splice_tail_init(&sd->poll_list, &list); 6751 list_splice_tail(&repoll, &list); 6752 list_splice(&list, &sd->poll_list); 6753 if (!list_empty(&sd->poll_list)) 6754 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6755 else 6756 sd->in_net_rx_action = false; 6757 6758 net_rps_action_and_irq_enable(sd); 6759 end:; 6760 } 6761 6762 struct netdev_adjacent { 6763 struct net_device *dev; 6764 netdevice_tracker dev_tracker; 6765 6766 /* upper master flag, there can only be one master device per list */ 6767 bool master; 6768 6769 /* lookup ignore flag */ 6770 bool ignore; 6771 6772 /* counter for the number of times this device was added to us */ 6773 u16 ref_nr; 6774 6775 /* private field for the users */ 6776 void *private; 6777 6778 struct list_head list; 6779 struct rcu_head rcu; 6780 }; 6781 6782 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6783 struct list_head *adj_list) 6784 { 6785 struct netdev_adjacent *adj; 6786 6787 list_for_each_entry(adj, adj_list, list) { 6788 if (adj->dev == adj_dev) 6789 return adj; 6790 } 6791 return NULL; 6792 } 6793 6794 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 6795 struct netdev_nested_priv *priv) 6796 { 6797 struct net_device *dev = (struct net_device *)priv->data; 6798 6799 return upper_dev == dev; 6800 } 6801 6802 /** 6803 * netdev_has_upper_dev - Check if device is linked to an upper device 6804 * @dev: device 6805 * @upper_dev: upper device to check 6806 * 6807 * Find out if a device is linked to specified upper device and return true 6808 * in case it is. Note that this checks only immediate upper device, 6809 * not through a complete stack of devices. The caller must hold the RTNL lock. 6810 */ 6811 bool netdev_has_upper_dev(struct net_device *dev, 6812 struct net_device *upper_dev) 6813 { 6814 struct netdev_nested_priv priv = { 6815 .data = (void *)upper_dev, 6816 }; 6817 6818 ASSERT_RTNL(); 6819 6820 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6821 &priv); 6822 } 6823 EXPORT_SYMBOL(netdev_has_upper_dev); 6824 6825 /** 6826 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 6827 * @dev: device 6828 * @upper_dev: upper device to check 6829 * 6830 * Find out if a device is linked to specified upper device and return true 6831 * in case it is. Note that this checks the entire upper device chain. 6832 * The caller must hold rcu lock. 6833 */ 6834 6835 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6836 struct net_device *upper_dev) 6837 { 6838 struct netdev_nested_priv priv = { 6839 .data = (void *)upper_dev, 6840 }; 6841 6842 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6843 &priv); 6844 } 6845 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6846 6847 /** 6848 * netdev_has_any_upper_dev - Check if device is linked to some device 6849 * @dev: device 6850 * 6851 * Find out if a device is linked to an upper device and return true in case 6852 * it is. The caller must hold the RTNL lock. 
6853 */ 6854 bool netdev_has_any_upper_dev(struct net_device *dev) 6855 { 6856 ASSERT_RTNL(); 6857 6858 return !list_empty(&dev->adj_list.upper); 6859 } 6860 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6861 6862 /** 6863 * netdev_master_upper_dev_get - Get master upper device 6864 * @dev: device 6865 * 6866 * Find a master upper device and return pointer to it or NULL in case 6867 * it's not there. The caller must hold the RTNL lock. 6868 */ 6869 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6870 { 6871 struct netdev_adjacent *upper; 6872 6873 ASSERT_RTNL(); 6874 6875 if (list_empty(&dev->adj_list.upper)) 6876 return NULL; 6877 6878 upper = list_first_entry(&dev->adj_list.upper, 6879 struct netdev_adjacent, list); 6880 if (likely(upper->master)) 6881 return upper->dev; 6882 return NULL; 6883 } 6884 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6885 6886 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 6887 { 6888 struct netdev_adjacent *upper; 6889 6890 ASSERT_RTNL(); 6891 6892 if (list_empty(&dev->adj_list.upper)) 6893 return NULL; 6894 6895 upper = list_first_entry(&dev->adj_list.upper, 6896 struct netdev_adjacent, list); 6897 if (likely(upper->master) && !upper->ignore) 6898 return upper->dev; 6899 return NULL; 6900 } 6901 6902 /** 6903 * netdev_has_any_lower_dev - Check if device is linked to some device 6904 * @dev: device 6905 * 6906 * Find out if a device is linked to a lower device and return true in case 6907 * it is. The caller must hold the RTNL lock. 6908 */ 6909 static bool netdev_has_any_lower_dev(struct net_device *dev) 6910 { 6911 ASSERT_RTNL(); 6912 6913 return !list_empty(&dev->adj_list.lower); 6914 } 6915 6916 void *netdev_adjacent_get_private(struct list_head *adj_list) 6917 { 6918 struct netdev_adjacent *adj; 6919 6920 adj = list_entry(adj_list, struct netdev_adjacent, list); 6921 6922 return adj->private; 6923 } 6924 EXPORT_SYMBOL(netdev_adjacent_get_private); 6925 6926 /** 6927 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6928 * @dev: device 6929 * @iter: list_head ** of the current position 6930 * 6931 * Gets the next device from the dev's upper list, starting from iter 6932 * position. The caller must hold RCU read lock. 
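 *
 * As a usage sketch, callers normally iterate through the
 * netdev_for_each_upper_dev_rcu() helper, which is built on this function
 * (do_something_with() is a placeholder):
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		do_something_with(upper);
 *	rcu_read_unlock();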
6933 */ 6934 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6935 struct list_head **iter) 6936 { 6937 struct netdev_adjacent *upper; 6938 6939 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6940 6941 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6942 6943 if (&upper->list == &dev->adj_list.upper) 6944 return NULL; 6945 6946 *iter = &upper->list; 6947 6948 return upper->dev; 6949 } 6950 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6951 6952 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 6953 struct list_head **iter, 6954 bool *ignore) 6955 { 6956 struct netdev_adjacent *upper; 6957 6958 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 6959 6960 if (&upper->list == &dev->adj_list.upper) 6961 return NULL; 6962 6963 *iter = &upper->list; 6964 *ignore = upper->ignore; 6965 6966 return upper->dev; 6967 } 6968 6969 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6970 struct list_head **iter) 6971 { 6972 struct netdev_adjacent *upper; 6973 6974 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6975 6976 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6977 6978 if (&upper->list == &dev->adj_list.upper) 6979 return NULL; 6980 6981 *iter = &upper->list; 6982 6983 return upper->dev; 6984 } 6985 6986 static int __netdev_walk_all_upper_dev(struct net_device *dev, 6987 int (*fn)(struct net_device *dev, 6988 struct netdev_nested_priv *priv), 6989 struct netdev_nested_priv *priv) 6990 { 6991 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6992 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6993 int ret, cur = 0; 6994 bool ignore; 6995 6996 now = dev; 6997 iter = &dev->adj_list.upper; 6998 6999 while (1) { 7000 if (now != dev) { 7001 ret = fn(now, priv); 7002 if (ret) 7003 return ret; 7004 } 7005 7006 next = NULL; 7007 while (1) { 7008 udev = __netdev_next_upper_dev(now, &iter, &ignore); 7009 if (!udev) 7010 break; 7011 if (ignore) 7012 continue; 7013 7014 next = udev; 7015 niter = &udev->adj_list.upper; 7016 dev_stack[cur] = now; 7017 iter_stack[cur++] = iter; 7018 break; 7019 } 7020 7021 if (!next) { 7022 if (!cur) 7023 return 0; 7024 next = dev_stack[--cur]; 7025 niter = iter_stack[cur]; 7026 } 7027 7028 now = next; 7029 iter = niter; 7030 } 7031 7032 return 0; 7033 } 7034 7035 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 7036 int (*fn)(struct net_device *dev, 7037 struct netdev_nested_priv *priv), 7038 struct netdev_nested_priv *priv) 7039 { 7040 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7041 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7042 int ret, cur = 0; 7043 7044 now = dev; 7045 iter = &dev->adj_list.upper; 7046 7047 while (1) { 7048 if (now != dev) { 7049 ret = fn(now, priv); 7050 if (ret) 7051 return ret; 7052 } 7053 7054 next = NULL; 7055 while (1) { 7056 udev = netdev_next_upper_dev_rcu(now, &iter); 7057 if (!udev) 7058 break; 7059 7060 next = udev; 7061 niter = &udev->adj_list.upper; 7062 dev_stack[cur] = now; 7063 iter_stack[cur++] = iter; 7064 break; 7065 } 7066 7067 if (!next) { 7068 if (!cur) 7069 return 0; 7070 next = dev_stack[--cur]; 7071 niter = iter_stack[cur]; 7072 } 7073 7074 now = next; 7075 iter = niter; 7076 } 7077 7078 return 0; 7079 } 7080 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7081 7082 static bool __netdev_has_upper_dev(struct net_device *dev, 7083 struct net_device *upper_dev) 7084 { 7085 struct 
netdev_nested_priv priv = {
		.flags = 0,
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
					   &priv);
}

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
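 *
 * As a usage sketch, most callers go through the netdev_for_each_lower_dev()
 * helper built on this function, under RTNL or equivalent protection
 * (do_something_with() is a placeholder):
 *
 *	struct net_device *lower;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(dev, lower, iter)
 *		do_something_with(lower);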
7161 */ 7162 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7163 { 7164 struct netdev_adjacent *lower; 7165 7166 lower = list_entry(*iter, struct netdev_adjacent, list); 7167 7168 if (&lower->list == &dev->adj_list.lower) 7169 return NULL; 7170 7171 *iter = lower->list.next; 7172 7173 return lower->dev; 7174 } 7175 EXPORT_SYMBOL(netdev_lower_get_next); 7176 7177 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7178 struct list_head **iter) 7179 { 7180 struct netdev_adjacent *lower; 7181 7182 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7183 7184 if (&lower->list == &dev->adj_list.lower) 7185 return NULL; 7186 7187 *iter = &lower->list; 7188 7189 return lower->dev; 7190 } 7191 7192 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7193 struct list_head **iter, 7194 bool *ignore) 7195 { 7196 struct netdev_adjacent *lower; 7197 7198 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7199 7200 if (&lower->list == &dev->adj_list.lower) 7201 return NULL; 7202 7203 *iter = &lower->list; 7204 *ignore = lower->ignore; 7205 7206 return lower->dev; 7207 } 7208 7209 int netdev_walk_all_lower_dev(struct net_device *dev, 7210 int (*fn)(struct net_device *dev, 7211 struct netdev_nested_priv *priv), 7212 struct netdev_nested_priv *priv) 7213 { 7214 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7215 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7216 int ret, cur = 0; 7217 7218 now = dev; 7219 iter = &dev->adj_list.lower; 7220 7221 while (1) { 7222 if (now != dev) { 7223 ret = fn(now, priv); 7224 if (ret) 7225 return ret; 7226 } 7227 7228 next = NULL; 7229 while (1) { 7230 ldev = netdev_next_lower_dev(now, &iter); 7231 if (!ldev) 7232 break; 7233 7234 next = ldev; 7235 niter = &ldev->adj_list.lower; 7236 dev_stack[cur] = now; 7237 iter_stack[cur++] = iter; 7238 break; 7239 } 7240 7241 if (!next) { 7242 if (!cur) 7243 return 0; 7244 next = dev_stack[--cur]; 7245 niter = iter_stack[cur]; 7246 } 7247 7248 now = next; 7249 iter = niter; 7250 } 7251 7252 return 0; 7253 } 7254 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7255 7256 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7257 int (*fn)(struct net_device *dev, 7258 struct netdev_nested_priv *priv), 7259 struct netdev_nested_priv *priv) 7260 { 7261 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7262 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7263 int ret, cur = 0; 7264 bool ignore; 7265 7266 now = dev; 7267 iter = &dev->adj_list.lower; 7268 7269 while (1) { 7270 if (now != dev) { 7271 ret = fn(now, priv); 7272 if (ret) 7273 return ret; 7274 } 7275 7276 next = NULL; 7277 while (1) { 7278 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7279 if (!ldev) 7280 break; 7281 if (ignore) 7282 continue; 7283 7284 next = ldev; 7285 niter = &ldev->adj_list.lower; 7286 dev_stack[cur] = now; 7287 iter_stack[cur++] = iter; 7288 break; 7289 } 7290 7291 if (!next) { 7292 if (!cur) 7293 return 0; 7294 next = dev_stack[--cur]; 7295 niter = iter_stack[cur]; 7296 } 7297 7298 now = next; 7299 iter = niter; 7300 } 7301 7302 return 0; 7303 } 7304 7305 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7306 struct list_head **iter) 7307 { 7308 struct netdev_adjacent *lower; 7309 7310 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7311 if (&lower->list == &dev->adj_list.lower) 7312 return NULL; 7313 7314 *iter = &lower->list; 7315 7316 
return lower->dev; 7317 } 7318 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7319 7320 static u8 __netdev_upper_depth(struct net_device *dev) 7321 { 7322 struct net_device *udev; 7323 struct list_head *iter; 7324 u8 max_depth = 0; 7325 bool ignore; 7326 7327 for (iter = &dev->adj_list.upper, 7328 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7329 udev; 7330 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7331 if (ignore) 7332 continue; 7333 if (max_depth < udev->upper_level) 7334 max_depth = udev->upper_level; 7335 } 7336 7337 return max_depth; 7338 } 7339 7340 static u8 __netdev_lower_depth(struct net_device *dev) 7341 { 7342 struct net_device *ldev; 7343 struct list_head *iter; 7344 u8 max_depth = 0; 7345 bool ignore; 7346 7347 for (iter = &dev->adj_list.lower, 7348 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7349 ldev; 7350 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7351 if (ignore) 7352 continue; 7353 if (max_depth < ldev->lower_level) 7354 max_depth = ldev->lower_level; 7355 } 7356 7357 return max_depth; 7358 } 7359 7360 static int __netdev_update_upper_level(struct net_device *dev, 7361 struct netdev_nested_priv *__unused) 7362 { 7363 dev->upper_level = __netdev_upper_depth(dev) + 1; 7364 return 0; 7365 } 7366 7367 #ifdef CONFIG_LOCKDEP 7368 static LIST_HEAD(net_unlink_list); 7369 7370 static void net_unlink_todo(struct net_device *dev) 7371 { 7372 if (list_empty(&dev->unlink_list)) 7373 list_add_tail(&dev->unlink_list, &net_unlink_list); 7374 } 7375 #endif 7376 7377 static int __netdev_update_lower_level(struct net_device *dev, 7378 struct netdev_nested_priv *priv) 7379 { 7380 dev->lower_level = __netdev_lower_depth(dev) + 1; 7381 7382 #ifdef CONFIG_LOCKDEP 7383 if (!priv) 7384 return 0; 7385 7386 if (priv->flags & NESTED_SYNC_IMM) 7387 dev->nested_level = dev->lower_level - 1; 7388 if (priv->flags & NESTED_SYNC_TODO) 7389 net_unlink_todo(dev); 7390 #endif 7391 return 0; 7392 } 7393 7394 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7395 int (*fn)(struct net_device *dev, 7396 struct netdev_nested_priv *priv), 7397 struct netdev_nested_priv *priv) 7398 { 7399 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7400 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7401 int ret, cur = 0; 7402 7403 now = dev; 7404 iter = &dev->adj_list.lower; 7405 7406 while (1) { 7407 if (now != dev) { 7408 ret = fn(now, priv); 7409 if (ret) 7410 return ret; 7411 } 7412 7413 next = NULL; 7414 while (1) { 7415 ldev = netdev_next_lower_dev_rcu(now, &iter); 7416 if (!ldev) 7417 break; 7418 7419 next = ldev; 7420 niter = &ldev->adj_list.lower; 7421 dev_stack[cur] = now; 7422 iter_stack[cur++] = iter; 7423 break; 7424 } 7425 7426 if (!next) { 7427 if (!cur) 7428 return 0; 7429 next = dev_stack[--cur]; 7430 niter = iter_stack[cur]; 7431 } 7432 7433 now = next; 7434 iter = niter; 7435 } 7436 7437 return 0; 7438 } 7439 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7440 7441 /** 7442 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7443 * lower neighbour list, RCU 7444 * variant 7445 * @dev: device 7446 * 7447 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7448 * list. The caller must hold RCU read lock. 
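 *
 * For example (sketch only; master_dev and slave_priv are placeholders), a
 * bonding-style master that registered per-slave state through the
 * upper_priv argument of netdev_master_upper_dev_link() can fetch its first
 * slave's state with:
 *
 *	rcu_read_lock();
 *	slave_priv = netdev_lower_get_first_private_rcu(master_dev);
 *	rcu_read_unlock();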
7449 */ 7450 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7451 { 7452 struct netdev_adjacent *lower; 7453 7454 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7455 struct netdev_adjacent, list); 7456 if (lower) 7457 return lower->private; 7458 return NULL; 7459 } 7460 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7461 7462 /** 7463 * netdev_master_upper_dev_get_rcu - Get master upper device 7464 * @dev: device 7465 * 7466 * Find a master upper device and return pointer to it or NULL in case 7467 * it's not there. The caller must hold the RCU read lock. 7468 */ 7469 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7470 { 7471 struct netdev_adjacent *upper; 7472 7473 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7474 struct netdev_adjacent, list); 7475 if (upper && likely(upper->master)) 7476 return upper->dev; 7477 return NULL; 7478 } 7479 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7480 7481 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7482 struct net_device *adj_dev, 7483 struct list_head *dev_list) 7484 { 7485 char linkname[IFNAMSIZ+7]; 7486 7487 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7488 "upper_%s" : "lower_%s", adj_dev->name); 7489 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7490 linkname); 7491 } 7492 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7493 char *name, 7494 struct list_head *dev_list) 7495 { 7496 char linkname[IFNAMSIZ+7]; 7497 7498 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7499 "upper_%s" : "lower_%s", name); 7500 sysfs_remove_link(&(dev->dev.kobj), linkname); 7501 } 7502 7503 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7504 struct net_device *adj_dev, 7505 struct list_head *dev_list) 7506 { 7507 return (dev_list == &dev->adj_list.upper || 7508 dev_list == &dev->adj_list.lower) && 7509 net_eq(dev_net(dev), dev_net(adj_dev)); 7510 } 7511 7512 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7513 struct net_device *adj_dev, 7514 struct list_head *dev_list, 7515 void *private, bool master) 7516 { 7517 struct netdev_adjacent *adj; 7518 int ret; 7519 7520 adj = __netdev_find_adj(adj_dev, dev_list); 7521 7522 if (adj) { 7523 adj->ref_nr += 1; 7524 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7525 dev->name, adj_dev->name, adj->ref_nr); 7526 7527 return 0; 7528 } 7529 7530 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7531 if (!adj) 7532 return -ENOMEM; 7533 7534 adj->dev = adj_dev; 7535 adj->master = master; 7536 adj->ref_nr = 1; 7537 adj->private = private; 7538 adj->ignore = false; 7539 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL); 7540 7541 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7542 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7543 7544 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7545 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7546 if (ret) 7547 goto free_adj; 7548 } 7549 7550 /* Ensure that master link is always the first item in list. 
*/ 7551 if (master) { 7552 ret = sysfs_create_link(&(dev->dev.kobj), 7553 &(adj_dev->dev.kobj), "master"); 7554 if (ret) 7555 goto remove_symlinks; 7556 7557 list_add_rcu(&adj->list, dev_list); 7558 } else { 7559 list_add_tail_rcu(&adj->list, dev_list); 7560 } 7561 7562 return 0; 7563 7564 remove_symlinks: 7565 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7566 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7567 free_adj: 7568 netdev_put(adj_dev, &adj->dev_tracker); 7569 kfree(adj); 7570 7571 return ret; 7572 } 7573 7574 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7575 struct net_device *adj_dev, 7576 u16 ref_nr, 7577 struct list_head *dev_list) 7578 { 7579 struct netdev_adjacent *adj; 7580 7581 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7582 dev->name, adj_dev->name, ref_nr); 7583 7584 adj = __netdev_find_adj(adj_dev, dev_list); 7585 7586 if (!adj) { 7587 pr_err("Adjacency does not exist for device %s from %s\n", 7588 dev->name, adj_dev->name); 7589 WARN_ON(1); 7590 return; 7591 } 7592 7593 if (adj->ref_nr > ref_nr) { 7594 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7595 dev->name, adj_dev->name, ref_nr, 7596 adj->ref_nr - ref_nr); 7597 adj->ref_nr -= ref_nr; 7598 return; 7599 } 7600 7601 if (adj->master) 7602 sysfs_remove_link(&(dev->dev.kobj), "master"); 7603 7604 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7605 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7606 7607 list_del_rcu(&adj->list); 7608 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7609 adj_dev->name, dev->name, adj_dev->name); 7610 netdev_put(adj_dev, &adj->dev_tracker); 7611 kfree_rcu(adj, rcu); 7612 } 7613 7614 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7615 struct net_device *upper_dev, 7616 struct list_head *up_list, 7617 struct list_head *down_list, 7618 void *private, bool master) 7619 { 7620 int ret; 7621 7622 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7623 private, master); 7624 if (ret) 7625 return ret; 7626 7627 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7628 private, false); 7629 if (ret) { 7630 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7631 return ret; 7632 } 7633 7634 return 0; 7635 } 7636 7637 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7638 struct net_device *upper_dev, 7639 u16 ref_nr, 7640 struct list_head *up_list, 7641 struct list_head *down_list) 7642 { 7643 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7644 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7645 } 7646 7647 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7648 struct net_device *upper_dev, 7649 void *private, bool master) 7650 { 7651 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7652 &dev->adj_list.upper, 7653 &upper_dev->adj_list.lower, 7654 private, master); 7655 } 7656 7657 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7658 struct net_device *upper_dev) 7659 { 7660 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7661 &dev->adj_list.upper, 7662 &upper_dev->adj_list.lower); 7663 } 7664 7665 static int __netdev_upper_dev_link(struct net_device *dev, 7666 struct net_device *upper_dev, bool master, 7667 void *upper_priv, void *upper_info, 7668 struct netdev_nested_priv *priv, 7669 struct netlink_ext_ack *extack) 7670 { 7671 struct netdev_notifier_changeupper_info changeupper_info = { 7672 .info = { 7673 .dev = dev, 
7674 .extack = extack, 7675 }, 7676 .upper_dev = upper_dev, 7677 .master = master, 7678 .linking = true, 7679 .upper_info = upper_info, 7680 }; 7681 struct net_device *master_dev; 7682 int ret = 0; 7683 7684 ASSERT_RTNL(); 7685 7686 if (dev == upper_dev) 7687 return -EBUSY; 7688 7689 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7690 if (__netdev_has_upper_dev(upper_dev, dev)) 7691 return -EBUSY; 7692 7693 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7694 return -EMLINK; 7695 7696 if (!master) { 7697 if (__netdev_has_upper_dev(dev, upper_dev)) 7698 return -EEXIST; 7699 } else { 7700 master_dev = __netdev_master_upper_dev_get(dev); 7701 if (master_dev) 7702 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7703 } 7704 7705 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7706 &changeupper_info.info); 7707 ret = notifier_to_errno(ret); 7708 if (ret) 7709 return ret; 7710 7711 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7712 master); 7713 if (ret) 7714 return ret; 7715 7716 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7717 &changeupper_info.info); 7718 ret = notifier_to_errno(ret); 7719 if (ret) 7720 goto rollback; 7721 7722 __netdev_update_upper_level(dev, NULL); 7723 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7724 7725 __netdev_update_lower_level(upper_dev, priv); 7726 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7727 priv); 7728 7729 return 0; 7730 7731 rollback: 7732 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7733 7734 return ret; 7735 } 7736 7737 /** 7738 * netdev_upper_dev_link - Add a link to the upper device 7739 * @dev: device 7740 * @upper_dev: new upper device 7741 * @extack: netlink extended ack 7742 * 7743 * Adds a link to device which is upper to this one. The caller must hold 7744 * the RTNL lock. On a failure a negative errno code is returned. 7745 * On success the reference counts are adjusted and the function 7746 * returns zero. 7747 */ 7748 int netdev_upper_dev_link(struct net_device *dev, 7749 struct net_device *upper_dev, 7750 struct netlink_ext_ack *extack) 7751 { 7752 struct netdev_nested_priv priv = { 7753 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7754 .data = NULL, 7755 }; 7756 7757 return __netdev_upper_dev_link(dev, upper_dev, false, 7758 NULL, NULL, &priv, extack); 7759 } 7760 EXPORT_SYMBOL(netdev_upper_dev_link); 7761 7762 /** 7763 * netdev_master_upper_dev_link - Add a master link to the upper device 7764 * @dev: device 7765 * @upper_dev: new upper device 7766 * @upper_priv: upper device private 7767 * @upper_info: upper info to be passed down via notifier 7768 * @extack: netlink extended ack 7769 * 7770 * Adds a link to device which is upper to this one. In this case, only 7771 * one master upper device can be linked, although other non-master devices 7772 * might be linked as well. The caller must hold the RTNL lock. 7773 * On a failure a negative errno code is returned. On success the reference 7774 * counts are adjusted and the function returns zero. 
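 *
 * Illustrative sketch (names hypothetical): an aggregation driver enslaving
 * a port would typically link it under its master like this:
 *
 *	err = netdev_master_upper_dev_link(port_dev, master_dev,
 *					   slave_priv, NULL, extack);
 *	if (err)
 *		goto err_upper_link;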
7775 */ 7776 int netdev_master_upper_dev_link(struct net_device *dev, 7777 struct net_device *upper_dev, 7778 void *upper_priv, void *upper_info, 7779 struct netlink_ext_ack *extack) 7780 { 7781 struct netdev_nested_priv priv = { 7782 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7783 .data = NULL, 7784 }; 7785 7786 return __netdev_upper_dev_link(dev, upper_dev, true, 7787 upper_priv, upper_info, &priv, extack); 7788 } 7789 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7790 7791 static void __netdev_upper_dev_unlink(struct net_device *dev, 7792 struct net_device *upper_dev, 7793 struct netdev_nested_priv *priv) 7794 { 7795 struct netdev_notifier_changeupper_info changeupper_info = { 7796 .info = { 7797 .dev = dev, 7798 }, 7799 .upper_dev = upper_dev, 7800 .linking = false, 7801 }; 7802 7803 ASSERT_RTNL(); 7804 7805 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7806 7807 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7808 &changeupper_info.info); 7809 7810 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7811 7812 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7813 &changeupper_info.info); 7814 7815 __netdev_update_upper_level(dev, NULL); 7816 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7817 7818 __netdev_update_lower_level(upper_dev, priv); 7819 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7820 priv); 7821 } 7822 7823 /** 7824 * netdev_upper_dev_unlink - Removes a link to upper device 7825 * @dev: device 7826 * @upper_dev: new upper device 7827 * 7828 * Removes a link to device which is upper to this one. The caller must hold 7829 * the RTNL lock. 7830 */ 7831 void netdev_upper_dev_unlink(struct net_device *dev, 7832 struct net_device *upper_dev) 7833 { 7834 struct netdev_nested_priv priv = { 7835 .flags = NESTED_SYNC_TODO, 7836 .data = NULL, 7837 }; 7838 7839 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 7840 } 7841 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7842 7843 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 7844 struct net_device *lower_dev, 7845 bool val) 7846 { 7847 struct netdev_adjacent *adj; 7848 7849 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 7850 if (adj) 7851 adj->ignore = val; 7852 7853 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 7854 if (adj) 7855 adj->ignore = val; 7856 } 7857 7858 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 7859 struct net_device *lower_dev) 7860 { 7861 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 7862 } 7863 7864 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 7865 struct net_device *lower_dev) 7866 { 7867 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 7868 } 7869 7870 int netdev_adjacent_change_prepare(struct net_device *old_dev, 7871 struct net_device *new_dev, 7872 struct net_device *dev, 7873 struct netlink_ext_ack *extack) 7874 { 7875 struct netdev_nested_priv priv = { 7876 .flags = 0, 7877 .data = NULL, 7878 }; 7879 int err; 7880 7881 if (!new_dev) 7882 return 0; 7883 7884 if (old_dev && new_dev != old_dev) 7885 netdev_adjacent_dev_disable(dev, old_dev); 7886 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 7887 extack); 7888 if (err) { 7889 if (old_dev && new_dev != old_dev) 7890 netdev_adjacent_dev_enable(dev, old_dev); 7891 return err; 7892 } 7893 7894 return 0; 7895 } 7896 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 7897 7898 void netdev_adjacent_change_commit(struct net_device *old_dev, 7899 struct 
net_device *new_dev, 7900 struct net_device *dev) 7901 { 7902 struct netdev_nested_priv priv = { 7903 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7904 .data = NULL, 7905 }; 7906 7907 if (!new_dev || !old_dev) 7908 return; 7909 7910 if (new_dev == old_dev) 7911 return; 7912 7913 netdev_adjacent_dev_enable(dev, old_dev); 7914 __netdev_upper_dev_unlink(old_dev, dev, &priv); 7915 } 7916 EXPORT_SYMBOL(netdev_adjacent_change_commit); 7917 7918 void netdev_adjacent_change_abort(struct net_device *old_dev, 7919 struct net_device *new_dev, 7920 struct net_device *dev) 7921 { 7922 struct netdev_nested_priv priv = { 7923 .flags = 0, 7924 .data = NULL, 7925 }; 7926 7927 if (!new_dev) 7928 return; 7929 7930 if (old_dev && new_dev != old_dev) 7931 netdev_adjacent_dev_enable(dev, old_dev); 7932 7933 __netdev_upper_dev_unlink(new_dev, dev, &priv); 7934 } 7935 EXPORT_SYMBOL(netdev_adjacent_change_abort); 7936 7937 /** 7938 * netdev_bonding_info_change - Dispatch event about slave change 7939 * @dev: device 7940 * @bonding_info: info to dispatch 7941 * 7942 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7943 * The caller must hold the RTNL lock. 7944 */ 7945 void netdev_bonding_info_change(struct net_device *dev, 7946 struct netdev_bonding_info *bonding_info) 7947 { 7948 struct netdev_notifier_bonding_info info = { 7949 .info.dev = dev, 7950 }; 7951 7952 memcpy(&info.bonding_info, bonding_info, 7953 sizeof(struct netdev_bonding_info)); 7954 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7955 &info.info); 7956 } 7957 EXPORT_SYMBOL(netdev_bonding_info_change); 7958 7959 static int netdev_offload_xstats_enable_l3(struct net_device *dev, 7960 struct netlink_ext_ack *extack) 7961 { 7962 struct netdev_notifier_offload_xstats_info info = { 7963 .info.dev = dev, 7964 .info.extack = extack, 7965 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7966 }; 7967 int err; 7968 int rc; 7969 7970 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), 7971 GFP_KERNEL); 7972 if (!dev->offload_xstats_l3) 7973 return -ENOMEM; 7974 7975 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, 7976 NETDEV_OFFLOAD_XSTATS_DISABLE, 7977 &info.info); 7978 err = notifier_to_errno(rc); 7979 if (err) 7980 goto free_stats; 7981 7982 return 0; 7983 7984 free_stats: 7985 kfree(dev->offload_xstats_l3); 7986 dev->offload_xstats_l3 = NULL; 7987 return err; 7988 } 7989 7990 int netdev_offload_xstats_enable(struct net_device *dev, 7991 enum netdev_offload_xstats_type type, 7992 struct netlink_ext_ack *extack) 7993 { 7994 ASSERT_RTNL(); 7995 7996 if (netdev_offload_xstats_enabled(dev, type)) 7997 return -EALREADY; 7998 7999 switch (type) { 8000 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8001 return netdev_offload_xstats_enable_l3(dev, extack); 8002 } 8003 8004 WARN_ON(1); 8005 return -EINVAL; 8006 } 8007 EXPORT_SYMBOL(netdev_offload_xstats_enable); 8008 8009 static void netdev_offload_xstats_disable_l3(struct net_device *dev) 8010 { 8011 struct netdev_notifier_offload_xstats_info info = { 8012 .info.dev = dev, 8013 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 8014 }; 8015 8016 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, 8017 &info.info); 8018 kfree(dev->offload_xstats_l3); 8019 dev->offload_xstats_l3 = NULL; 8020 } 8021 8022 int netdev_offload_xstats_disable(struct net_device *dev, 8023 enum netdev_offload_xstats_type type) 8024 { 8025 ASSERT_RTNL(); 8026 8027 if (!netdev_offload_xstats_enabled(dev, type)) 8028 return -EALREADY; 8029 8030 switch (type) { 8031 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8032 
netdev_offload_xstats_disable_l3(dev); 8033 return 0; 8034 } 8035 8036 WARN_ON(1); 8037 return -EINVAL; 8038 } 8039 EXPORT_SYMBOL(netdev_offload_xstats_disable); 8040 8041 static void netdev_offload_xstats_disable_all(struct net_device *dev) 8042 { 8043 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); 8044 } 8045 8046 static struct rtnl_hw_stats64 * 8047 netdev_offload_xstats_get_ptr(const struct net_device *dev, 8048 enum netdev_offload_xstats_type type) 8049 { 8050 switch (type) { 8051 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8052 return dev->offload_xstats_l3; 8053 } 8054 8055 WARN_ON(1); 8056 return NULL; 8057 } 8058 8059 bool netdev_offload_xstats_enabled(const struct net_device *dev, 8060 enum netdev_offload_xstats_type type) 8061 { 8062 ASSERT_RTNL(); 8063 8064 return netdev_offload_xstats_get_ptr(dev, type); 8065 } 8066 EXPORT_SYMBOL(netdev_offload_xstats_enabled); 8067 8068 struct netdev_notifier_offload_xstats_ru { 8069 bool used; 8070 }; 8071 8072 struct netdev_notifier_offload_xstats_rd { 8073 struct rtnl_hw_stats64 stats; 8074 bool used; 8075 }; 8076 8077 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, 8078 const struct rtnl_hw_stats64 *src) 8079 { 8080 dest->rx_packets += src->rx_packets; 8081 dest->tx_packets += src->tx_packets; 8082 dest->rx_bytes += src->rx_bytes; 8083 dest->tx_bytes += src->tx_bytes; 8084 dest->rx_errors += src->rx_errors; 8085 dest->tx_errors += src->tx_errors; 8086 dest->rx_dropped += src->rx_dropped; 8087 dest->tx_dropped += src->tx_dropped; 8088 dest->multicast += src->multicast; 8089 } 8090 8091 static int netdev_offload_xstats_get_used(struct net_device *dev, 8092 enum netdev_offload_xstats_type type, 8093 bool *p_used, 8094 struct netlink_ext_ack *extack) 8095 { 8096 struct netdev_notifier_offload_xstats_ru report_used = {}; 8097 struct netdev_notifier_offload_xstats_info info = { 8098 .info.dev = dev, 8099 .info.extack = extack, 8100 .type = type, 8101 .report_used = &report_used, 8102 }; 8103 int rc; 8104 8105 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); 8106 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, 8107 &info.info); 8108 *p_used = report_used.used; 8109 return notifier_to_errno(rc); 8110 } 8111 8112 static int netdev_offload_xstats_get_stats(struct net_device *dev, 8113 enum netdev_offload_xstats_type type, 8114 struct rtnl_hw_stats64 *p_stats, 8115 bool *p_used, 8116 struct netlink_ext_ack *extack) 8117 { 8118 struct netdev_notifier_offload_xstats_rd report_delta = {}; 8119 struct netdev_notifier_offload_xstats_info info = { 8120 .info.dev = dev, 8121 .info.extack = extack, 8122 .type = type, 8123 .report_delta = &report_delta, 8124 }; 8125 struct rtnl_hw_stats64 *stats; 8126 int rc; 8127 8128 stats = netdev_offload_xstats_get_ptr(dev, type); 8129 if (WARN_ON(!stats)) 8130 return -EINVAL; 8131 8132 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 8133 &info.info); 8134 8135 /* Cache whatever we got, even if there was an error, otherwise the 8136 * successful stats retrievals would get lost. 
8137 */ 8138 netdev_hw_stats64_add(stats, &report_delta.stats); 8139 8140 if (p_stats) 8141 *p_stats = *stats; 8142 *p_used = report_delta.used; 8143 8144 return notifier_to_errno(rc); 8145 } 8146 8147 int netdev_offload_xstats_get(struct net_device *dev, 8148 enum netdev_offload_xstats_type type, 8149 struct rtnl_hw_stats64 *p_stats, bool *p_used, 8150 struct netlink_ext_ack *extack) 8151 { 8152 ASSERT_RTNL(); 8153 8154 if (p_stats) 8155 return netdev_offload_xstats_get_stats(dev, type, p_stats, 8156 p_used, extack); 8157 else 8158 return netdev_offload_xstats_get_used(dev, type, p_used, 8159 extack); 8160 } 8161 EXPORT_SYMBOL(netdev_offload_xstats_get); 8162 8163 void 8164 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, 8165 const struct rtnl_hw_stats64 *stats) 8166 { 8167 report_delta->used = true; 8168 netdev_hw_stats64_add(&report_delta->stats, stats); 8169 } 8170 EXPORT_SYMBOL(netdev_offload_xstats_report_delta); 8171 8172 void 8173 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) 8174 { 8175 report_used->used = true; 8176 } 8177 EXPORT_SYMBOL(netdev_offload_xstats_report_used); 8178 8179 void netdev_offload_xstats_push_delta(struct net_device *dev, 8180 enum netdev_offload_xstats_type type, 8181 const struct rtnl_hw_stats64 *p_stats) 8182 { 8183 struct rtnl_hw_stats64 *stats; 8184 8185 ASSERT_RTNL(); 8186 8187 stats = netdev_offload_xstats_get_ptr(dev, type); 8188 if (WARN_ON(!stats)) 8189 return; 8190 8191 netdev_hw_stats64_add(stats, p_stats); 8192 } 8193 EXPORT_SYMBOL(netdev_offload_xstats_push_delta); 8194 8195 /** 8196 * netdev_get_xmit_slave - Get the xmit slave of master device 8197 * @dev: device 8198 * @skb: The packet 8199 * @all_slaves: assume all the slaves are active 8200 * 8201 * The reference counters are not incremented so the caller must be 8202 * careful with locks. The caller must hold RCU lock. 8203 * %NULL is returned if no slave is found. 8204 */ 8205 8206 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8207 struct sk_buff *skb, 8208 bool all_slaves) 8209 { 8210 const struct net_device_ops *ops = dev->netdev_ops; 8211 8212 if (!ops->ndo_get_xmit_slave) 8213 return NULL; 8214 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8215 } 8216 EXPORT_SYMBOL(netdev_get_xmit_slave); 8217 8218 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8219 struct sock *sk) 8220 { 8221 const struct net_device_ops *ops = dev->netdev_ops; 8222 8223 if (!ops->ndo_sk_get_lower_dev) 8224 return NULL; 8225 return ops->ndo_sk_get_lower_dev(dev, sk); 8226 } 8227 8228 /** 8229 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8230 * @dev: device 8231 * @sk: the socket 8232 * 8233 * %NULL is returned if no lower device is found. 
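 *
 * A minimal usage sketch (hypothetical caller and variable names; locking
 * requirements depend on the underlying ndo_sk_get_lower_dev
 * implementation)::
 *
 *	struct net_device *lowest;
 *
 *	lowest = netdev_sk_get_lowest_dev(dev, sk);
 *	if (lowest != dev)
 *		netdev_dbg(dev, "sk steered via %s\n", lowest->name);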
8234 */ 8235 8236 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8237 struct sock *sk) 8238 { 8239 struct net_device *lower; 8240 8241 lower = netdev_sk_get_lower_dev(dev, sk); 8242 while (lower) { 8243 dev = lower; 8244 lower = netdev_sk_get_lower_dev(dev, sk); 8245 } 8246 8247 return dev; 8248 } 8249 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8250 8251 static void netdev_adjacent_add_links(struct net_device *dev) 8252 { 8253 struct netdev_adjacent *iter; 8254 8255 struct net *net = dev_net(dev); 8256 8257 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8258 if (!net_eq(net, dev_net(iter->dev))) 8259 continue; 8260 netdev_adjacent_sysfs_add(iter->dev, dev, 8261 &iter->dev->adj_list.lower); 8262 netdev_adjacent_sysfs_add(dev, iter->dev, 8263 &dev->adj_list.upper); 8264 } 8265 8266 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8267 if (!net_eq(net, dev_net(iter->dev))) 8268 continue; 8269 netdev_adjacent_sysfs_add(iter->dev, dev, 8270 &iter->dev->adj_list.upper); 8271 netdev_adjacent_sysfs_add(dev, iter->dev, 8272 &dev->adj_list.lower); 8273 } 8274 } 8275 8276 static void netdev_adjacent_del_links(struct net_device *dev) 8277 { 8278 struct netdev_adjacent *iter; 8279 8280 struct net *net = dev_net(dev); 8281 8282 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8283 if (!net_eq(net, dev_net(iter->dev))) 8284 continue; 8285 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8286 &iter->dev->adj_list.lower); 8287 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8288 &dev->adj_list.upper); 8289 } 8290 8291 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8292 if (!net_eq(net, dev_net(iter->dev))) 8293 continue; 8294 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8295 &iter->dev->adj_list.upper); 8296 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8297 &dev->adj_list.lower); 8298 } 8299 } 8300 8301 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8302 { 8303 struct netdev_adjacent *iter; 8304 8305 struct net *net = dev_net(dev); 8306 8307 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8308 if (!net_eq(net, dev_net(iter->dev))) 8309 continue; 8310 netdev_adjacent_sysfs_del(iter->dev, oldname, 8311 &iter->dev->adj_list.lower); 8312 netdev_adjacent_sysfs_add(iter->dev, dev, 8313 &iter->dev->adj_list.lower); 8314 } 8315 8316 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8317 if (!net_eq(net, dev_net(iter->dev))) 8318 continue; 8319 netdev_adjacent_sysfs_del(iter->dev, oldname, 8320 &iter->dev->adj_list.upper); 8321 netdev_adjacent_sysfs_add(iter->dev, dev, 8322 &iter->dev->adj_list.upper); 8323 } 8324 } 8325 8326 void *netdev_lower_dev_get_private(struct net_device *dev, 8327 struct net_device *lower_dev) 8328 { 8329 struct netdev_adjacent *lower; 8330 8331 if (!lower_dev) 8332 return NULL; 8333 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8334 if (!lower) 8335 return NULL; 8336 8337 return lower->private; 8338 } 8339 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8340 8341 8342 /** 8343 * netdev_lower_state_changed - Dispatch event about lower device state change 8344 * @lower_dev: device 8345 * @lower_state_info: state to dispatch 8346 * 8347 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8348 * The caller must hold the RTNL lock. 
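 *
 * A sketch of a typical caller, e.g. a LAG-style driver reporting the
 * state of one of its lower ports (variable names are illustrative)::
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up    = link_up,
 *		.tx_enabled = tx_enabled,
 *	};
 *
 *	netdev_lower_state_changed(port_dev, &info);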
8349 */ 8350 void netdev_lower_state_changed(struct net_device *lower_dev, 8351 void *lower_state_info) 8352 { 8353 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8354 .info.dev = lower_dev, 8355 }; 8356 8357 ASSERT_RTNL(); 8358 changelowerstate_info.lower_state_info = lower_state_info; 8359 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8360 &changelowerstate_info.info); 8361 } 8362 EXPORT_SYMBOL(netdev_lower_state_changed); 8363 8364 static void dev_change_rx_flags(struct net_device *dev, int flags) 8365 { 8366 const struct net_device_ops *ops = dev->netdev_ops; 8367 8368 if (ops->ndo_change_rx_flags) 8369 ops->ndo_change_rx_flags(dev, flags); 8370 } 8371 8372 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8373 { 8374 unsigned int old_flags = dev->flags; 8375 kuid_t uid; 8376 kgid_t gid; 8377 8378 ASSERT_RTNL(); 8379 8380 dev->flags |= IFF_PROMISC; 8381 dev->promiscuity += inc; 8382 if (dev->promiscuity == 0) { 8383 /* 8384 * Avoid overflow. 8385 * If inc causes overflow, untouch promisc and return error. 8386 */ 8387 if (inc < 0) 8388 dev->flags &= ~IFF_PROMISC; 8389 else { 8390 dev->promiscuity -= inc; 8391 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 8392 return -EOVERFLOW; 8393 } 8394 } 8395 if (dev->flags != old_flags) { 8396 netdev_info(dev, "%s promiscuous mode\n", 8397 dev->flags & IFF_PROMISC ? "entered" : "left"); 8398 if (audit_enabled) { 8399 current_uid_gid(&uid, &gid); 8400 audit_log(audit_context(), GFP_ATOMIC, 8401 AUDIT_ANOM_PROMISCUOUS, 8402 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8403 dev->name, (dev->flags & IFF_PROMISC), 8404 (old_flags & IFF_PROMISC), 8405 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8406 from_kuid(&init_user_ns, uid), 8407 from_kgid(&init_user_ns, gid), 8408 audit_get_sessionid(current)); 8409 } 8410 8411 dev_change_rx_flags(dev, IFF_PROMISC); 8412 } 8413 if (notify) 8414 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); 8415 return 0; 8416 } 8417 8418 /** 8419 * dev_set_promiscuity - update promiscuity count on a device 8420 * @dev: device 8421 * @inc: modifier 8422 * 8423 * Add or remove promiscuity from a device. While the count in the device 8424 * remains above zero the interface remains promiscuous. Once it hits zero 8425 * the device reverts back to normal filtering operation. A negative inc 8426 * value is used to drop promiscuity on the device. 8427 * Return 0 if successful or a negative errno code on error. 8428 */ 8429 int dev_set_promiscuity(struct net_device *dev, int inc) 8430 { 8431 unsigned int old_flags = dev->flags; 8432 int err; 8433 8434 err = __dev_set_promiscuity(dev, inc, true); 8435 if (err < 0) 8436 return err; 8437 if (dev->flags != old_flags) 8438 dev_set_rx_mode(dev); 8439 return err; 8440 } 8441 EXPORT_SYMBOL(dev_set_promiscuity); 8442 8443 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8444 { 8445 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8446 8447 ASSERT_RTNL(); 8448 8449 dev->flags |= IFF_ALLMULTI; 8450 dev->allmulti += inc; 8451 if (dev->allmulti == 0) { 8452 /* 8453 * Avoid overflow. 8454 * If inc causes overflow, untouch allmulti and return error. 8455 */ 8456 if (inc < 0) 8457 dev->flags &= ~IFF_ALLMULTI; 8458 else { 8459 dev->allmulti -= inc; 8460 netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); 8461 return -EOVERFLOW; 8462 } 8463 } 8464 if (dev->flags ^ old_flags) { 8465 netdev_info(dev, "%s allmulticast mode\n", 8466 dev->flags & IFF_ALLMULTI ? "entered" : "left"); 8467 dev_change_rx_flags(dev, IFF_ALLMULTI); 8468 dev_set_rx_mode(dev); 8469 if (notify) 8470 __dev_notify_flags(dev, old_flags, 8471 dev->gflags ^ old_gflags, 0, NULL); 8472 } 8473 return 0; 8474 } 8475 8476 /** 8477 * dev_set_allmulti - update allmulti count on a device 8478 * @dev: device 8479 * @inc: modifier 8480 * 8481 * Add or remove reception of all multicast frames to a device. While the 8482 * count in the device remains above zero the interface remains listening 8483 * to all interfaces. Once it hits zero the device reverts back to normal 8484 * filtering operation. A negative @inc value is used to drop the counter 8485 * when releasing a resource needing all multicasts. 8486 * Return 0 if successful or a negative errno code on error. 8487 */ 8488 8489 int dev_set_allmulti(struct net_device *dev, int inc) 8490 { 8491 return __dev_set_allmulti(dev, inc, true); 8492 } 8493 EXPORT_SYMBOL(dev_set_allmulti); 8494 8495 /* 8496 * Upload unicast and multicast address lists to device and 8497 * configure RX filtering. When the device doesn't support unicast 8498 * filtering it is put in promiscuous mode while unicast addresses 8499 * are present. 8500 */ 8501 void __dev_set_rx_mode(struct net_device *dev) 8502 { 8503 const struct net_device_ops *ops = dev->netdev_ops; 8504 8505 /* dev_open will call this function so the list will stay sane. */ 8506 if (!(dev->flags&IFF_UP)) 8507 return; 8508 8509 if (!netif_device_present(dev)) 8510 return; 8511 8512 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8513 /* Unicast addresses changes may only happen under the rtnl, 8514 * therefore calling __dev_set_promiscuity here is safe. 8515 */ 8516 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8517 __dev_set_promiscuity(dev, 1, false); 8518 dev->uc_promisc = true; 8519 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8520 __dev_set_promiscuity(dev, -1, false); 8521 dev->uc_promisc = false; 8522 } 8523 } 8524 8525 if (ops->ndo_set_rx_mode) 8526 ops->ndo_set_rx_mode(dev); 8527 } 8528 8529 void dev_set_rx_mode(struct net_device *dev) 8530 { 8531 netif_addr_lock_bh(dev); 8532 __dev_set_rx_mode(dev); 8533 netif_addr_unlock_bh(dev); 8534 } 8535 8536 /** 8537 * dev_get_flags - get flags reported to userspace 8538 * @dev: device 8539 * 8540 * Get the combination of flag bits exported through APIs to userspace. 8541 */ 8542 unsigned int dev_get_flags(const struct net_device *dev) 8543 { 8544 unsigned int flags; 8545 8546 flags = (dev->flags & ~(IFF_PROMISC | 8547 IFF_ALLMULTI | 8548 IFF_RUNNING | 8549 IFF_LOWER_UP | 8550 IFF_DORMANT)) | 8551 (dev->gflags & (IFF_PROMISC | 8552 IFF_ALLMULTI)); 8553 8554 if (netif_running(dev)) { 8555 if (netif_oper_up(dev)) 8556 flags |= IFF_RUNNING; 8557 if (netif_carrier_ok(dev)) 8558 flags |= IFF_LOWER_UP; 8559 if (netif_dormant(dev)) 8560 flags |= IFF_DORMANT; 8561 } 8562 8563 return flags; 8564 } 8565 EXPORT_SYMBOL(dev_get_flags); 8566 8567 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8568 struct netlink_ext_ack *extack) 8569 { 8570 unsigned int old_flags = dev->flags; 8571 int ret; 8572 8573 ASSERT_RTNL(); 8574 8575 /* 8576 * Set the flags on our device. 
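 * Only the user-settable bits (IFF_DEBUG, IFF_NOTRAILERS, IFF_NOARP,
 * IFF_DYNAMIC, IFF_MULTICAST, IFF_PORTSEL, IFF_AUTOMEDIA) are taken from
 * the requested flags; IFF_UP, IFF_VOLATILE, IFF_PROMISC and IFF_ALLMULTI
 * are preserved from the current device state and handled separately below.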
8577 */ 8578 8579 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8580 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8581 IFF_AUTOMEDIA)) | 8582 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8583 IFF_ALLMULTI)); 8584 8585 /* 8586 * Load in the correct multicast list now the flags have changed. 8587 */ 8588 8589 if ((old_flags ^ flags) & IFF_MULTICAST) 8590 dev_change_rx_flags(dev, IFF_MULTICAST); 8591 8592 dev_set_rx_mode(dev); 8593 8594 /* 8595 * Have we downed the interface. We handle IFF_UP ourselves 8596 * according to user attempts to set it, rather than blindly 8597 * setting it. 8598 */ 8599 8600 ret = 0; 8601 if ((old_flags ^ flags) & IFF_UP) { 8602 if (old_flags & IFF_UP) 8603 __dev_close(dev); 8604 else 8605 ret = __dev_open(dev, extack); 8606 } 8607 8608 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8609 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8610 unsigned int old_flags = dev->flags; 8611 8612 dev->gflags ^= IFF_PROMISC; 8613 8614 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8615 if (dev->flags != old_flags) 8616 dev_set_rx_mode(dev); 8617 } 8618 8619 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8620 * is important. Some (broken) drivers set IFF_PROMISC, when 8621 * IFF_ALLMULTI is requested not asking us and not reporting. 8622 */ 8623 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8624 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8625 8626 dev->gflags ^= IFF_ALLMULTI; 8627 __dev_set_allmulti(dev, inc, false); 8628 } 8629 8630 return ret; 8631 } 8632 8633 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8634 unsigned int gchanges, u32 portid, 8635 const struct nlmsghdr *nlh) 8636 { 8637 unsigned int changes = dev->flags ^ old_flags; 8638 8639 if (gchanges) 8640 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); 8641 8642 if (changes & IFF_UP) { 8643 if (dev->flags & IFF_UP) 8644 call_netdevice_notifiers(NETDEV_UP, dev); 8645 else 8646 call_netdevice_notifiers(NETDEV_DOWN, dev); 8647 } 8648 8649 if (dev->flags & IFF_UP && 8650 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8651 struct netdev_notifier_change_info change_info = { 8652 .info = { 8653 .dev = dev, 8654 }, 8655 .flags_changed = changes, 8656 }; 8657 8658 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8659 } 8660 } 8661 8662 /** 8663 * dev_change_flags - change device settings 8664 * @dev: device 8665 * @flags: device state flags 8666 * @extack: netlink extended ack 8667 * 8668 * Change settings on device based state flags. The flags are 8669 * in the userspace exported format. 
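 *
 * Must be called under RTNL. A minimal sketch of bringing an interface
 * administratively up (hypothetical caller, no extended ack)::
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 *	rtnl_unlock();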
8670 */ 8671 int dev_change_flags(struct net_device *dev, unsigned int flags, 8672 struct netlink_ext_ack *extack) 8673 { 8674 int ret; 8675 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8676 8677 ret = __dev_change_flags(dev, flags, extack); 8678 if (ret < 0) 8679 return ret; 8680 8681 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8682 __dev_notify_flags(dev, old_flags, changes, 0, NULL); 8683 return ret; 8684 } 8685 EXPORT_SYMBOL(dev_change_flags); 8686 8687 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8688 { 8689 const struct net_device_ops *ops = dev->netdev_ops; 8690 8691 if (ops->ndo_change_mtu) 8692 return ops->ndo_change_mtu(dev, new_mtu); 8693 8694 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8695 WRITE_ONCE(dev->mtu, new_mtu); 8696 return 0; 8697 } 8698 EXPORT_SYMBOL(__dev_set_mtu); 8699 8700 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8701 struct netlink_ext_ack *extack) 8702 { 8703 /* MTU must be positive, and in range */ 8704 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8705 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8706 return -EINVAL; 8707 } 8708 8709 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8710 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8711 return -EINVAL; 8712 } 8713 return 0; 8714 } 8715 8716 /** 8717 * dev_set_mtu_ext - Change maximum transfer unit 8718 * @dev: device 8719 * @new_mtu: new transfer unit 8720 * @extack: netlink extended ack 8721 * 8722 * Change the maximum transfer size of the network device. 8723 */ 8724 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8725 struct netlink_ext_ack *extack) 8726 { 8727 int err, orig_mtu; 8728 8729 if (new_mtu == dev->mtu) 8730 return 0; 8731 8732 err = dev_validate_mtu(dev, new_mtu, extack); 8733 if (err) 8734 return err; 8735 8736 if (!netif_device_present(dev)) 8737 return -ENODEV; 8738 8739 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8740 err = notifier_to_errno(err); 8741 if (err) 8742 return err; 8743 8744 orig_mtu = dev->mtu; 8745 err = __dev_set_mtu(dev, new_mtu); 8746 8747 if (!err) { 8748 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8749 orig_mtu); 8750 err = notifier_to_errno(err); 8751 if (err) { 8752 /* setting mtu back and notifying everyone again, 8753 * so that they have a chance to revert changes. 
8754 */ 8755 __dev_set_mtu(dev, orig_mtu); 8756 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8757 new_mtu); 8758 } 8759 } 8760 return err; 8761 } 8762 8763 int dev_set_mtu(struct net_device *dev, int new_mtu) 8764 { 8765 struct netlink_ext_ack extack; 8766 int err; 8767 8768 memset(&extack, 0, sizeof(extack)); 8769 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8770 if (err && extack._msg) 8771 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8772 return err; 8773 } 8774 EXPORT_SYMBOL(dev_set_mtu); 8775 8776 /** 8777 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8778 * @dev: device 8779 * @new_len: new tx queue length 8780 */ 8781 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8782 { 8783 unsigned int orig_len = dev->tx_queue_len; 8784 int res; 8785 8786 if (new_len != (unsigned int)new_len) 8787 return -ERANGE; 8788 8789 if (new_len != orig_len) { 8790 dev->tx_queue_len = new_len; 8791 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8792 res = notifier_to_errno(res); 8793 if (res) 8794 goto err_rollback; 8795 res = dev_qdisc_change_tx_queue_len(dev); 8796 if (res) 8797 goto err_rollback; 8798 } 8799 8800 return 0; 8801 8802 err_rollback: 8803 netdev_err(dev, "refused to change device tx_queue_len\n"); 8804 dev->tx_queue_len = orig_len; 8805 return res; 8806 } 8807 8808 /** 8809 * dev_set_group - Change group this device belongs to 8810 * @dev: device 8811 * @new_group: group this device should belong to 8812 */ 8813 void dev_set_group(struct net_device *dev, int new_group) 8814 { 8815 dev->group = new_group; 8816 } 8817 8818 /** 8819 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8820 * @dev: device 8821 * @addr: new address 8822 * @extack: netlink extended ack 8823 */ 8824 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8825 struct netlink_ext_ack *extack) 8826 { 8827 struct netdev_notifier_pre_changeaddr_info info = { 8828 .info.dev = dev, 8829 .info.extack = extack, 8830 .dev_addr = addr, 8831 }; 8832 int rc; 8833 8834 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8835 return notifier_to_errno(rc); 8836 } 8837 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8838 8839 /** 8840 * dev_set_mac_address - Change Media Access Control Address 8841 * @dev: device 8842 * @sa: new address 8843 * @extack: netlink extended ack 8844 * 8845 * Change the hardware (MAC) address of the device 8846 */ 8847 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8848 struct netlink_ext_ack *extack) 8849 { 8850 const struct net_device_ops *ops = dev->netdev_ops; 8851 int err; 8852 8853 if (!ops->ndo_set_mac_address) 8854 return -EOPNOTSUPP; 8855 if (sa->sa_family != dev->type) 8856 return -EINVAL; 8857 if (!netif_device_present(dev)) 8858 return -ENODEV; 8859 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8860 if (err) 8861 return err; 8862 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) { 8863 err = ops->ndo_set_mac_address(dev, sa); 8864 if (err) 8865 return err; 8866 } 8867 dev->addr_assign_type = NET_ADDR_SET; 8868 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8869 add_device_randomness(dev->dev_addr, dev->addr_len); 8870 return 0; 8871 } 8872 EXPORT_SYMBOL(dev_set_mac_address); 8873 8874 static DECLARE_RWSEM(dev_addr_sem); 8875 8876 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 8877 struct netlink_ext_ack *extack) 8878 { 8879 int ret; 8880 8881 down_write(&dev_addr_sem); 8882 ret = dev_set_mac_address(dev, 
sa, extack); 8883 up_write(&dev_addr_sem); 8884 return ret; 8885 } 8886 EXPORT_SYMBOL(dev_set_mac_address_user); 8887 8888 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 8889 { 8890 size_t size = sizeof(sa->sa_data_min); 8891 struct net_device *dev; 8892 int ret = 0; 8893 8894 down_read(&dev_addr_sem); 8895 rcu_read_lock(); 8896 8897 dev = dev_get_by_name_rcu(net, dev_name); 8898 if (!dev) { 8899 ret = -ENODEV; 8900 goto unlock; 8901 } 8902 if (!dev->addr_len) 8903 memset(sa->sa_data, 0, size); 8904 else 8905 memcpy(sa->sa_data, dev->dev_addr, 8906 min_t(size_t, size, dev->addr_len)); 8907 sa->sa_family = dev->type; 8908 8909 unlock: 8910 rcu_read_unlock(); 8911 up_read(&dev_addr_sem); 8912 return ret; 8913 } 8914 EXPORT_SYMBOL(dev_get_mac_address); 8915 8916 /** 8917 * dev_change_carrier - Change device carrier 8918 * @dev: device 8919 * @new_carrier: new value 8920 * 8921 * Change device carrier 8922 */ 8923 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8924 { 8925 const struct net_device_ops *ops = dev->netdev_ops; 8926 8927 if (!ops->ndo_change_carrier) 8928 return -EOPNOTSUPP; 8929 if (!netif_device_present(dev)) 8930 return -ENODEV; 8931 return ops->ndo_change_carrier(dev, new_carrier); 8932 } 8933 8934 /** 8935 * dev_get_phys_port_id - Get device physical port ID 8936 * @dev: device 8937 * @ppid: port ID 8938 * 8939 * Get device physical port ID 8940 */ 8941 int dev_get_phys_port_id(struct net_device *dev, 8942 struct netdev_phys_item_id *ppid) 8943 { 8944 const struct net_device_ops *ops = dev->netdev_ops; 8945 8946 if (!ops->ndo_get_phys_port_id) 8947 return -EOPNOTSUPP; 8948 return ops->ndo_get_phys_port_id(dev, ppid); 8949 } 8950 8951 /** 8952 * dev_get_phys_port_name - Get device physical port name 8953 * @dev: device 8954 * @name: port name 8955 * @len: limit of bytes to copy to name 8956 * 8957 * Get device physical port name 8958 */ 8959 int dev_get_phys_port_name(struct net_device *dev, 8960 char *name, size_t len) 8961 { 8962 const struct net_device_ops *ops = dev->netdev_ops; 8963 int err; 8964 8965 if (ops->ndo_get_phys_port_name) { 8966 err = ops->ndo_get_phys_port_name(dev, name, len); 8967 if (err != -EOPNOTSUPP) 8968 return err; 8969 } 8970 return devlink_compat_phys_port_name_get(dev, name, len); 8971 } 8972 8973 /** 8974 * dev_get_port_parent_id - Get the device's port parent identifier 8975 * @dev: network device 8976 * @ppid: pointer to a storage for the port's parent identifier 8977 * @recurse: allow/disallow recursion to lower devices 8978 * 8979 * Get the devices's port parent identifier 8980 */ 8981 int dev_get_port_parent_id(struct net_device *dev, 8982 struct netdev_phys_item_id *ppid, 8983 bool recurse) 8984 { 8985 const struct net_device_ops *ops = dev->netdev_ops; 8986 struct netdev_phys_item_id first = { }; 8987 struct net_device *lower_dev; 8988 struct list_head *iter; 8989 int err; 8990 8991 if (ops->ndo_get_port_parent_id) { 8992 err = ops->ndo_get_port_parent_id(dev, ppid); 8993 if (err != -EOPNOTSUPP) 8994 return err; 8995 } 8996 8997 err = devlink_compat_switch_id_get(dev, ppid); 8998 if (!recurse || err != -EOPNOTSUPP) 8999 return err; 9000 9001 netdev_for_each_lower_dev(dev, lower_dev, iter) { 9002 err = dev_get_port_parent_id(lower_dev, ppid, true); 9003 if (err) 9004 break; 9005 if (!first.id_len) 9006 first = *ppid; 9007 else if (memcmp(&first, ppid, sizeof(*ppid))) 9008 return -EOPNOTSUPP; 9009 } 9010 9011 return err; 9012 } 9013 EXPORT_SYMBOL(dev_get_port_parent_id); 9014 9015 /** 9016 * 
netdev_port_same_parent_id - Indicate if two network devices have 9017 * the same port parent identifier 9018 * @a: first network device 9019 * @b: second network device 9020 */ 9021 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 9022 { 9023 struct netdev_phys_item_id a_id = { }; 9024 struct netdev_phys_item_id b_id = { }; 9025 9026 if (dev_get_port_parent_id(a, &a_id, true) || 9027 dev_get_port_parent_id(b, &b_id, true)) 9028 return false; 9029 9030 return netdev_phys_item_id_same(&a_id, &b_id); 9031 } 9032 EXPORT_SYMBOL(netdev_port_same_parent_id); 9033 9034 static void netdev_dpll_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin) 9035 { 9036 #if IS_ENABLED(CONFIG_DPLL) 9037 rtnl_lock(); 9038 dev->dpll_pin = dpll_pin; 9039 rtnl_unlock(); 9040 #endif 9041 } 9042 9043 void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) 9044 { 9045 WARN_ON(!dpll_pin); 9046 netdev_dpll_pin_assign(dev, dpll_pin); 9047 } 9048 EXPORT_SYMBOL(netdev_dpll_pin_set); 9049 9050 void netdev_dpll_pin_clear(struct net_device *dev) 9051 { 9052 netdev_dpll_pin_assign(dev, NULL); 9053 } 9054 EXPORT_SYMBOL(netdev_dpll_pin_clear); 9055 9056 /** 9057 * dev_change_proto_down - set carrier according to proto_down. 9058 * 9059 * @dev: device 9060 * @proto_down: new value 9061 */ 9062 int dev_change_proto_down(struct net_device *dev, bool proto_down) 9063 { 9064 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) 9065 return -EOPNOTSUPP; 9066 if (!netif_device_present(dev)) 9067 return -ENODEV; 9068 if (proto_down) 9069 netif_carrier_off(dev); 9070 else 9071 netif_carrier_on(dev); 9072 dev->proto_down = proto_down; 9073 return 0; 9074 } 9075 9076 /** 9077 * dev_change_proto_down_reason - proto down reason 9078 * 9079 * @dev: device 9080 * @mask: proto down mask 9081 * @value: proto down value 9082 */ 9083 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 9084 u32 value) 9085 { 9086 int b; 9087 9088 if (!mask) { 9089 dev->proto_down_reason = value; 9090 } else { 9091 for_each_set_bit(b, &mask, 32) { 9092 if (value & (1 << b)) 9093 dev->proto_down_reason |= BIT(b); 9094 else 9095 dev->proto_down_reason &= ~BIT(b); 9096 } 9097 } 9098 } 9099 9100 struct bpf_xdp_link { 9101 struct bpf_link link; 9102 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9103 int flags; 9104 }; 9105 9106 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9107 { 9108 if (flags & XDP_FLAGS_HW_MODE) 9109 return XDP_MODE_HW; 9110 if (flags & XDP_FLAGS_DRV_MODE) 9111 return XDP_MODE_DRV; 9112 if (flags & XDP_FLAGS_SKB_MODE) 9113 return XDP_MODE_SKB; 9114 return dev->netdev_ops->ndo_bpf ? 
XDP_MODE_DRV : XDP_MODE_SKB; 9115 } 9116 9117 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9118 { 9119 switch (mode) { 9120 case XDP_MODE_SKB: 9121 return generic_xdp_install; 9122 case XDP_MODE_DRV: 9123 case XDP_MODE_HW: 9124 return dev->netdev_ops->ndo_bpf; 9125 default: 9126 return NULL; 9127 } 9128 } 9129 9130 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9131 enum bpf_xdp_mode mode) 9132 { 9133 return dev->xdp_state[mode].link; 9134 } 9135 9136 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9137 enum bpf_xdp_mode mode) 9138 { 9139 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9140 9141 if (link) 9142 return link->link.prog; 9143 return dev->xdp_state[mode].prog; 9144 } 9145 9146 u8 dev_xdp_prog_count(struct net_device *dev) 9147 { 9148 u8 count = 0; 9149 int i; 9150 9151 for (i = 0; i < __MAX_XDP_MODE; i++) 9152 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9153 count++; 9154 return count; 9155 } 9156 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9157 9158 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9159 { 9160 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9161 9162 return prog ? prog->aux->id : 0; 9163 } 9164 9165 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9166 struct bpf_xdp_link *link) 9167 { 9168 dev->xdp_state[mode].link = link; 9169 dev->xdp_state[mode].prog = NULL; 9170 } 9171 9172 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9173 struct bpf_prog *prog) 9174 { 9175 dev->xdp_state[mode].link = NULL; 9176 dev->xdp_state[mode].prog = prog; 9177 } 9178 9179 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9180 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9181 u32 flags, struct bpf_prog *prog) 9182 { 9183 struct netdev_bpf xdp; 9184 int err; 9185 9186 memset(&xdp, 0, sizeof(xdp)); 9187 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9188 xdp.extack = extack; 9189 xdp.flags = flags; 9190 xdp.prog = prog; 9191 9192 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9193 * "moved" into driver), so they don't increment it on their own, but 9194 * they do decrement refcnt when program is detached or replaced. 9195 * Given net_device also owns link/prog, we need to bump refcnt here 9196 * to prevent drivers from underflowing it. 
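 * If the driver rejects the program, that extra reference is dropped
 * again via bpf_prog_put() in the error path below.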
9197 */ 9198 if (prog) 9199 bpf_prog_inc(prog); 9200 err = bpf_op(dev, &xdp); 9201 if (err) { 9202 if (prog) 9203 bpf_prog_put(prog); 9204 return err; 9205 } 9206 9207 if (mode != XDP_MODE_HW) 9208 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9209 9210 return 0; 9211 } 9212 9213 static void dev_xdp_uninstall(struct net_device *dev) 9214 { 9215 struct bpf_xdp_link *link; 9216 struct bpf_prog *prog; 9217 enum bpf_xdp_mode mode; 9218 bpf_op_t bpf_op; 9219 9220 ASSERT_RTNL(); 9221 9222 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9223 prog = dev_xdp_prog(dev, mode); 9224 if (!prog) 9225 continue; 9226 9227 bpf_op = dev_xdp_bpf_op(dev, mode); 9228 if (!bpf_op) 9229 continue; 9230 9231 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9232 9233 /* auto-detach link from net device */ 9234 link = dev_xdp_link(dev, mode); 9235 if (link) 9236 link->dev = NULL; 9237 else 9238 bpf_prog_put(prog); 9239 9240 dev_xdp_set_link(dev, mode, NULL); 9241 } 9242 } 9243 9244 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9245 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9246 struct bpf_prog *old_prog, u32 flags) 9247 { 9248 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9249 struct bpf_prog *cur_prog; 9250 struct net_device *upper; 9251 struct list_head *iter; 9252 enum bpf_xdp_mode mode; 9253 bpf_op_t bpf_op; 9254 int err; 9255 9256 ASSERT_RTNL(); 9257 9258 /* either link or prog attachment, never both */ 9259 if (link && (new_prog || old_prog)) 9260 return -EINVAL; 9261 /* link supports only XDP mode flags */ 9262 if (link && (flags & ~XDP_FLAGS_MODES)) { 9263 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9264 return -EINVAL; 9265 } 9266 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9267 if (num_modes > 1) { 9268 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9269 return -EINVAL; 9270 } 9271 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9272 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9273 NL_SET_ERR_MSG(extack, 9274 "More than one program loaded, unset mode is ambiguous"); 9275 return -EINVAL; 9276 } 9277 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9278 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9279 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); 9280 return -EINVAL; 9281 } 9282 9283 mode = dev_xdp_mode(dev, flags); 9284 /* can't replace attached link */ 9285 if (dev_xdp_link(dev, mode)) { 9286 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9287 return -EBUSY; 9288 } 9289 9290 /* don't allow if an upper device already has a program */ 9291 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9292 if (dev_xdp_prog_count(upper) > 0) { 9293 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9294 return -EEXIST; 9295 } 9296 } 9297 9298 cur_prog = dev_xdp_prog(dev, mode); 9299 /* can't replace attached prog with link */ 9300 if (link && cur_prog) { 9301 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9302 return -EBUSY; 9303 } 9304 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9305 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9306 return -EEXIST; 9307 } 9308 9309 /* put effective new program into new_prog */ 9310 if (link) 9311 new_prog = link->link.prog; 9312 9313 if (new_prog) { 9314 bool offload = mode == XDP_MODE_HW; 9315 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9316 ? 
XDP_MODE_DRV : XDP_MODE_SKB; 9317 9318 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9319 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9320 return -EBUSY; 9321 } 9322 if (!offload && dev_xdp_prog(dev, other_mode)) { 9323 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9324 return -EEXIST; 9325 } 9326 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { 9327 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); 9328 return -EINVAL; 9329 } 9330 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { 9331 NL_SET_ERR_MSG(extack, "Program bound to different device"); 9332 return -EINVAL; 9333 } 9334 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9335 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9336 return -EINVAL; 9337 } 9338 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9339 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9340 return -EINVAL; 9341 } 9342 } 9343 9344 /* don't call drivers if the effective program didn't change */ 9345 if (new_prog != cur_prog) { 9346 bpf_op = dev_xdp_bpf_op(dev, mode); 9347 if (!bpf_op) { 9348 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9349 return -EOPNOTSUPP; 9350 } 9351 9352 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9353 if (err) 9354 return err; 9355 } 9356 9357 if (link) 9358 dev_xdp_set_link(dev, mode, link); 9359 else 9360 dev_xdp_set_prog(dev, mode, new_prog); 9361 if (cur_prog) 9362 bpf_prog_put(cur_prog); 9363 9364 return 0; 9365 } 9366 9367 static int dev_xdp_attach_link(struct net_device *dev, 9368 struct netlink_ext_ack *extack, 9369 struct bpf_xdp_link *link) 9370 { 9371 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9372 } 9373 9374 static int dev_xdp_detach_link(struct net_device *dev, 9375 struct netlink_ext_ack *extack, 9376 struct bpf_xdp_link *link) 9377 { 9378 enum bpf_xdp_mode mode; 9379 bpf_op_t bpf_op; 9380 9381 ASSERT_RTNL(); 9382 9383 mode = dev_xdp_mode(dev, link->flags); 9384 if (dev_xdp_link(dev, mode) != link) 9385 return -EINVAL; 9386 9387 bpf_op = dev_xdp_bpf_op(dev, mode); 9388 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9389 dev_xdp_set_link(dev, mode, NULL); 9390 return 0; 9391 } 9392 9393 static void bpf_xdp_link_release(struct bpf_link *link) 9394 { 9395 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9396 9397 rtnl_lock(); 9398 9399 /* if racing with net_device's tear down, xdp_link->dev might be 9400 * already NULL, in which case link was already auto-detached 9401 */ 9402 if (xdp_link->dev) { 9403 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9404 xdp_link->dev = NULL; 9405 } 9406 9407 rtnl_unlock(); 9408 } 9409 9410 static int bpf_xdp_link_detach(struct bpf_link *link) 9411 { 9412 bpf_xdp_link_release(link); 9413 return 0; 9414 } 9415 9416 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9417 { 9418 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9419 9420 kfree(xdp_link); 9421 } 9422 9423 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9424 struct seq_file *seq) 9425 { 9426 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9427 u32 ifindex = 0; 9428 9429 rtnl_lock(); 9430 if (xdp_link->dev) 9431 ifindex = xdp_link->dev->ifindex; 9432 rtnl_unlock(); 9433 9434 seq_printf(seq, 
"ifindex:\t%u\n", ifindex); 9435 } 9436 9437 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9438 struct bpf_link_info *info) 9439 { 9440 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9441 u32 ifindex = 0; 9442 9443 rtnl_lock(); 9444 if (xdp_link->dev) 9445 ifindex = xdp_link->dev->ifindex; 9446 rtnl_unlock(); 9447 9448 info->xdp.ifindex = ifindex; 9449 return 0; 9450 } 9451 9452 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9453 struct bpf_prog *old_prog) 9454 { 9455 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9456 enum bpf_xdp_mode mode; 9457 bpf_op_t bpf_op; 9458 int err = 0; 9459 9460 rtnl_lock(); 9461 9462 /* link might have been auto-released already, so fail */ 9463 if (!xdp_link->dev) { 9464 err = -ENOLINK; 9465 goto out_unlock; 9466 } 9467 9468 if (old_prog && link->prog != old_prog) { 9469 err = -EPERM; 9470 goto out_unlock; 9471 } 9472 old_prog = link->prog; 9473 if (old_prog->type != new_prog->type || 9474 old_prog->expected_attach_type != new_prog->expected_attach_type) { 9475 err = -EINVAL; 9476 goto out_unlock; 9477 } 9478 9479 if (old_prog == new_prog) { 9480 /* no-op, don't disturb drivers */ 9481 bpf_prog_put(new_prog); 9482 goto out_unlock; 9483 } 9484 9485 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9486 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9487 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9488 xdp_link->flags, new_prog); 9489 if (err) 9490 goto out_unlock; 9491 9492 old_prog = xchg(&link->prog, new_prog); 9493 bpf_prog_put(old_prog); 9494 9495 out_unlock: 9496 rtnl_unlock(); 9497 return err; 9498 } 9499 9500 static const struct bpf_link_ops bpf_xdp_link_lops = { 9501 .release = bpf_xdp_link_release, 9502 .dealloc = bpf_xdp_link_dealloc, 9503 .detach = bpf_xdp_link_detach, 9504 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9505 .fill_link_info = bpf_xdp_link_fill_link_info, 9506 .update_prog = bpf_xdp_link_update, 9507 }; 9508 9509 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9510 { 9511 struct net *net = current->nsproxy->net_ns; 9512 struct bpf_link_primer link_primer; 9513 struct netlink_ext_ack extack = {}; 9514 struct bpf_xdp_link *link; 9515 struct net_device *dev; 9516 int err, fd; 9517 9518 rtnl_lock(); 9519 dev = dev_get_by_index(net, attr->link_create.target_ifindex); 9520 if (!dev) { 9521 rtnl_unlock(); 9522 return -EINVAL; 9523 } 9524 9525 link = kzalloc(sizeof(*link), GFP_USER); 9526 if (!link) { 9527 err = -ENOMEM; 9528 goto unlock; 9529 } 9530 9531 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9532 link->dev = dev; 9533 link->flags = attr->link_create.flags; 9534 9535 err = bpf_link_prime(&link->link, &link_primer); 9536 if (err) { 9537 kfree(link); 9538 goto unlock; 9539 } 9540 9541 err = dev_xdp_attach_link(dev, &extack, link); 9542 rtnl_unlock(); 9543 9544 if (err) { 9545 link->dev = NULL; 9546 bpf_link_cleanup(&link_primer); 9547 trace_bpf_xdp_link_attach_failed(extack._msg); 9548 goto out_put_dev; 9549 } 9550 9551 fd = bpf_link_settle(&link_primer); 9552 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9553 dev_put(dev); 9554 return fd; 9555 9556 unlock: 9557 rtnl_unlock(); 9558 9559 out_put_dev: 9560 dev_put(dev); 9561 return err; 9562 } 9563 9564 /** 9565 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9566 * @dev: device 9567 * @extack: netlink extended ack 9568 * @fd: new program fd or negative value 
to clear 9569 * @expected_fd: old program fd that userspace expects to replace or clear 9570 * @flags: xdp-related flags 9571 * 9572 * Set or clear a bpf program for a device 9573 */ 9574 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9575 int fd, int expected_fd, u32 flags) 9576 { 9577 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9578 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9579 int err; 9580 9581 ASSERT_RTNL(); 9582 9583 if (fd >= 0) { 9584 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9585 mode != XDP_MODE_SKB); 9586 if (IS_ERR(new_prog)) 9587 return PTR_ERR(new_prog); 9588 } 9589 9590 if (expected_fd >= 0) { 9591 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9592 mode != XDP_MODE_SKB); 9593 if (IS_ERR(old_prog)) { 9594 err = PTR_ERR(old_prog); 9595 old_prog = NULL; 9596 goto err_out; 9597 } 9598 } 9599 9600 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9601 9602 err_out: 9603 if (err && new_prog) 9604 bpf_prog_put(new_prog); 9605 if (old_prog) 9606 bpf_prog_put(old_prog); 9607 return err; 9608 } 9609 9610 /** 9611 * dev_index_reserve() - allocate an ifindex in a namespace 9612 * @net: the applicable net namespace 9613 * @ifindex: requested ifindex, pass %0 to get one allocated 9614 * 9615 * Allocate a ifindex for a new device. Caller must either use the ifindex 9616 * to store the device (via list_netdevice()) or call dev_index_release() 9617 * to give the index up. 9618 * 9619 * Return: a suitable unique value for a new device interface number or -errno. 9620 */ 9621 static int dev_index_reserve(struct net *net, u32 ifindex) 9622 { 9623 int err; 9624 9625 if (ifindex > INT_MAX) { 9626 DEBUG_NET_WARN_ON_ONCE(1); 9627 return -EINVAL; 9628 } 9629 9630 if (!ifindex) 9631 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL, 9632 xa_limit_31b, &net->ifindex, GFP_KERNEL); 9633 else 9634 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL); 9635 if (err < 0) 9636 return err; 9637 9638 return ifindex; 9639 } 9640 9641 static void dev_index_release(struct net *net, int ifindex) 9642 { 9643 /* Expect only unused indexes, unlist_netdevice() removes the used */ 9644 WARN_ON(xa_erase(&net->dev_by_index, ifindex)); 9645 } 9646 9647 /* Delayed registration/unregisteration */ 9648 LIST_HEAD(net_todo_list); 9649 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9650 9651 static void net_set_todo(struct net_device *dev) 9652 { 9653 list_add_tail(&dev->todo_list, &net_todo_list); 9654 atomic_inc(&dev_net(dev)->dev_unreg_count); 9655 } 9656 9657 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9658 struct net_device *upper, netdev_features_t features) 9659 { 9660 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9661 netdev_features_t feature; 9662 int feature_bit; 9663 9664 for_each_netdev_feature(upper_disables, feature_bit) { 9665 feature = __NETIF_F_BIT(feature_bit); 9666 if (!(upper->wanted_features & feature) 9667 && (features & feature)) { 9668 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9669 &feature, upper->name); 9670 features &= ~feature; 9671 } 9672 } 9673 9674 return features; 9675 } 9676 9677 static void netdev_sync_lower_features(struct net_device *upper, 9678 struct net_device *lower, netdev_features_t features) 9679 { 9680 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9681 netdev_features_t feature; 9682 int feature_bit; 9683 9684 for_each_netdev_feature(upper_disables, feature_bit) { 9685 feature = 
__NETIF_F_BIT(feature_bit); 9686 if (!(features & feature) && (lower->features & feature)) { 9687 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9688 &feature, lower->name); 9689 lower->wanted_features &= ~feature; 9690 __netdev_update_features(lower); 9691 9692 if (unlikely(lower->features & feature)) 9693 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9694 &feature, lower->name); 9695 else 9696 netdev_features_change(lower); 9697 } 9698 } 9699 } 9700 9701 static netdev_features_t netdev_fix_features(struct net_device *dev, 9702 netdev_features_t features) 9703 { 9704 /* Fix illegal checksum combinations */ 9705 if ((features & NETIF_F_HW_CSUM) && 9706 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9707 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9708 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9709 } 9710 9711 /* TSO requires that SG is present as well. */ 9712 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9713 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9714 features &= ~NETIF_F_ALL_TSO; 9715 } 9716 9717 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9718 !(features & NETIF_F_IP_CSUM)) { 9719 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9720 features &= ~NETIF_F_TSO; 9721 features &= ~NETIF_F_TSO_ECN; 9722 } 9723 9724 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9725 !(features & NETIF_F_IPV6_CSUM)) { 9726 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9727 features &= ~NETIF_F_TSO6; 9728 } 9729 9730 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9731 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9732 features &= ~NETIF_F_TSO_MANGLEID; 9733 9734 /* TSO ECN requires that TSO is present as well. */ 9735 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9736 features &= ~NETIF_F_TSO_ECN; 9737 9738 /* Software GSO depends on SG. */ 9739 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9740 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9741 features &= ~NETIF_F_GSO; 9742 } 9743 9744 /* GSO partial features require GSO partial be set */ 9745 if ((features & dev->gso_partial_features) && 9746 !(features & NETIF_F_GSO_PARTIAL)) { 9747 netdev_dbg(dev, 9748 "Dropping partially supported GSO features since no GSO partial.\n"); 9749 features &= ~dev->gso_partial_features; 9750 } 9751 9752 if (!(features & NETIF_F_RXCSUM)) { 9753 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9754 * successfully merged by hardware must also have the 9755 * checksum verified by hardware. If the user does not 9756 * want to enable RXCSUM, logically, we should disable GRO_HW. 
9757 */ 9758 if (features & NETIF_F_GRO_HW) { 9759 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9760 features &= ~NETIF_F_GRO_HW; 9761 } 9762 } 9763 9764 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9765 if (features & NETIF_F_RXFCS) { 9766 if (features & NETIF_F_LRO) { 9767 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9768 features &= ~NETIF_F_LRO; 9769 } 9770 9771 if (features & NETIF_F_GRO_HW) { 9772 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9773 features &= ~NETIF_F_GRO_HW; 9774 } 9775 } 9776 9777 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 9778 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 9779 features &= ~NETIF_F_LRO; 9780 } 9781 9782 if (features & NETIF_F_HW_TLS_TX) { 9783 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9784 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9785 bool hw_csum = features & NETIF_F_HW_CSUM; 9786 9787 if (!ip_csum && !hw_csum) { 9788 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9789 features &= ~NETIF_F_HW_TLS_TX; 9790 } 9791 } 9792 9793 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9794 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9795 features &= ~NETIF_F_HW_TLS_RX; 9796 } 9797 9798 return features; 9799 } 9800 9801 int __netdev_update_features(struct net_device *dev) 9802 { 9803 struct net_device *upper, *lower; 9804 netdev_features_t features; 9805 struct list_head *iter; 9806 int err = -1; 9807 9808 ASSERT_RTNL(); 9809 9810 features = netdev_get_wanted_features(dev); 9811 9812 if (dev->netdev_ops->ndo_fix_features) 9813 features = dev->netdev_ops->ndo_fix_features(dev, features); 9814 9815 /* driver might be less strict about feature dependencies */ 9816 features = netdev_fix_features(dev, features); 9817 9818 /* some features can't be enabled if they're off on an upper device */ 9819 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9820 features = netdev_sync_upper_features(dev, upper, features); 9821 9822 if (dev->features == features) 9823 goto sync_lower; 9824 9825 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9826 &dev->features, &features); 9827 9828 if (dev->netdev_ops->ndo_set_features) 9829 err = dev->netdev_ops->ndo_set_features(dev, features); 9830 else 9831 err = 0; 9832 9833 if (unlikely(err < 0)) { 9834 netdev_err(dev, 9835 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9836 err, &features, &dev->features); 9837 /* return non-0 since some features might have changed and 9838 * it's better to fire a spurious notification than miss it 9839 */ 9840 return -1; 9841 } 9842 9843 sync_lower: 9844 /* some features must be disabled on lower devices when disabled 9845 * on an upper device (think: bonding master or bridge) 9846 */ 9847 netdev_for_each_lower_dev(dev, lower, iter) 9848 netdev_sync_lower_features(dev, lower, features); 9849 9850 if (!err) { 9851 netdev_features_t diff = features ^ dev->features; 9852 9853 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9854 /* udp_tunnel_{get,drop}_rx_info both need 9855 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9856 * device, or they won't do anything. 9857 * Thus we need to update dev->features 9858 * *before* calling udp_tunnel_get_rx_info, 9859 * but *after* calling udp_tunnel_drop_rx_info. 
9860 */ 9861 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9862 dev->features = features; 9863 udp_tunnel_get_rx_info(dev); 9864 } else { 9865 udp_tunnel_drop_rx_info(dev); 9866 } 9867 } 9868 9869 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9870 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9871 dev->features = features; 9872 err |= vlan_get_rx_ctag_filter_info(dev); 9873 } else { 9874 vlan_drop_rx_ctag_filter_info(dev); 9875 } 9876 } 9877 9878 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 9879 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 9880 dev->features = features; 9881 err |= vlan_get_rx_stag_filter_info(dev); 9882 } else { 9883 vlan_drop_rx_stag_filter_info(dev); 9884 } 9885 } 9886 9887 dev->features = features; 9888 } 9889 9890 return err < 0 ? 0 : 1; 9891 } 9892 9893 /** 9894 * netdev_update_features - recalculate device features 9895 * @dev: the device to check 9896 * 9897 * Recalculate dev->features set and send notifications if it 9898 * has changed. Should be called after driver or hardware dependent 9899 * conditions might have changed that influence the features. 9900 */ 9901 void netdev_update_features(struct net_device *dev) 9902 { 9903 if (__netdev_update_features(dev)) 9904 netdev_features_change(dev); 9905 } 9906 EXPORT_SYMBOL(netdev_update_features); 9907 9908 /** 9909 * netdev_change_features - recalculate device features 9910 * @dev: the device to check 9911 * 9912 * Recalculate dev->features set and send notifications even 9913 * if they have not changed. Should be called instead of 9914 * netdev_update_features() if also dev->vlan_features might 9915 * have changed to allow the changes to be propagated to stacked 9916 * VLAN devices. 9917 */ 9918 void netdev_change_features(struct net_device *dev) 9919 { 9920 __netdev_update_features(dev); 9921 netdev_features_change(dev); 9922 } 9923 EXPORT_SYMBOL(netdev_change_features); 9924 9925 /** 9926 * netif_stacked_transfer_operstate - transfer operstate 9927 * @rootdev: the root or lower level device to transfer state from 9928 * @dev: the device to transfer operstate to 9929 * 9930 * Transfer operational state from root to device. This is normally 9931 * called when a stacking relationship exists between the root 9932 * device and the device(a leaf device). 
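 *
 * A sketch of typical use from a stacked (e.g. VLAN-like) driver's
 * NETDEV_CHANGE notifier handler, with hypothetical variable names::
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(lower_dev, stacked_dev);
 *		break;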
9933 */ 9934 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9935 struct net_device *dev) 9936 { 9937 if (rootdev->operstate == IF_OPER_DORMANT) 9938 netif_dormant_on(dev); 9939 else 9940 netif_dormant_off(dev); 9941 9942 if (rootdev->operstate == IF_OPER_TESTING) 9943 netif_testing_on(dev); 9944 else 9945 netif_testing_off(dev); 9946 9947 if (netif_carrier_ok(rootdev)) 9948 netif_carrier_on(dev); 9949 else 9950 netif_carrier_off(dev); 9951 } 9952 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9953 9954 static int netif_alloc_rx_queues(struct net_device *dev) 9955 { 9956 unsigned int i, count = dev->num_rx_queues; 9957 struct netdev_rx_queue *rx; 9958 size_t sz = count * sizeof(*rx); 9959 int err = 0; 9960 9961 BUG_ON(count < 1); 9962 9963 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9964 if (!rx) 9965 return -ENOMEM; 9966 9967 dev->_rx = rx; 9968 9969 for (i = 0; i < count; i++) { 9970 rx[i].dev = dev; 9971 9972 /* XDP RX-queue setup */ 9973 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 9974 if (err < 0) 9975 goto err_rxq_info; 9976 } 9977 return 0; 9978 9979 err_rxq_info: 9980 /* Rollback successful reg's and free other resources */ 9981 while (i--) 9982 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9983 kvfree(dev->_rx); 9984 dev->_rx = NULL; 9985 return err; 9986 } 9987 9988 static void netif_free_rx_queues(struct net_device *dev) 9989 { 9990 unsigned int i, count = dev->num_rx_queues; 9991 9992 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9993 if (!dev->_rx) 9994 return; 9995 9996 for (i = 0; i < count; i++) 9997 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9998 9999 kvfree(dev->_rx); 10000 } 10001 10002 static void netdev_init_one_queue(struct net_device *dev, 10003 struct netdev_queue *queue, void *_unused) 10004 { 10005 /* Initialize queue lock */ 10006 spin_lock_init(&queue->_xmit_lock); 10007 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 10008 queue->xmit_lock_owner = -1; 10009 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 10010 queue->dev = dev; 10011 #ifdef CONFIG_BQL 10012 dql_init(&queue->dql, HZ); 10013 #endif 10014 } 10015 10016 static void netif_free_tx_queues(struct net_device *dev) 10017 { 10018 kvfree(dev->_tx); 10019 } 10020 10021 static int netif_alloc_netdev_queues(struct net_device *dev) 10022 { 10023 unsigned int count = dev->num_tx_queues; 10024 struct netdev_queue *tx; 10025 size_t sz = count * sizeof(*tx); 10026 10027 if (count < 1 || count > 0xffff) 10028 return -EINVAL; 10029 10030 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10031 if (!tx) 10032 return -ENOMEM; 10033 10034 dev->_tx = tx; 10035 10036 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 10037 spin_lock_init(&dev->tx_global_lock); 10038 10039 return 0; 10040 } 10041 10042 void netif_tx_stop_all_queues(struct net_device *dev) 10043 { 10044 unsigned int i; 10045 10046 for (i = 0; i < dev->num_tx_queues; i++) { 10047 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 10048 10049 netif_tx_stop_queue(txq); 10050 } 10051 } 10052 EXPORT_SYMBOL(netif_tx_stop_all_queues); 10053 10054 /** 10055 * register_netdevice() - register a network device 10056 * @dev: device to register 10057 * 10058 * Take a prepared network device structure and make it externally accessible. 10059 * A %NETDEV_REGISTER message is sent to the netdev notifier chain. 10060 * Callers must hold the rtnl lock - you may want register_netdev() 10061 * instead of this. 
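 *
 * A minimal sketch of direct use (most drivers use register_netdev()
 * below, which takes the rtnl lock for you)::
 *
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();
 *	if (err)
 *		free_netdev(dev);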
10062 */ 10063 int register_netdevice(struct net_device *dev) 10064 { 10065 int ret; 10066 struct net *net = dev_net(dev); 10067 10068 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 10069 NETDEV_FEATURE_COUNT); 10070 BUG_ON(dev_boot_phase); 10071 ASSERT_RTNL(); 10072 10073 might_sleep(); 10074 10075 /* When net_device's are persistent, this will be fatal. */ 10076 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 10077 BUG_ON(!net); 10078 10079 ret = ethtool_check_ops(dev->ethtool_ops); 10080 if (ret) 10081 return ret; 10082 10083 spin_lock_init(&dev->addr_list_lock); 10084 netdev_set_addr_lockdep_class(dev); 10085 10086 ret = dev_get_valid_name(net, dev, dev->name); 10087 if (ret < 0) 10088 goto out; 10089 10090 ret = -ENOMEM; 10091 dev->name_node = netdev_name_node_head_alloc(dev); 10092 if (!dev->name_node) 10093 goto out; 10094 10095 /* Init, if this function is available */ 10096 if (dev->netdev_ops->ndo_init) { 10097 ret = dev->netdev_ops->ndo_init(dev); 10098 if (ret) { 10099 if (ret > 0) 10100 ret = -EIO; 10101 goto err_free_name; 10102 } 10103 } 10104 10105 if (((dev->hw_features | dev->features) & 10106 NETIF_F_HW_VLAN_CTAG_FILTER) && 10107 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10108 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10109 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10110 ret = -EINVAL; 10111 goto err_uninit; 10112 } 10113 10114 ret = dev_index_reserve(net, dev->ifindex); 10115 if (ret < 0) 10116 goto err_uninit; 10117 dev->ifindex = ret; 10118 10119 /* Transfer changeable features to wanted_features and enable 10120 * software offloads (GSO and GRO). 10121 */ 10122 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10123 dev->features |= NETIF_F_SOFT_FEATURES; 10124 10125 if (dev->udp_tunnel_nic_info) { 10126 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10127 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10128 } 10129 10130 dev->wanted_features = dev->features & dev->hw_features; 10131 10132 if (!(dev->flags & IFF_LOOPBACK)) 10133 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10134 10135 /* If IPv4 TCP segmentation offload is supported we should also 10136 * allow the device to enable segmenting the frame with the option 10137 * of ignoring a static IP ID value. This doesn't enable the 10138 * feature itself but allows the user to enable it later. 10139 */ 10140 if (dev->hw_features & NETIF_F_TSO) 10141 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10142 if (dev->vlan_features & NETIF_F_TSO) 10143 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10144 if (dev->mpls_features & NETIF_F_TSO) 10145 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10146 if (dev->hw_enc_features & NETIF_F_TSO) 10147 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10148 10149 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10150 */ 10151 dev->vlan_features |= NETIF_F_HIGHDMA; 10152 10153 /* Make NETIF_F_SG inheritable to tunnel devices. 10154 */ 10155 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10156 10157 /* Make NETIF_F_SG inheritable to MPLS. 10158 */ 10159 dev->mpls_features |= NETIF_F_SG; 10160 10161 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 10162 ret = notifier_to_errno(ret); 10163 if (ret) 10164 goto err_ifindex_release; 10165 10166 ret = netdev_register_kobject(dev); 10167 write_lock(&dev_base_lock); 10168 dev->reg_state = ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED;
10169 	write_unlock(&dev_base_lock);
10170 	if (ret)
10171 		goto err_uninit_notify;
10172 
10173 	__netdev_update_features(dev);
10174 
10175 	/*
10176 	 * Default initial state at registration is that the
10177 	 * device is present.
10178 	 */
10179 
10180 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10181 
10182 	linkwatch_init_dev(dev);
10183 
10184 	dev_init_scheduler(dev);
10185 
10186 	netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10187 	list_netdevice(dev);
10188 
10189 	add_device_randomness(dev->dev_addr, dev->addr_len);
10190 
10191 	/* If the device has a permanent device address, the driver should
10192 	 * set dev_addr, and addr_assign_type should be set to
10193 	 * NET_ADDR_PERM (the default value).
10194 	 */
10195 	if (dev->addr_assign_type == NET_ADDR_PERM)
10196 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10197 
10198 	/* Notify protocols that a new device appeared. */
10199 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10200 	ret = notifier_to_errno(ret);
10201 	if (ret) {
10202 		/* Expect explicit free_netdev() on failure */
10203 		dev->needs_free_netdev = false;
10204 		unregister_netdevice_queue(dev, NULL);
10205 		goto out;
10206 	}
10207 	/*
10208 	 * Prevent userspace races by waiting until the network
10209 	 * device is fully setup before sending notifications.
10210 	 */
10211 	if (!dev->rtnl_link_ops ||
10212 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10213 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10214 
10215 out:
10216 	return ret;
10217 
10218 err_uninit_notify:
10219 	call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10220 err_ifindex_release:
10221 	dev_index_release(net, dev->ifindex);
10222 err_uninit:
10223 	if (dev->netdev_ops->ndo_uninit)
10224 		dev->netdev_ops->ndo_uninit(dev);
10225 	if (dev->priv_destructor)
10226 		dev->priv_destructor(dev);
10227 err_free_name:
10228 	netdev_name_node_free(dev->name_node);
10229 	goto out;
10230 }
10231 EXPORT_SYMBOL(register_netdevice);
10232 
10233 /**
10234  * init_dummy_netdev - init a dummy network device for NAPI
10235  * @dev: device to init
10236  *
10237  * This takes a network device structure and initializes the minimum
10238  * number of fields so it can be used to schedule NAPI polls without
10239  * registering a full blown interface. This is to be used by drivers
10240  * that need to tie several hardware interfaces to a single NAPI
10241  * poll scheduler due to HW limitations.
10242  */
10243 int init_dummy_netdev(struct net_device *dev)
10244 {
10245 	/* Clear everything. Note we don't initialize spinlocks
10246 	 * as they aren't supposed to be taken by any of the
10247 	 * NAPI code and this dummy netdev is supposed to be
10248 	 * only ever used for NAPI polls.
10249 	 */
10250 	memset(dev, 0, sizeof(struct net_device));
10251 
10252 	/* make sure we BUG if trying to hit standard
10253 	 * register/unregister code path
10254 	 */
10255 	dev->reg_state = NETREG_DUMMY;
10256 
10257 	/* NAPI wants this */
10258 	INIT_LIST_HEAD(&dev->napi_list);
10259 
10260 	/* a dummy interface is started by default */
10261 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10262 	set_bit(__LINK_STATE_START, &dev->state);
10263 
10264 	/* napi_busy_loop stats accounting wants this */
10265 	dev_net_set(dev, &init_net);
10266 
10267 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10268 	 * because users of this 'device' don't need to change
10269 	 * its refcount.
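	 *
	 * For reference, a hypothetical multi-port driver would typically
	 * host a shared NAPI context on such a dummy netdev roughly as
	 * follows (names are illustrative, not taken from a real driver):
	 *
	 *	init_dummy_netdev(&priv->dummy_dev);
	 *	netif_napi_add(&priv->dummy_dev, &priv->napi, my_shared_poll);
	 *	napi_enable(&priv->napi);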
10270 */ 10271 10272 return 0; 10273 } 10274 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10275 10276 10277 /** 10278 * register_netdev - register a network device 10279 * @dev: device to register 10280 * 10281 * Take a completed network device structure and add it to the kernel 10282 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10283 * chain. 0 is returned on success. A negative errno code is returned 10284 * on a failure to set up the device, or if the name is a duplicate. 10285 * 10286 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10287 * and expands the device name if you passed a format string to 10288 * alloc_netdev. 10289 */ 10290 int register_netdev(struct net_device *dev) 10291 { 10292 int err; 10293 10294 if (rtnl_lock_killable()) 10295 return -EINTR; 10296 err = register_netdevice(dev); 10297 rtnl_unlock(); 10298 return err; 10299 } 10300 EXPORT_SYMBOL(register_netdev); 10301 10302 int netdev_refcnt_read(const struct net_device *dev) 10303 { 10304 #ifdef CONFIG_PCPU_DEV_REFCNT 10305 int i, refcnt = 0; 10306 10307 for_each_possible_cpu(i) 10308 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10309 return refcnt; 10310 #else 10311 return refcount_read(&dev->dev_refcnt); 10312 #endif 10313 } 10314 EXPORT_SYMBOL(netdev_refcnt_read); 10315 10316 int netdev_unregister_timeout_secs __read_mostly = 10; 10317 10318 #define WAIT_REFS_MIN_MSECS 1 10319 #define WAIT_REFS_MAX_MSECS 250 10320 /** 10321 * netdev_wait_allrefs_any - wait until all references are gone. 10322 * @list: list of net_devices to wait on 10323 * 10324 * This is called when unregistering network devices. 10325 * 10326 * Any protocol or device that holds a reference should register 10327 * for netdevice notification, and cleanup and put back the 10328 * reference if they receive an UNREGISTER event. 10329 * We can get stuck here if buggy protocols don't correctly 10330 * call dev_put. 10331 */ 10332 static struct net_device *netdev_wait_allrefs_any(struct list_head *list) 10333 { 10334 unsigned long rebroadcast_time, warning_time; 10335 struct net_device *dev; 10336 int wait = 0; 10337 10338 rebroadcast_time = warning_time = jiffies; 10339 10340 list_for_each_entry(dev, list, todo_list) 10341 if (netdev_refcnt_read(dev) == 1) 10342 return dev; 10343 10344 while (true) { 10345 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10346 rtnl_lock(); 10347 10348 /* Rebroadcast unregister notification */ 10349 list_for_each_entry(dev, list, todo_list) 10350 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10351 10352 __rtnl_unlock(); 10353 rcu_barrier(); 10354 rtnl_lock(); 10355 10356 list_for_each_entry(dev, list, todo_list) 10357 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10358 &dev->state)) { 10359 /* We must not have linkwatch events 10360 * pending on unregister. If this 10361 * happens, we simply run the queue 10362 * unscheduled, resulting in a noop 10363 * for this device. 
10364 */ 10365 linkwatch_run_queue(); 10366 break; 10367 } 10368 10369 __rtnl_unlock(); 10370 10371 rebroadcast_time = jiffies; 10372 } 10373 10374 if (!wait) { 10375 rcu_barrier(); 10376 wait = WAIT_REFS_MIN_MSECS; 10377 } else { 10378 msleep(wait); 10379 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10380 } 10381 10382 list_for_each_entry(dev, list, todo_list) 10383 if (netdev_refcnt_read(dev) == 1) 10384 return dev; 10385 10386 if (time_after(jiffies, warning_time + 10387 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { 10388 list_for_each_entry(dev, list, todo_list) { 10389 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10390 dev->name, netdev_refcnt_read(dev)); 10391 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 10392 } 10393 10394 warning_time = jiffies; 10395 } 10396 } 10397 } 10398 10399 /* The sequence is: 10400 * 10401 * rtnl_lock(); 10402 * ... 10403 * register_netdevice(x1); 10404 * register_netdevice(x2); 10405 * ... 10406 * unregister_netdevice(y1); 10407 * unregister_netdevice(y2); 10408 * ... 10409 * rtnl_unlock(); 10410 * free_netdev(y1); 10411 * free_netdev(y2); 10412 * 10413 * We are invoked by rtnl_unlock(). 10414 * This allows us to deal with problems: 10415 * 1) We can delete sysfs objects which invoke hotplug 10416 * without deadlocking with linkwatch via keventd. 10417 * 2) Since we run with the RTNL semaphore not held, we can sleep 10418 * safely in order to wait for the netdev refcnt to drop to zero. 10419 * 10420 * We must not return until all unregister events added during 10421 * the interval the lock was held have been completed. 10422 */ 10423 void netdev_run_todo(void) 10424 { 10425 struct net_device *dev, *tmp; 10426 struct list_head list; 10427 #ifdef CONFIG_LOCKDEP 10428 struct list_head unlink_list; 10429 10430 list_replace_init(&net_unlink_list, &unlink_list); 10431 10432 while (!list_empty(&unlink_list)) { 10433 struct net_device *dev = list_first_entry(&unlink_list, 10434 struct net_device, 10435 unlink_list); 10436 list_del_init(&dev->unlink_list); 10437 dev->nested_level = dev->lower_level - 1; 10438 } 10439 #endif 10440 10441 /* Snapshot list, allow later requests */ 10442 list_replace_init(&net_todo_list, &list); 10443 10444 __rtnl_unlock(); 10445 10446 /* Wait for rcu callbacks to finish before next phase */ 10447 if (!list_empty(&list)) 10448 rcu_barrier(); 10449 10450 list_for_each_entry_safe(dev, tmp, &list, todo_list) { 10451 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10452 netdev_WARN(dev, "run_todo but not unregistering\n"); 10453 list_del(&dev->todo_list); 10454 continue; 10455 } 10456 10457 write_lock(&dev_base_lock); 10458 dev->reg_state = NETREG_UNREGISTERED; 10459 write_unlock(&dev_base_lock); 10460 linkwatch_forget_dev(dev); 10461 } 10462 10463 while (!list_empty(&list)) { 10464 dev = netdev_wait_allrefs_any(&list); 10465 list_del(&dev->todo_list); 10466 10467 /* paranoia */ 10468 BUG_ON(netdev_refcnt_read(dev) != 1); 10469 BUG_ON(!list_empty(&dev->ptype_all)); 10470 BUG_ON(!list_empty(&dev->ptype_specific)); 10471 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10472 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10473 10474 if (dev->priv_destructor) 10475 dev->priv_destructor(dev); 10476 if (dev->needs_free_netdev) 10477 free_netdev(dev); 10478 10479 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count)) 10480 wake_up(&netdev_unregistering_wq); 10481 10482 /* Free network device */ 10483 kobject_put(&dev->dev.kobj); 10484 } 10485 } 10486 10487 /* Convert net_device_stats to 
rtnl_link_stats64. rtnl_link_stats64 has 10488 * all the same fields in the same order as net_device_stats, with only 10489 * the type differing, but rtnl_link_stats64 may have additional fields 10490 * at the end for newer counters. 10491 */ 10492 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10493 const struct net_device_stats *netdev_stats) 10494 { 10495 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); 10496 const atomic_long_t *src = (atomic_long_t *)netdev_stats; 10497 u64 *dst = (u64 *)stats64; 10498 10499 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10500 for (i = 0; i < n; i++) 10501 dst[i] = (unsigned long)atomic_long_read(&src[i]); 10502 /* zero out counters that only exist in rtnl_link_stats64 */ 10503 memset((char *)stats64 + n * sizeof(u64), 0, 10504 sizeof(*stats64) - n * sizeof(u64)); 10505 } 10506 EXPORT_SYMBOL(netdev_stats_to_stats64); 10507 10508 static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc( 10509 struct net_device *dev) 10510 { 10511 struct net_device_core_stats __percpu *p; 10512 10513 p = alloc_percpu_gfp(struct net_device_core_stats, 10514 GFP_ATOMIC | __GFP_NOWARN); 10515 10516 if (p && cmpxchg(&dev->core_stats, NULL, p)) 10517 free_percpu(p); 10518 10519 /* This READ_ONCE() pairs with the cmpxchg() above */ 10520 return READ_ONCE(dev->core_stats); 10521 } 10522 10523 noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset) 10524 { 10525 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 10526 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 10527 unsigned long __percpu *field; 10528 10529 if (unlikely(!p)) { 10530 p = netdev_core_stats_alloc(dev); 10531 if (!p) 10532 return; 10533 } 10534 10535 field = (__force unsigned long __percpu *)((__force void *)p + offset); 10536 this_cpu_inc(*field); 10537 } 10538 EXPORT_SYMBOL_GPL(netdev_core_stats_inc); 10539 10540 /** 10541 * dev_get_stats - get network device statistics 10542 * @dev: device to get statistics from 10543 * @storage: place to store stats 10544 * 10545 * Get network statistics from device. Return @storage. 10546 * The device driver may provide its own method by setting 10547 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10548 * otherwise the internal statistics structure is used. 
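 *
 * A minimal caller-side sketch (illustrative only):
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: rx %llu tx %llu\n", dev->name,
 *		stats.rx_packets, stats.tx_packets);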
10549 */ 10550 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10551 struct rtnl_link_stats64 *storage) 10552 { 10553 const struct net_device_ops *ops = dev->netdev_ops; 10554 const struct net_device_core_stats __percpu *p; 10555 10556 if (ops->ndo_get_stats64) { 10557 memset(storage, 0, sizeof(*storage)); 10558 ops->ndo_get_stats64(dev, storage); 10559 } else if (ops->ndo_get_stats) { 10560 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10561 } else { 10562 netdev_stats_to_stats64(storage, &dev->stats); 10563 } 10564 10565 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 10566 p = READ_ONCE(dev->core_stats); 10567 if (p) { 10568 const struct net_device_core_stats *core_stats; 10569 int i; 10570 10571 for_each_possible_cpu(i) { 10572 core_stats = per_cpu_ptr(p, i); 10573 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); 10574 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); 10575 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); 10576 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); 10577 } 10578 } 10579 return storage; 10580 } 10581 EXPORT_SYMBOL(dev_get_stats); 10582 10583 /** 10584 * dev_fetch_sw_netstats - get per-cpu network device statistics 10585 * @s: place to store stats 10586 * @netstats: per-cpu network stats to read from 10587 * 10588 * Read per-cpu network statistics and populate the related fields in @s. 10589 */ 10590 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10591 const struct pcpu_sw_netstats __percpu *netstats) 10592 { 10593 int cpu; 10594 10595 for_each_possible_cpu(cpu) { 10596 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 10597 const struct pcpu_sw_netstats *stats; 10598 unsigned int start; 10599 10600 stats = per_cpu_ptr(netstats, cpu); 10601 do { 10602 start = u64_stats_fetch_begin(&stats->syncp); 10603 rx_packets = u64_stats_read(&stats->rx_packets); 10604 rx_bytes = u64_stats_read(&stats->rx_bytes); 10605 tx_packets = u64_stats_read(&stats->tx_packets); 10606 tx_bytes = u64_stats_read(&stats->tx_bytes); 10607 } while (u64_stats_fetch_retry(&stats->syncp, start)); 10608 10609 s->rx_packets += rx_packets; 10610 s->rx_bytes += rx_bytes; 10611 s->tx_packets += tx_packets; 10612 s->tx_bytes += tx_bytes; 10613 } 10614 } 10615 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10616 10617 /** 10618 * dev_get_tstats64 - ndo_get_stats64 implementation 10619 * @dev: device to get statistics from 10620 * @s: place to store stats 10621 * 10622 * Populate @s from dev->stats and dev->tstats. Can be used as 10623 * ndo_get_stats64() callback. 
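 *
 * For example, a hypothetical driver that keeps its per-cpu counters in
 * dev->tstats can wire this up directly (illustrative):
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};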
10624 */ 10625 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10626 { 10627 netdev_stats_to_stats64(s, &dev->stats); 10628 dev_fetch_sw_netstats(s, dev->tstats); 10629 } 10630 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10631 10632 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10633 { 10634 struct netdev_queue *queue = dev_ingress_queue(dev); 10635 10636 #ifdef CONFIG_NET_CLS_ACT 10637 if (queue) 10638 return queue; 10639 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10640 if (!queue) 10641 return NULL; 10642 netdev_init_one_queue(dev, queue, NULL); 10643 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10644 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); 10645 rcu_assign_pointer(dev->ingress_queue, queue); 10646 #endif 10647 return queue; 10648 } 10649 10650 static const struct ethtool_ops default_ethtool_ops; 10651 10652 void netdev_set_default_ethtool_ops(struct net_device *dev, 10653 const struct ethtool_ops *ops) 10654 { 10655 if (dev->ethtool_ops == &default_ethtool_ops) 10656 dev->ethtool_ops = ops; 10657 } 10658 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10659 10660 /** 10661 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default 10662 * @dev: netdev to enable the IRQ coalescing on 10663 * 10664 * Sets a conservative default for SW IRQ coalescing. Users can use 10665 * sysfs attributes to override the default values. 10666 */ 10667 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) 10668 { 10669 WARN_ON(dev->reg_state == NETREG_REGISTERED); 10670 10671 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 10672 dev->gro_flush_timeout = 20000; 10673 dev->napi_defer_hard_irqs = 1; 10674 } 10675 } 10676 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); 10677 10678 void netdev_freemem(struct net_device *dev) 10679 { 10680 char *addr = (char *)dev - dev->padded; 10681 10682 kvfree(addr); 10683 } 10684 10685 /** 10686 * alloc_netdev_mqs - allocate network device 10687 * @sizeof_priv: size of private data to allocate space for 10688 * @name: device name format string 10689 * @name_assign_type: origin of device name 10690 * @setup: callback to initialize device 10691 * @txqs: the number of TX subqueues to allocate 10692 * @rxqs: the number of RX subqueues to allocate 10693 * 10694 * Allocates a struct net_device with private data area for driver use 10695 * and performs basic initialization. Also allocates subqueue structs 10696 * for each queue on the device. 
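 *
 * A typical call from a hypothetical multiqueue driver (name, private
 * struct and queue counts are illustrative):
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, my_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;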
10697 */ 10698 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10699 unsigned char name_assign_type, 10700 void (*setup)(struct net_device *), 10701 unsigned int txqs, unsigned int rxqs) 10702 { 10703 struct net_device *dev; 10704 unsigned int alloc_size; 10705 struct net_device *p; 10706 10707 BUG_ON(strlen(name) >= sizeof(dev->name)); 10708 10709 if (txqs < 1) { 10710 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10711 return NULL; 10712 } 10713 10714 if (rxqs < 1) { 10715 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10716 return NULL; 10717 } 10718 10719 alloc_size = sizeof(struct net_device); 10720 if (sizeof_priv) { 10721 /* ensure 32-byte alignment of private area */ 10722 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10723 alloc_size += sizeof_priv; 10724 } 10725 /* ensure 32-byte alignment of whole construct */ 10726 alloc_size += NETDEV_ALIGN - 1; 10727 10728 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10729 if (!p) 10730 return NULL; 10731 10732 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10733 dev->padded = (char *)dev - (char *)p; 10734 10735 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); 10736 #ifdef CONFIG_PCPU_DEV_REFCNT 10737 dev->pcpu_refcnt = alloc_percpu(int); 10738 if (!dev->pcpu_refcnt) 10739 goto free_dev; 10740 __dev_hold(dev); 10741 #else 10742 refcount_set(&dev->dev_refcnt, 1); 10743 #endif 10744 10745 if (dev_addr_init(dev)) 10746 goto free_pcpu; 10747 10748 dev_mc_init(dev); 10749 dev_uc_init(dev); 10750 10751 dev_net_set(dev, &init_net); 10752 10753 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; 10754 dev->xdp_zc_max_segs = 1; 10755 dev->gso_max_segs = GSO_MAX_SEGS; 10756 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; 10757 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; 10758 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; 10759 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; 10760 dev->tso_max_segs = TSO_MAX_SEGS; 10761 dev->upper_level = 1; 10762 dev->lower_level = 1; 10763 #ifdef CONFIG_LOCKDEP 10764 dev->nested_level = 0; 10765 INIT_LIST_HEAD(&dev->unlink_list); 10766 #endif 10767 10768 INIT_LIST_HEAD(&dev->napi_list); 10769 INIT_LIST_HEAD(&dev->unreg_list); 10770 INIT_LIST_HEAD(&dev->close_list); 10771 INIT_LIST_HEAD(&dev->link_watch_list); 10772 INIT_LIST_HEAD(&dev->adj_list.upper); 10773 INIT_LIST_HEAD(&dev->adj_list.lower); 10774 INIT_LIST_HEAD(&dev->ptype_all); 10775 INIT_LIST_HEAD(&dev->ptype_specific); 10776 INIT_LIST_HEAD(&dev->net_notifier_list); 10777 #ifdef CONFIG_NET_SCHED 10778 hash_init(dev->qdisc_hash); 10779 #endif 10780 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10781 setup(dev); 10782 10783 if (!dev->tx_queue_len) { 10784 dev->priv_flags |= IFF_NO_QUEUE; 10785 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10786 } 10787 10788 dev->num_tx_queues = txqs; 10789 dev->real_num_tx_queues = txqs; 10790 if (netif_alloc_netdev_queues(dev)) 10791 goto free_all; 10792 10793 dev->num_rx_queues = rxqs; 10794 dev->real_num_rx_queues = rxqs; 10795 if (netif_alloc_rx_queues(dev)) 10796 goto free_all; 10797 10798 strcpy(dev->name, name); 10799 dev->name_assign_type = name_assign_type; 10800 dev->group = INIT_NETDEV_GROUP; 10801 if (!dev->ethtool_ops) 10802 dev->ethtool_ops = &default_ethtool_ops; 10803 10804 nf_hook_netdev_init(dev); 10805 10806 return dev; 10807 10808 free_all: 10809 free_netdev(dev); 10810 return NULL; 10811 10812 free_pcpu: 10813 #ifdef CONFIG_PCPU_DEV_REFCNT 10814 free_percpu(dev->pcpu_refcnt); 10815 free_dev: 10816 #endif 10817 
netdev_freemem(dev); 10818 return NULL; 10819 } 10820 EXPORT_SYMBOL(alloc_netdev_mqs); 10821 10822 /** 10823 * free_netdev - free network device 10824 * @dev: device 10825 * 10826 * This function does the last stage of destroying an allocated device 10827 * interface. The reference to the device object is released. If this 10828 * is the last reference then it will be freed.Must be called in process 10829 * context. 10830 */ 10831 void free_netdev(struct net_device *dev) 10832 { 10833 struct napi_struct *p, *n; 10834 10835 might_sleep(); 10836 10837 /* When called immediately after register_netdevice() failed the unwind 10838 * handling may still be dismantling the device. Handle that case by 10839 * deferring the free. 10840 */ 10841 if (dev->reg_state == NETREG_UNREGISTERING) { 10842 ASSERT_RTNL(); 10843 dev->needs_free_netdev = true; 10844 return; 10845 } 10846 10847 netif_free_tx_queues(dev); 10848 netif_free_rx_queues(dev); 10849 10850 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 10851 10852 /* Flush device addresses */ 10853 dev_addr_flush(dev); 10854 10855 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 10856 netif_napi_del(p); 10857 10858 ref_tracker_dir_exit(&dev->refcnt_tracker); 10859 #ifdef CONFIG_PCPU_DEV_REFCNT 10860 free_percpu(dev->pcpu_refcnt); 10861 dev->pcpu_refcnt = NULL; 10862 #endif 10863 free_percpu(dev->core_stats); 10864 dev->core_stats = NULL; 10865 free_percpu(dev->xdp_bulkq); 10866 dev->xdp_bulkq = NULL; 10867 10868 /* Compatibility with error handling in drivers */ 10869 if (dev->reg_state == NETREG_UNINITIALIZED) { 10870 netdev_freemem(dev); 10871 return; 10872 } 10873 10874 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 10875 dev->reg_state = NETREG_RELEASED; 10876 10877 /* will free via device release */ 10878 put_device(&dev->dev); 10879 } 10880 EXPORT_SYMBOL(free_netdev); 10881 10882 /** 10883 * synchronize_net - Synchronize with packet receive processing 10884 * 10885 * Wait for packets currently being received to be done. 10886 * Does not block later packets from starting. 10887 */ 10888 void synchronize_net(void) 10889 { 10890 might_sleep(); 10891 if (rtnl_is_locked()) 10892 synchronize_rcu_expedited(); 10893 else 10894 synchronize_rcu(); 10895 } 10896 EXPORT_SYMBOL(synchronize_net); 10897 10898 /** 10899 * unregister_netdevice_queue - remove device from the kernel 10900 * @dev: device 10901 * @head: list 10902 * 10903 * This function shuts down a device interface and removes it 10904 * from the kernel tables. 10905 * If head not NULL, device is queued to be unregistered later. 10906 * 10907 * Callers must hold the rtnl semaphore. You may want 10908 * unregister_netdev() instead of this. 10909 */ 10910 10911 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 10912 { 10913 ASSERT_RTNL(); 10914 10915 if (head) { 10916 list_move_tail(&dev->unreg_list, head); 10917 } else { 10918 LIST_HEAD(single); 10919 10920 list_add(&dev->unreg_list, &single); 10921 unregister_netdevice_many(&single); 10922 } 10923 } 10924 EXPORT_SYMBOL(unregister_netdevice_queue); 10925 10926 void unregister_netdevice_many_notify(struct list_head *head, 10927 u32 portid, const struct nlmsghdr *nlh) 10928 { 10929 struct net_device *dev, *tmp; 10930 LIST_HEAD(close_head); 10931 10932 BUG_ON(dev_boot_phase); 10933 ASSERT_RTNL(); 10934 10935 if (list_empty(head)) 10936 return; 10937 10938 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 10939 /* Some devices call without registering 10940 * for initialization unwind. 
Remove those 10941 * devices and proceed with the remaining. 10942 */ 10943 if (dev->reg_state == NETREG_UNINITIALIZED) { 10944 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 10945 dev->name, dev); 10946 10947 WARN_ON(1); 10948 list_del(&dev->unreg_list); 10949 continue; 10950 } 10951 dev->dismantle = true; 10952 BUG_ON(dev->reg_state != NETREG_REGISTERED); 10953 } 10954 10955 /* If device is running, close it first. */ 10956 list_for_each_entry(dev, head, unreg_list) 10957 list_add_tail(&dev->close_list, &close_head); 10958 dev_close_many(&close_head, true); 10959 10960 list_for_each_entry(dev, head, unreg_list) { 10961 /* And unlink it from device chain. */ 10962 write_lock(&dev_base_lock); 10963 unlist_netdevice(dev, false); 10964 dev->reg_state = NETREG_UNREGISTERING; 10965 write_unlock(&dev_base_lock); 10966 } 10967 flush_all_backlogs(); 10968 10969 synchronize_net(); 10970 10971 list_for_each_entry(dev, head, unreg_list) { 10972 struct sk_buff *skb = NULL; 10973 10974 /* Shutdown queueing discipline. */ 10975 dev_shutdown(dev); 10976 dev_tcx_uninstall(dev); 10977 dev_xdp_uninstall(dev); 10978 bpf_dev_bound_netdev_unregister(dev); 10979 10980 netdev_offload_xstats_disable_all(dev); 10981 10982 /* Notify protocols, that we are about to destroy 10983 * this device. They should clean all the things. 10984 */ 10985 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10986 10987 if (!dev->rtnl_link_ops || 10988 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10989 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 10990 GFP_KERNEL, NULL, 0, 10991 portid, nlh); 10992 10993 /* 10994 * Flush the unicast and multicast chains 10995 */ 10996 dev_uc_flush(dev); 10997 dev_mc_flush(dev); 10998 10999 netdev_name_node_alt_flush(dev); 11000 netdev_name_node_free(dev->name_node); 11001 11002 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 11003 11004 if (dev->netdev_ops->ndo_uninit) 11005 dev->netdev_ops->ndo_uninit(dev); 11006 11007 if (skb) 11008 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); 11009 11010 /* Notifier chain MUST detach us all upper devices. */ 11011 WARN_ON(netdev_has_any_upper_dev(dev)); 11012 WARN_ON(netdev_has_any_lower_dev(dev)); 11013 11014 /* Remove entries from kobject tree */ 11015 netdev_unregister_kobject(dev); 11016 #ifdef CONFIG_XPS 11017 /* Remove XPS queueing entries */ 11018 netif_reset_xps_queues_gt(dev, 0); 11019 #endif 11020 } 11021 11022 synchronize_net(); 11023 11024 list_for_each_entry(dev, head, unreg_list) { 11025 netdev_put(dev, &dev->dev_registered_tracker); 11026 net_set_todo(dev); 11027 } 11028 11029 list_del(head); 11030 } 11031 11032 /** 11033 * unregister_netdevice_many - unregister many devices 11034 * @head: list of devices 11035 * 11036 * Note: As most callers use a stack allocated list_head, 11037 * we force a list_del() to make sure stack wont be corrupted later. 11038 */ 11039 void unregister_netdevice_many(struct list_head *head) 11040 { 11041 unregister_netdevice_many_notify(head, 0, NULL); 11042 } 11043 EXPORT_SYMBOL(unregister_netdevice_many); 11044 11045 /** 11046 * unregister_netdev - remove device from the kernel 11047 * @dev: device 11048 * 11049 * This function shuts down a device interface and removes it 11050 * from the kernel tables. 11051 * 11052 * This is just a wrapper for unregister_netdevice that takes 11053 * the rtnl semaphore. In general you want to use this and not 11054 * unregister_netdevice. 
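 *
 * Typical teardown in a hypothetical driver's remove path (illustrative):
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);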
11055  */
11056 void unregister_netdev(struct net_device *dev)
11057 {
11058 	rtnl_lock();
11059 	unregister_netdevice(dev);
11060 	rtnl_unlock();
11061 }
11062 EXPORT_SYMBOL(unregister_netdev);
11063 
11064 /**
11065  * __dev_change_net_namespace - move device to a different network namespace
11066  * @dev: device
11067  * @net: network namespace
11068  * @pat: If not NULL, name pattern to try if the current device name
11069  *       is already taken in the destination network namespace.
11070  * @new_ifindex: If not zero, specifies device index in the target
11071  *               namespace.
11072  *
11073  * This function shuts down a device interface and moves it
11074  * to a new network namespace. On success 0 is returned, on
11075  * a failure a negative errno code is returned.
11076  *
11077  * Callers must hold the rtnl semaphore.
11078  */
11079 
11080 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11081 			       const char *pat, int new_ifindex)
11082 {
11083 	struct netdev_name_node *name_node;
11084 	struct net *net_old = dev_net(dev);
11085 	char new_name[IFNAMSIZ] = {};
11086 	int err, new_nsid;
11087 
11088 	ASSERT_RTNL();
11089 
11090 	/* Don't allow namespace local devices to be moved. */
11091 	err = -EINVAL;
11092 	if (dev->features & NETIF_F_NETNS_LOCAL)
11093 		goto out;
11094 
11095 	/* Ensure the device has been registered */
11096 	if (dev->reg_state != NETREG_REGISTERED)
11097 		goto out;
11098 
11099 	/* Get out if there is nothing to do */
11100 	err = 0;
11101 	if (net_eq(net_old, net))
11102 		goto out;
11103 
11104 	/* Pick the destination device name, and ensure
11105 	 * we can use it in the destination network namespace.
11106 	 */
11107 	err = -EEXIST;
11108 	if (netdev_name_in_use(net, dev->name)) {
11109 		/* We get here if we can't use the current device name */
11110 		if (!pat)
11111 			goto out;
11112 		err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
11113 		if (err < 0)
11114 			goto out;
11115 	}
11116 	/* Check that none of the altnames conflicts. */
11117 	err = -EEXIST;
11118 	netdev_for_each_altname(dev, name_node)
11119 		if (netdev_name_in_use(net, name_node->name))
11120 			goto out;
11121 
11122 	/* Check that new_ifindex isn't used yet. */
11123 	if (new_ifindex) {
11124 		err = dev_index_reserve(net, new_ifindex);
11125 		if (err < 0)
11126 			goto out;
11127 	} else {
11128 		/* If there is an ifindex conflict assign a new one */
11129 		err = dev_index_reserve(net, dev->ifindex);
11130 		if (err == -EBUSY)
11131 			err = dev_index_reserve(net, 0);
11132 		if (err < 0)
11133 			goto out;
11134 		new_ifindex = err;
11135 	}
11136 
11137 	/*
11138 	 * And now a mini version of register_netdevice() and unregister_netdevice().
11139 	 */
11140 
11141 	/* If device is running, close it first. */
11142 	dev_close(dev);
11143 
11144 	/* And unlink it from device chain */
11145 	unlist_netdevice(dev, true);
11146 
11147 	synchronize_net();
11148 
11149 	/* Shutdown queueing discipline. */
11150 	dev_shutdown(dev);
11151 
11152 	/* Notify protocols that we are about to destroy
11153 	 * this device. They should clean all the things.
11154 	 *
11155 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
11156 	 * This is wanted because this way 8021q and macvlan know
11157 	 * the device is just moving and can keep their slaves up.
11158 */ 11159 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11160 rcu_barrier(); 11161 11162 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11163 11164 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11165 new_ifindex); 11166 11167 /* 11168 * Flush the unicast and multicast chains 11169 */ 11170 dev_uc_flush(dev); 11171 dev_mc_flush(dev); 11172 11173 /* Send a netdev-removed uevent to the old namespace */ 11174 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11175 netdev_adjacent_del_links(dev); 11176 11177 /* Move per-net netdevice notifiers that are following the netdevice */ 11178 move_netdevice_notifiers_dev_net(dev, net); 11179 11180 /* Actually switch the network namespace */ 11181 dev_net_set(dev, net); 11182 dev->ifindex = new_ifindex; 11183 11184 /* Send a netdev-add uevent to the new namespace */ 11185 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11186 netdev_adjacent_add_links(dev); 11187 11188 if (new_name[0]) /* Rename the netdev to prepared name */ 11189 strscpy(dev->name, new_name, IFNAMSIZ); 11190 11191 /* Fixup kobjects */ 11192 err = device_rename(&dev->dev, dev->name); 11193 WARN_ON(err); 11194 11195 /* Adapt owner in case owning user namespace of target network 11196 * namespace is different from the original one. 11197 */ 11198 err = netdev_change_owner(dev, net_old, net); 11199 WARN_ON(err); 11200 11201 /* Add the device back in the hashes */ 11202 list_netdevice(dev); 11203 11204 /* Notify protocols, that a new device appeared. */ 11205 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11206 11207 /* 11208 * Prevent userspace races by waiting until the network 11209 * device is fully setup before sending notifications. 11210 */ 11211 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 11212 11213 synchronize_net(); 11214 err = 0; 11215 out: 11216 return err; 11217 } 11218 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 11219 11220 static int dev_cpu_dead(unsigned int oldcpu) 11221 { 11222 struct sk_buff **list_skb; 11223 struct sk_buff *skb; 11224 unsigned int cpu; 11225 struct softnet_data *sd, *oldsd, *remsd = NULL; 11226 11227 local_irq_disable(); 11228 cpu = smp_processor_id(); 11229 sd = &per_cpu(softnet_data, cpu); 11230 oldsd = &per_cpu(softnet_data, oldcpu); 11231 11232 /* Find end of our completion_queue. */ 11233 list_skb = &sd->completion_queue; 11234 while (*list_skb) 11235 list_skb = &(*list_skb)->next; 11236 /* Append completion queue from offline CPU. */ 11237 *list_skb = oldsd->completion_queue; 11238 oldsd->completion_queue = NULL; 11239 11240 /* Append output queue from offline CPU. */ 11241 if (oldsd->output_queue) { 11242 *sd->output_queue_tailp = oldsd->output_queue; 11243 sd->output_queue_tailp = oldsd->output_queue_tailp; 11244 oldsd->output_queue = NULL; 11245 oldsd->output_queue_tailp = &oldsd->output_queue; 11246 } 11247 /* Append NAPI poll list from offline CPU, with one exception : 11248 * process_backlog() must be called by cpu owning percpu backlog. 11249 * We properly handle process_queue & input_pkt_queue later. 
11250 */ 11251 while (!list_empty(&oldsd->poll_list)) { 11252 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11253 struct napi_struct, 11254 poll_list); 11255 11256 list_del_init(&napi->poll_list); 11257 if (napi->poll == process_backlog) 11258 napi->state = 0; 11259 else 11260 ____napi_schedule(sd, napi); 11261 } 11262 11263 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11264 local_irq_enable(); 11265 11266 #ifdef CONFIG_RPS 11267 remsd = oldsd->rps_ipi_list; 11268 oldsd->rps_ipi_list = NULL; 11269 #endif 11270 /* send out pending IPI's on offline CPU */ 11271 net_rps_send_ipi(remsd); 11272 11273 /* Process offline CPU's input_pkt_queue */ 11274 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11275 netif_rx(skb); 11276 input_queue_head_incr(oldsd); 11277 } 11278 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11279 netif_rx(skb); 11280 input_queue_head_incr(oldsd); 11281 } 11282 11283 return 0; 11284 } 11285 11286 /** 11287 * netdev_increment_features - increment feature set by one 11288 * @all: current feature set 11289 * @one: new feature set 11290 * @mask: mask feature set 11291 * 11292 * Computes a new feature set after adding a device with feature set 11293 * @one to the master device with current feature set @all. Will not 11294 * enable anything that is off in @mask. Returns the new feature set. 11295 */ 11296 netdev_features_t netdev_increment_features(netdev_features_t all, 11297 netdev_features_t one, netdev_features_t mask) 11298 { 11299 if (mask & NETIF_F_HW_CSUM) 11300 mask |= NETIF_F_CSUM_MASK; 11301 mask |= NETIF_F_VLAN_CHALLENGED; 11302 11303 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11304 all &= one | ~NETIF_F_ALL_FOR_ALL; 11305 11306 /* If one device supports hw checksumming, set for all. */ 11307 if (all & NETIF_F_HW_CSUM) 11308 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11309 11310 return all; 11311 } 11312 EXPORT_SYMBOL(netdev_increment_features); 11313 11314 static struct hlist_head * __net_init netdev_create_hash(void) 11315 { 11316 int i; 11317 struct hlist_head *hash; 11318 11319 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11320 if (hash != NULL) 11321 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11322 INIT_HLIST_HEAD(&hash[i]); 11323 11324 return hash; 11325 } 11326 11327 /* Initialize per network namespace state */ 11328 static int __net_init netdev_init(struct net *net) 11329 { 11330 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11331 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11332 11333 INIT_LIST_HEAD(&net->dev_base_head); 11334 11335 net->dev_name_head = netdev_create_hash(); 11336 if (net->dev_name_head == NULL) 11337 goto err_name; 11338 11339 net->dev_index_head = netdev_create_hash(); 11340 if (net->dev_index_head == NULL) 11341 goto err_idx; 11342 11343 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1); 11344 11345 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11346 11347 return 0; 11348 11349 err_idx: 11350 kfree(net->dev_name_head); 11351 err_name: 11352 return -ENOMEM; 11353 } 11354 11355 /** 11356 * netdev_drivername - network driver for the device 11357 * @dev: network device 11358 * 11359 * Determine network driver for device. 
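 *
 * Returns the parent device driver's name, or an empty string when the
 * netdev has no parent (e.g. software devices). Illustrative use:
 *
 *	netdev_info(dev, "handled by %s\n", netdev_drivername(dev));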
11360 */ 11361 const char *netdev_drivername(const struct net_device *dev) 11362 { 11363 const struct device_driver *driver; 11364 const struct device *parent; 11365 const char *empty = ""; 11366 11367 parent = dev->dev.parent; 11368 if (!parent) 11369 return empty; 11370 11371 driver = parent->driver; 11372 if (driver && driver->name) 11373 return driver->name; 11374 return empty; 11375 } 11376 11377 static void __netdev_printk(const char *level, const struct net_device *dev, 11378 struct va_format *vaf) 11379 { 11380 if (dev && dev->dev.parent) { 11381 dev_printk_emit(level[1] - '0', 11382 dev->dev.parent, 11383 "%s %s %s%s: %pV", 11384 dev_driver_string(dev->dev.parent), 11385 dev_name(dev->dev.parent), 11386 netdev_name(dev), netdev_reg_state(dev), 11387 vaf); 11388 } else if (dev) { 11389 printk("%s%s%s: %pV", 11390 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11391 } else { 11392 printk("%s(NULL net_device): %pV", level, vaf); 11393 } 11394 } 11395 11396 void netdev_printk(const char *level, const struct net_device *dev, 11397 const char *format, ...) 11398 { 11399 struct va_format vaf; 11400 va_list args; 11401 11402 va_start(args, format); 11403 11404 vaf.fmt = format; 11405 vaf.va = &args; 11406 11407 __netdev_printk(level, dev, &vaf); 11408 11409 va_end(args); 11410 } 11411 EXPORT_SYMBOL(netdev_printk); 11412 11413 #define define_netdev_printk_level(func, level) \ 11414 void func(const struct net_device *dev, const char *fmt, ...) \ 11415 { \ 11416 struct va_format vaf; \ 11417 va_list args; \ 11418 \ 11419 va_start(args, fmt); \ 11420 \ 11421 vaf.fmt = fmt; \ 11422 vaf.va = &args; \ 11423 \ 11424 __netdev_printk(level, dev, &vaf); \ 11425 \ 11426 va_end(args); \ 11427 } \ 11428 EXPORT_SYMBOL(func); 11429 11430 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11431 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11432 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11433 define_netdev_printk_level(netdev_err, KERN_ERR); 11434 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11435 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11436 define_netdev_printk_level(netdev_info, KERN_INFO); 11437 11438 static void __net_exit netdev_exit(struct net *net) 11439 { 11440 kfree(net->dev_name_head); 11441 kfree(net->dev_index_head); 11442 xa_destroy(&net->dev_by_index); 11443 if (net != &init_net) 11444 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 11445 } 11446 11447 static struct pernet_operations __net_initdata netdev_net_ops = { 11448 .init = netdev_init, 11449 .exit = netdev_exit, 11450 }; 11451 11452 static void __net_exit default_device_exit_net(struct net *net) 11453 { 11454 struct net_device *dev, *aux; 11455 /* 11456 * Push all migratable network devices back to the 11457 * initial network namespace 11458 */ 11459 ASSERT_RTNL(); 11460 for_each_netdev_safe(net, dev, aux) { 11461 int err; 11462 char fb_name[IFNAMSIZ]; 11463 11464 /* Ignore unmoveable devices (i.e. 
loopback) */ 11465 if (dev->features & NETIF_F_NETNS_LOCAL) 11466 continue; 11467 11468 /* Leave virtual devices for the generic cleanup */ 11469 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 11470 continue; 11471 11472 /* Push remaining network devices to init_net */ 11473 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11474 if (netdev_name_in_use(&init_net, fb_name)) 11475 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11476 err = dev_change_net_namespace(dev, &init_net, fb_name); 11477 if (err) { 11478 pr_emerg("%s: failed to move %s to init_net: %d\n", 11479 __func__, dev->name, err); 11480 BUG(); 11481 } 11482 } 11483 } 11484 11485 static void __net_exit default_device_exit_batch(struct list_head *net_list) 11486 { 11487 /* At exit all network devices most be removed from a network 11488 * namespace. Do this in the reverse order of registration. 11489 * Do this across as many network namespaces as possible to 11490 * improve batching efficiency. 11491 */ 11492 struct net_device *dev; 11493 struct net *net; 11494 LIST_HEAD(dev_kill_list); 11495 11496 rtnl_lock(); 11497 list_for_each_entry(net, net_list, exit_list) { 11498 default_device_exit_net(net); 11499 cond_resched(); 11500 } 11501 11502 list_for_each_entry(net, net_list, exit_list) { 11503 for_each_netdev_reverse(net, dev) { 11504 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 11505 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 11506 else 11507 unregister_netdevice_queue(dev, &dev_kill_list); 11508 } 11509 } 11510 unregister_netdevice_many(&dev_kill_list); 11511 rtnl_unlock(); 11512 } 11513 11514 static struct pernet_operations __net_initdata default_device_ops = { 11515 .exit_batch = default_device_exit_batch, 11516 }; 11517 11518 /* 11519 * Initialize the DEV module. At boot time this walks the device list and 11520 * unhooks any devices that fail to initialise (normally hardware not 11521 * present) and leaves us with a valid list of present and active devices. 11522 * 11523 */ 11524 11525 /* 11526 * This is called single threaded during boot, so no need 11527 * to take the rtnl semaphore. 11528 */ 11529 static int __init net_dev_init(void) 11530 { 11531 int i, rc = -ENOMEM; 11532 11533 BUG_ON(!dev_boot_phase); 11534 11535 if (dev_proc_init()) 11536 goto out; 11537 11538 if (netdev_kobject_init()) 11539 goto out; 11540 11541 INIT_LIST_HEAD(&ptype_all); 11542 for (i = 0; i < PTYPE_HASH_SIZE; i++) 11543 INIT_LIST_HEAD(&ptype_base[i]); 11544 11545 if (register_pernet_subsys(&netdev_net_ops)) 11546 goto out; 11547 11548 /* 11549 * Initialise the packet receive queues. 
11550  */
11551 
11552 	for_each_possible_cpu(i) {
11553 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11554 		struct softnet_data *sd = &per_cpu(softnet_data, i);
11555 
11556 		INIT_WORK(flush, flush_backlog);
11557 
11558 		skb_queue_head_init(&sd->input_pkt_queue);
11559 		skb_queue_head_init(&sd->process_queue);
11560 #ifdef CONFIG_XFRM_OFFLOAD
11561 		skb_queue_head_init(&sd->xfrm_backlog);
11562 #endif
11563 		INIT_LIST_HEAD(&sd->poll_list);
11564 		sd->output_queue_tailp = &sd->output_queue;
11565 #ifdef CONFIG_RPS
11566 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11567 		sd->cpu = i;
11568 #endif
11569 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11570 		spin_lock_init(&sd->defer_lock);
11571 
11572 		init_gro_hash(&sd->backlog);
11573 		sd->backlog.poll = process_backlog;
11574 		sd->backlog.weight = weight_p;
11575 	}
11576 
11577 	dev_boot_phase = 0;
11578 
11579 	/* The loopback device is special: if any other network device
11580 	 * is present in a network namespace, the loopback device must
11581 	 * be present too. Since we now dynamically allocate and free the
11582 	 * loopback device, ensure this invariant is maintained by
11583 	 * keeping the loopback device as the first device on the
11584 	 * list of network devices, ensuring the loopback device
11585 	 * is the first device that appears and the last network device
11586 	 * that disappears.
11587 	 */
11588 	if (register_pernet_device(&loopback_net_ops))
11589 		goto out;
11590 
11591 	if (register_pernet_device(&default_device_ops))
11592 		goto out;
11593 
11594 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11595 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11596 
11597 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11598 				       NULL, dev_cpu_dead);
11599 	WARN_ON(rc < 0);
11600 	rc = 0;
11601 out:
11602 	return rc;
11603 }
11604 
11605 subsys_initcall(net_dev_init);
11606 