// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>
#include <linux/phy_link_topology.h>

#include "dev.h"
#include "devmem.h"
#include "net-sysfs.h"

static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static
DEFINE_SPINLOCK(napi_hash_lock); 179 180 static unsigned int napi_gen_id = NR_CPUS; 181 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 182 183 static DECLARE_RWSEM(devnet_rename_sem); 184 185 static inline void dev_base_seq_inc(struct net *net) 186 { 187 unsigned int val = net->dev_base_seq + 1; 188 189 WRITE_ONCE(net->dev_base_seq, val ?: 1); 190 } 191 192 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 193 { 194 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 195 196 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 197 } 198 199 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 200 { 201 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 202 } 203 204 #ifndef CONFIG_PREEMPT_RT 205 206 static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key); 207 208 static int __init setup_backlog_napi_threads(char *arg) 209 { 210 static_branch_enable(&use_backlog_threads_key); 211 return 0; 212 } 213 early_param("thread_backlog_napi", setup_backlog_napi_threads); 214 215 static bool use_backlog_threads(void) 216 { 217 return static_branch_unlikely(&use_backlog_threads_key); 218 } 219 220 #else 221 222 static bool use_backlog_threads(void) 223 { 224 return true; 225 } 226 227 #endif 228 229 static inline void backlog_lock_irq_save(struct softnet_data *sd, 230 unsigned long *flags) 231 { 232 if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) 233 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags); 234 else 235 local_irq_save(*flags); 236 } 237 238 static inline void backlog_lock_irq_disable(struct softnet_data *sd) 239 { 240 if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) 241 spin_lock_irq(&sd->input_pkt_queue.lock); 242 else 243 local_irq_disable(); 244 } 245 246 static inline void backlog_unlock_irq_restore(struct softnet_data *sd, 247 unsigned long *flags) 248 { 249 if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) 250 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags); 251 else 252 local_irq_restore(*flags); 253 } 254 255 static inline void backlog_unlock_irq_enable(struct softnet_data *sd) 256 { 257 if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) 258 spin_unlock_irq(&sd->input_pkt_queue.lock); 259 else 260 local_irq_enable(); 261 } 262 263 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev, 264 const char *name) 265 { 266 struct netdev_name_node *name_node; 267 268 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL); 269 if (!name_node) 270 return NULL; 271 INIT_HLIST_NODE(&name_node->hlist); 272 name_node->dev = dev; 273 name_node->name = name; 274 return name_node; 275 } 276 277 static struct netdev_name_node * 278 netdev_name_node_head_alloc(struct net_device *dev) 279 { 280 struct netdev_name_node *name_node; 281 282 name_node = netdev_name_node_alloc(dev, dev->name); 283 if (!name_node) 284 return NULL; 285 INIT_LIST_HEAD(&name_node->list); 286 return name_node; 287 } 288 289 static void netdev_name_node_free(struct netdev_name_node *name_node) 290 { 291 kfree(name_node); 292 } 293 294 static void netdev_name_node_add(struct net *net, 295 struct netdev_name_node *name_node) 296 { 297 hlist_add_head_rcu(&name_node->hlist, 298 dev_name_hash(net, name_node->name)); 299 } 300 301 static void netdev_name_node_del(struct netdev_name_node *name_node) 302 { 303 hlist_del_rcu(&name_node->hlist); 304 } 305 306 static struct netdev_name_node *netdev_name_node_lookup(struct net *net, 307 const char *name) 308 { 309 struct hlist_head *head = 
dev_name_hash(net, name); 310 struct netdev_name_node *name_node; 311 312 hlist_for_each_entry(name_node, head, hlist) 313 if (!strcmp(name_node->name, name)) 314 return name_node; 315 return NULL; 316 } 317 318 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net, 319 const char *name) 320 { 321 struct hlist_head *head = dev_name_hash(net, name); 322 struct netdev_name_node *name_node; 323 324 hlist_for_each_entry_rcu(name_node, head, hlist) 325 if (!strcmp(name_node->name, name)) 326 return name_node; 327 return NULL; 328 } 329 330 bool netdev_name_in_use(struct net *net, const char *name) 331 { 332 return netdev_name_node_lookup(net, name); 333 } 334 EXPORT_SYMBOL(netdev_name_in_use); 335 336 int netdev_name_node_alt_create(struct net_device *dev, const char *name) 337 { 338 struct netdev_name_node *name_node; 339 struct net *net = dev_net(dev); 340 341 name_node = netdev_name_node_lookup(net, name); 342 if (name_node) 343 return -EEXIST; 344 name_node = netdev_name_node_alloc(dev, name); 345 if (!name_node) 346 return -ENOMEM; 347 netdev_name_node_add(net, name_node); 348 /* The node that holds dev->name acts as a head of per-device list. */ 349 list_add_tail_rcu(&name_node->list, &dev->name_node->list); 350 351 return 0; 352 } 353 354 static void netdev_name_node_alt_free(struct rcu_head *head) 355 { 356 struct netdev_name_node *name_node = 357 container_of(head, struct netdev_name_node, rcu); 358 359 kfree(name_node->name); 360 netdev_name_node_free(name_node); 361 } 362 363 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node) 364 { 365 netdev_name_node_del(name_node); 366 list_del(&name_node->list); 367 call_rcu(&name_node->rcu, netdev_name_node_alt_free); 368 } 369 370 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) 371 { 372 struct netdev_name_node *name_node; 373 struct net *net = dev_net(dev); 374 375 name_node = netdev_name_node_lookup(net, name); 376 if (!name_node) 377 return -ENOENT; 378 /* lookup might have found our primary name or a name belonging 379 * to another device. 
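 * (Alternative names themselves are created and removed from user space
 * over rtnetlink, e.g. "ip link property add dev eth0 altname my-port";
 * the device and altname in that command are illustrative only.)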
380 */ 381 if (name_node == dev->name_node || name_node->dev != dev) 382 return -EINVAL; 383 384 __netdev_name_node_alt_destroy(name_node); 385 return 0; 386 } 387 388 static void netdev_name_node_alt_flush(struct net_device *dev) 389 { 390 struct netdev_name_node *name_node, *tmp; 391 392 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) { 393 list_del(&name_node->list); 394 netdev_name_node_alt_free(&name_node->rcu); 395 } 396 } 397 398 /* Device list insertion */ 399 static void list_netdevice(struct net_device *dev) 400 { 401 struct netdev_name_node *name_node; 402 struct net *net = dev_net(dev); 403 404 ASSERT_RTNL(); 405 406 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 407 netdev_name_node_add(net, dev->name_node); 408 hlist_add_head_rcu(&dev->index_hlist, 409 dev_index_hash(net, dev->ifindex)); 410 411 netdev_for_each_altname(dev, name_node) 412 netdev_name_node_add(net, name_node); 413 414 /* We reserved the ifindex, this can't fail */ 415 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL)); 416 417 dev_base_seq_inc(net); 418 } 419 420 /* Device list removal 421 * caller must respect a RCU grace period before freeing/reusing dev 422 */ 423 static void unlist_netdevice(struct net_device *dev) 424 { 425 struct netdev_name_node *name_node; 426 struct net *net = dev_net(dev); 427 428 ASSERT_RTNL(); 429 430 xa_erase(&net->dev_by_index, dev->ifindex); 431 432 netdev_for_each_altname(dev, name_node) 433 netdev_name_node_del(name_node); 434 435 /* Unlink dev from the device chain */ 436 list_del_rcu(&dev->dev_list); 437 netdev_name_node_del(dev->name_node); 438 hlist_del_rcu(&dev->index_hlist); 439 440 dev_base_seq_inc(dev_net(dev)); 441 } 442 443 /* 444 * Our notifier list 445 */ 446 447 static RAW_NOTIFIER_HEAD(netdev_chain); 448 449 /* 450 * Device drivers call our routines to queue packets here. We empty the 451 * queue in the local softnet handler. 452 */ 453 454 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = { 455 .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock), 456 }; 457 EXPORT_PER_CPU_SYMBOL(softnet_data); 458 459 /* Page_pool has a lockless array/stack to alloc/recycle pages. 460 * PP consumers must pay attention to run APIs in the appropriate context 461 * (e.g. NAPI context). 
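 *
 * A minimal alloc/recycle sketch from NAPI/softirq context (illustrative
 * only, error handling omitted):
 *
 *	struct page *page = page_pool_dev_alloc_pages(pool);
 *
 *	... fill the page and hand it to the stack, or recycle it ...
 *	page_pool_put_full_page(pool, page, true);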
462 */ 463 static DEFINE_PER_CPU(struct page_pool *, system_page_pool); 464 465 #ifdef CONFIG_LOCKDEP 466 /* 467 * register_netdevice() inits txq->_xmit_lock and sets lockdep class 468 * according to dev->type 469 */ 470 static const unsigned short netdev_lock_type[] = { 471 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 472 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 473 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 474 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 475 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 476 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 477 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 478 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 479 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 480 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 481 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 482 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 483 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 484 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 485 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 486 487 static const char *const netdev_lock_name[] = { 488 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 489 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 490 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 491 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 492 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 493 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 494 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 495 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 496 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 497 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 498 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 499 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 500 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 501 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 502 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 503 504 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 505 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 506 507 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 508 { 509 int i; 510 511 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 512 if (netdev_lock_type[i] == dev_type) 513 return i; 514 /* the last key is used by default */ 515 return ARRAY_SIZE(netdev_lock_type) - 1; 516 } 517 518 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 519 unsigned short dev_type) 520 { 521 int i; 522 523 i = netdev_lock_pos(dev_type); 524 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 525 netdev_lock_name[i]); 526 } 527 528 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 529 { 530 int i; 531 532 i = netdev_lock_pos(dev->type); 533 lockdep_set_class_and_name(&dev->addr_list_lock, 534 &netdev_addr_lock_key[i], 535 netdev_lock_name[i]); 536 } 537 #else 538 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 539 unsigned short dev_type) 540 { 541 } 542 543 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 544 { 545 } 546 #endif 547 548 /******************************************************************************* 549 * 550 * Protocol management and registration 
routines 551 * 552 *******************************************************************************/ 553 554 555 /* 556 * Add a protocol ID to the list. Now that the input handler is 557 * smarter we can dispense with all the messy stuff that used to be 558 * here. 559 * 560 * BEWARE!!! Protocol handlers, mangling input packets, 561 * MUST BE last in hash buckets and checking protocol handlers 562 * MUST start from promiscuous ptype_all chain in net_bh. 563 * It is true now, do not change it. 564 * Explanation follows: if protocol handler, mangling packet, will 565 * be the first on list, it is not able to sense, that packet 566 * is cloned and should be copied-on-write, so that it will 567 * change it and subsequent readers will get broken packet. 568 * --ANK (980803) 569 */ 570 571 static inline struct list_head *ptype_head(const struct packet_type *pt) 572 { 573 if (pt->type == htons(ETH_P_ALL)) 574 return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all; 575 else 576 return pt->dev ? &pt->dev->ptype_specific : 577 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 578 } 579 580 /** 581 * dev_add_pack - add packet handler 582 * @pt: packet type declaration 583 * 584 * Add a protocol handler to the networking stack. The passed &packet_type 585 * is linked into kernel lists and may not be freed until it has been 586 * removed from the kernel lists. 587 * 588 * This call does not sleep therefore it can not 589 * guarantee all CPU's that are in middle of receiving packets 590 * will see the new packet type (until the next received packet). 591 */ 592 593 void dev_add_pack(struct packet_type *pt) 594 { 595 struct list_head *head = ptype_head(pt); 596 597 spin_lock(&ptype_lock); 598 list_add_rcu(&pt->list, head); 599 spin_unlock(&ptype_lock); 600 } 601 EXPORT_SYMBOL(dev_add_pack); 602 603 /** 604 * __dev_remove_pack - remove packet handler 605 * @pt: packet type declaration 606 * 607 * Remove a protocol handler that was previously added to the kernel 608 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 609 * from the kernel lists and can be freed or reused once this function 610 * returns. 611 * 612 * The packet type might still be in use by receivers 613 * and must not be freed until after all the CPU's have gone 614 * through a quiescent state. 615 */ 616 void __dev_remove_pack(struct packet_type *pt) 617 { 618 struct list_head *head = ptype_head(pt); 619 struct packet_type *pt1; 620 621 spin_lock(&ptype_lock); 622 623 list_for_each_entry(pt1, head, list) { 624 if (pt == pt1) { 625 list_del_rcu(&pt->list); 626 goto out; 627 } 628 } 629 630 pr_warn("dev_remove_pack: %p not found\n", pt); 631 out: 632 spin_unlock(&ptype_lock); 633 } 634 EXPORT_SYMBOL(__dev_remove_pack); 635 636 /** 637 * dev_remove_pack - remove packet handler 638 * @pt: packet type declaration 639 * 640 * Remove a protocol handler that was previously added to the kernel 641 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 642 * from the kernel lists and can be freed or reused once this function 643 * returns. 644 * 645 * This call sleeps to guarantee that no CPU is looking at the packet 646 * type after return. 
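 *
 * A minimal usage sketch (hypothetical handler, illustrative only); the
 * &packet_type must stay allocated until dev_remove_pack() has returned:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type	= cpu_to_be16(ETH_P_ALL),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);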
647 */ 648 void dev_remove_pack(struct packet_type *pt) 649 { 650 __dev_remove_pack(pt); 651 652 synchronize_net(); 653 } 654 EXPORT_SYMBOL(dev_remove_pack); 655 656 657 /******************************************************************************* 658 * 659 * Device Interface Subroutines 660 * 661 *******************************************************************************/ 662 663 /** 664 * dev_get_iflink - get 'iflink' value of a interface 665 * @dev: targeted interface 666 * 667 * Indicates the ifindex the interface is linked to. 668 * Physical interfaces have the same 'ifindex' and 'iflink' values. 669 */ 670 671 int dev_get_iflink(const struct net_device *dev) 672 { 673 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 674 return dev->netdev_ops->ndo_get_iflink(dev); 675 676 return READ_ONCE(dev->ifindex); 677 } 678 EXPORT_SYMBOL(dev_get_iflink); 679 680 /** 681 * dev_fill_metadata_dst - Retrieve tunnel egress information. 682 * @dev: targeted interface 683 * @skb: The packet. 684 * 685 * For better visibility of tunnel traffic OVS needs to retrieve 686 * egress tunnel information for a packet. Following API allows 687 * user to get this info. 688 */ 689 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 690 { 691 struct ip_tunnel_info *info; 692 693 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 694 return -EINVAL; 695 696 info = skb_tunnel_info_unclone(skb); 697 if (!info) 698 return -ENOMEM; 699 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 700 return -EINVAL; 701 702 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 703 } 704 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 705 706 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack) 707 { 708 int k = stack->num_paths++; 709 710 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX)) 711 return NULL; 712 713 return &stack->path[k]; 714 } 715 716 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 717 struct net_device_path_stack *stack) 718 { 719 const struct net_device *last_dev; 720 struct net_device_path_ctx ctx = { 721 .dev = dev, 722 }; 723 struct net_device_path *path; 724 int ret = 0; 725 726 memcpy(ctx.daddr, daddr, sizeof(ctx.daddr)); 727 stack->num_paths = 0; 728 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) { 729 last_dev = ctx.dev; 730 path = dev_fwd_path(stack); 731 if (!path) 732 return -1; 733 734 memset(path, 0, sizeof(struct net_device_path)); 735 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path); 736 if (ret < 0) 737 return -1; 738 739 if (WARN_ON_ONCE(last_dev == ctx.dev)) 740 return -1; 741 } 742 743 if (!ctx.dev) 744 return ret; 745 746 path = dev_fwd_path(stack); 747 if (!path) 748 return -1; 749 path->type = DEV_PATH_ETHERNET; 750 path->dev = ctx.dev; 751 752 return ret; 753 } 754 EXPORT_SYMBOL_GPL(dev_fill_forward_path); 755 756 /** 757 * __dev_get_by_name - find a device by its name 758 * @net: the applicable net namespace 759 * @name: name to find 760 * 761 * Find an interface by name. Must be called under RTNL semaphore. 762 * If the name is found a pointer to the device is returned. 763 * If the name is not found then %NULL is returned. The 764 * reference counters are not incremented so the caller must be 765 * careful with locks. 766 */ 767 768 struct net_device *__dev_get_by_name(struct net *net, const char *name) 769 { 770 struct netdev_name_node *node_name; 771 772 node_name = netdev_name_node_lookup(net, name); 773 return node_name ? 
node_name->dev : NULL; 774 } 775 EXPORT_SYMBOL(__dev_get_by_name); 776 777 /** 778 * dev_get_by_name_rcu - find a device by its name 779 * @net: the applicable net namespace 780 * @name: name to find 781 * 782 * Find an interface by name. 783 * If the name is found a pointer to the device is returned. 784 * If the name is not found then %NULL is returned. 785 * The reference counters are not incremented so the caller must be 786 * careful with locks. The caller must hold RCU lock. 787 */ 788 789 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 790 { 791 struct netdev_name_node *node_name; 792 793 node_name = netdev_name_node_lookup_rcu(net, name); 794 return node_name ? node_name->dev : NULL; 795 } 796 EXPORT_SYMBOL(dev_get_by_name_rcu); 797 798 /* Deprecated for new users, call netdev_get_by_name() instead */ 799 struct net_device *dev_get_by_name(struct net *net, const char *name) 800 { 801 struct net_device *dev; 802 803 rcu_read_lock(); 804 dev = dev_get_by_name_rcu(net, name); 805 dev_hold(dev); 806 rcu_read_unlock(); 807 return dev; 808 } 809 EXPORT_SYMBOL(dev_get_by_name); 810 811 /** 812 * netdev_get_by_name() - find a device by its name 813 * @net: the applicable net namespace 814 * @name: name to find 815 * @tracker: tracking object for the acquired reference 816 * @gfp: allocation flags for the tracker 817 * 818 * Find an interface by name. This can be called from any 819 * context and does its own locking. The returned handle has 820 * the usage count incremented and the caller must use netdev_put() to 821 * release it when it is no longer needed. %NULL is returned if no 822 * matching device is found. 823 */ 824 struct net_device *netdev_get_by_name(struct net *net, const char *name, 825 netdevice_tracker *tracker, gfp_t gfp) 826 { 827 struct net_device *dev; 828 829 dev = dev_get_by_name(net, name); 830 if (dev) 831 netdev_tracker_alloc(dev, tracker, gfp); 832 return dev; 833 } 834 EXPORT_SYMBOL(netdev_get_by_name); 835 836 /** 837 * __dev_get_by_index - find a device by its ifindex 838 * @net: the applicable net namespace 839 * @ifindex: index of device 840 * 841 * Search for an interface by index. Returns %NULL if the device 842 * is not found or a pointer to the device. The device has not 843 * had its reference counter increased so the caller must be careful 844 * about locking. The caller must hold the RTNL semaphore. 845 */ 846 847 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 848 { 849 struct net_device *dev; 850 struct hlist_head *head = dev_index_hash(net, ifindex); 851 852 hlist_for_each_entry(dev, head, index_hlist) 853 if (dev->ifindex == ifindex) 854 return dev; 855 856 return NULL; 857 } 858 EXPORT_SYMBOL(__dev_get_by_index); 859 860 /** 861 * dev_get_by_index_rcu - find a device by its ifindex 862 * @net: the applicable net namespace 863 * @ifindex: index of device 864 * 865 * Search for an interface by index. Returns %NULL if the device 866 * is not found or a pointer to the device. The device has not 867 * had its reference counter increased so the caller must be careful 868 * about locking. The caller must hold RCU lock. 
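 *
 * A minimal usage sketch (illustrative only); the pointer is only valid
 * inside the RCU read-side critical section unless a reference is taken
 * before unlocking:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		... use dev, or take a reference with dev_hold(dev) ...
 *	rcu_read_unlock();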
869 */ 870 871 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 872 { 873 struct net_device *dev; 874 struct hlist_head *head = dev_index_hash(net, ifindex); 875 876 hlist_for_each_entry_rcu(dev, head, index_hlist) 877 if (dev->ifindex == ifindex) 878 return dev; 879 880 return NULL; 881 } 882 EXPORT_SYMBOL(dev_get_by_index_rcu); 883 884 /* Deprecated for new users, call netdev_get_by_index() instead */ 885 struct net_device *dev_get_by_index(struct net *net, int ifindex) 886 { 887 struct net_device *dev; 888 889 rcu_read_lock(); 890 dev = dev_get_by_index_rcu(net, ifindex); 891 dev_hold(dev); 892 rcu_read_unlock(); 893 return dev; 894 } 895 EXPORT_SYMBOL(dev_get_by_index); 896 897 /** 898 * netdev_get_by_index() - find a device by its ifindex 899 * @net: the applicable net namespace 900 * @ifindex: index of device 901 * @tracker: tracking object for the acquired reference 902 * @gfp: allocation flags for the tracker 903 * 904 * Search for an interface by index. Returns NULL if the device 905 * is not found or a pointer to the device. The device returned has 906 * had a reference added and the pointer is safe until the user calls 907 * netdev_put() to indicate they have finished with it. 908 */ 909 struct net_device *netdev_get_by_index(struct net *net, int ifindex, 910 netdevice_tracker *tracker, gfp_t gfp) 911 { 912 struct net_device *dev; 913 914 dev = dev_get_by_index(net, ifindex); 915 if (dev) 916 netdev_tracker_alloc(dev, tracker, gfp); 917 return dev; 918 } 919 EXPORT_SYMBOL(netdev_get_by_index); 920 921 /** 922 * dev_get_by_napi_id - find a device by napi_id 923 * @napi_id: ID of the NAPI struct 924 * 925 * Search for an interface by NAPI ID. Returns %NULL if the device 926 * is not found or a pointer to the device. The device has not had 927 * its reference counter increased so the caller must be careful 928 * about locking. The caller must hold RCU lock. 929 */ 930 931 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 932 { 933 struct napi_struct *napi; 934 935 WARN_ON_ONCE(!rcu_read_lock_held()); 936 937 if (napi_id < MIN_NAPI_ID) 938 return NULL; 939 940 napi = napi_by_id(napi_id); 941 942 return napi ? napi->dev : NULL; 943 } 944 EXPORT_SYMBOL(dev_get_by_napi_id); 945 946 static DEFINE_SEQLOCK(netdev_rename_lock); 947 948 void netdev_copy_name(struct net_device *dev, char *name) 949 { 950 unsigned int seq; 951 952 do { 953 seq = read_seqbegin(&netdev_rename_lock); 954 strscpy(name, dev->name, IFNAMSIZ); 955 } while (read_seqretry(&netdev_rename_lock, seq)); 956 } 957 958 /** 959 * netdev_get_name - get a netdevice name, knowing its ifindex. 960 * @net: network namespace 961 * @name: a pointer to the buffer where the name will be stored. 962 * @ifindex: the ifindex of the interface to get the name from. 963 */ 964 int netdev_get_name(struct net *net, char *name, int ifindex) 965 { 966 struct net_device *dev; 967 int ret; 968 969 rcu_read_lock(); 970 971 dev = dev_get_by_index_rcu(net, ifindex); 972 if (!dev) { 973 ret = -ENODEV; 974 goto out; 975 } 976 977 netdev_copy_name(dev, name); 978 979 ret = 0; 980 out: 981 rcu_read_unlock(); 982 return ret; 983 } 984 985 /** 986 * dev_getbyhwaddr_rcu - find a device by its hardware address 987 * @net: the applicable net namespace 988 * @type: media type of device 989 * @ha: hardware address 990 * 991 * Search for an interface by MAC address. Returns NULL if the device 992 * is not found or a pointer to the device. 993 * The caller must hold RCU or RTNL. 
994 * The returned device has not had its ref count increased 995 * and the caller must therefore be careful about locking 996 * 997 */ 998 999 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 1000 const char *ha) 1001 { 1002 struct net_device *dev; 1003 1004 for_each_netdev_rcu(net, dev) 1005 if (dev->type == type && 1006 !memcmp(dev->dev_addr, ha, dev->addr_len)) 1007 return dev; 1008 1009 return NULL; 1010 } 1011 EXPORT_SYMBOL(dev_getbyhwaddr_rcu); 1012 1013 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 1014 { 1015 struct net_device *dev, *ret = NULL; 1016 1017 rcu_read_lock(); 1018 for_each_netdev_rcu(net, dev) 1019 if (dev->type == type) { 1020 dev_hold(dev); 1021 ret = dev; 1022 break; 1023 } 1024 rcu_read_unlock(); 1025 return ret; 1026 } 1027 EXPORT_SYMBOL(dev_getfirstbyhwtype); 1028 1029 /** 1030 * __dev_get_by_flags - find any device with given flags 1031 * @net: the applicable net namespace 1032 * @if_flags: IFF_* values 1033 * @mask: bitmask of bits in if_flags to check 1034 * 1035 * Search for any interface with the given flags. Returns NULL if a device 1036 * is not found or a pointer to the device. Must be called inside 1037 * rtnl_lock(), and result refcount is unchanged. 1038 */ 1039 1040 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, 1041 unsigned short mask) 1042 { 1043 struct net_device *dev, *ret; 1044 1045 ASSERT_RTNL(); 1046 1047 ret = NULL; 1048 for_each_netdev(net, dev) { 1049 if (((dev->flags ^ if_flags) & mask) == 0) { 1050 ret = dev; 1051 break; 1052 } 1053 } 1054 return ret; 1055 } 1056 EXPORT_SYMBOL(__dev_get_by_flags); 1057 1058 /** 1059 * dev_valid_name - check if name is okay for network device 1060 * @name: name string 1061 * 1062 * Network device names need to be valid file names to 1063 * allow sysfs to work. We also disallow any kind of 1064 * whitespace. 1065 */ 1066 bool dev_valid_name(const char *name) 1067 { 1068 if (*name == '\0') 1069 return false; 1070 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1071 return false; 1072 if (!strcmp(name, ".") || !strcmp(name, "..")) 1073 return false; 1074 1075 while (*name) { 1076 if (*name == '/' || *name == ':' || isspace(*name)) 1077 return false; 1078 name++; 1079 } 1080 return true; 1081 } 1082 EXPORT_SYMBOL(dev_valid_name); 1083 1084 /** 1085 * __dev_alloc_name - allocate a name for a device 1086 * @net: network namespace to allocate the device name in 1087 * @name: name format string 1088 * @res: result name string 1089 * 1090 * Passed a format string - eg "lt%d" it will try and find a suitable 1091 * id. It scans list of devices to build up a free map, then chooses 1092 * the first empty slot. The caller must hold the dev_base or rtnl lock 1093 * while allocating the name and adding the device in order to avoid 1094 * duplicates. 1095 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1096 * Returns the number of the unit assigned or a negative errno code. 1097 */ 1098 1099 static int __dev_alloc_name(struct net *net, const char *name, char *res) 1100 { 1101 int i = 0; 1102 const char *p; 1103 const int max_netdevices = 8*PAGE_SIZE; 1104 unsigned long *inuse; 1105 struct net_device *d; 1106 char buf[IFNAMSIZ]; 1107 1108 /* Verify the string as this thing may have come from the user. 1109 * There must be one "%d" and no other "%" characters. 
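	 * For example "eth%d" is accepted, while "eth", "eth%s" and
	 * "eth%d%d" are all rejected with -EINVAL.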
1110 */ 1111 p = strchr(name, '%'); 1112 if (!p || p[1] != 'd' || strchr(p + 2, '%')) 1113 return -EINVAL; 1114 1115 /* Use one page as a bit array of possible slots */ 1116 inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC); 1117 if (!inuse) 1118 return -ENOMEM; 1119 1120 for_each_netdev(net, d) { 1121 struct netdev_name_node *name_node; 1122 1123 netdev_for_each_altname(d, name_node) { 1124 if (!sscanf(name_node->name, name, &i)) 1125 continue; 1126 if (i < 0 || i >= max_netdevices) 1127 continue; 1128 1129 /* avoid cases where sscanf is not exact inverse of printf */ 1130 snprintf(buf, IFNAMSIZ, name, i); 1131 if (!strncmp(buf, name_node->name, IFNAMSIZ)) 1132 __set_bit(i, inuse); 1133 } 1134 if (!sscanf(d->name, name, &i)) 1135 continue; 1136 if (i < 0 || i >= max_netdevices) 1137 continue; 1138 1139 /* avoid cases where sscanf is not exact inverse of printf */ 1140 snprintf(buf, IFNAMSIZ, name, i); 1141 if (!strncmp(buf, d->name, IFNAMSIZ)) 1142 __set_bit(i, inuse); 1143 } 1144 1145 i = find_first_zero_bit(inuse, max_netdevices); 1146 bitmap_free(inuse); 1147 if (i == max_netdevices) 1148 return -ENFILE; 1149 1150 /* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */ 1151 strscpy(buf, name, IFNAMSIZ); 1152 snprintf(res, IFNAMSIZ, buf, i); 1153 return i; 1154 } 1155 1156 /* Returns negative errno or allocated unit id (see __dev_alloc_name()) */ 1157 static int dev_prep_valid_name(struct net *net, struct net_device *dev, 1158 const char *want_name, char *out_name, 1159 int dup_errno) 1160 { 1161 if (!dev_valid_name(want_name)) 1162 return -EINVAL; 1163 1164 if (strchr(want_name, '%')) 1165 return __dev_alloc_name(net, want_name, out_name); 1166 1167 if (netdev_name_in_use(net, want_name)) 1168 return -dup_errno; 1169 if (out_name != want_name) 1170 strscpy(out_name, want_name, IFNAMSIZ); 1171 return 0; 1172 } 1173 1174 /** 1175 * dev_alloc_name - allocate a name for a device 1176 * @dev: device 1177 * @name: name format string 1178 * 1179 * Passed a format string - eg "lt%d" it will try and find a suitable 1180 * id. It scans list of devices to build up a free map, then chooses 1181 * the first empty slot. The caller must hold the dev_base or rtnl lock 1182 * while allocating the name and adding the device in order to avoid 1183 * duplicates. 1184 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1185 * Returns the number of the unit assigned or a negative errno code. 1186 */ 1187 1188 int dev_alloc_name(struct net_device *dev, const char *name) 1189 { 1190 return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE); 1191 } 1192 EXPORT_SYMBOL(dev_alloc_name); 1193 1194 static int dev_get_valid_name(struct net *net, struct net_device *dev, 1195 const char *name) 1196 { 1197 int ret; 1198 1199 ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST); 1200 return ret < 0 ? ret : 0; 1201 } 1202 1203 /** 1204 * dev_change_name - change name of a device 1205 * @dev: device 1206 * @newname: name (or format string) must be at least IFNAMSIZ 1207 * 1208 * Change name of a device, can pass format strings "eth%d". 1209 * for wildcarding. 
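 *
 * A minimal usage sketch (hypothetical caller; RTNL must be held around
 * the call, as asserted below):
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "lan%d");
 *	rtnl_unlock();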
1210 */ 1211 int dev_change_name(struct net_device *dev, const char *newname) 1212 { 1213 unsigned char old_assign_type; 1214 char oldname[IFNAMSIZ]; 1215 int err = 0; 1216 int ret; 1217 struct net *net; 1218 1219 ASSERT_RTNL(); 1220 BUG_ON(!dev_net(dev)); 1221 1222 net = dev_net(dev); 1223 1224 down_write(&devnet_rename_sem); 1225 1226 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1227 up_write(&devnet_rename_sem); 1228 return 0; 1229 } 1230 1231 memcpy(oldname, dev->name, IFNAMSIZ); 1232 1233 write_seqlock_bh(&netdev_rename_lock); 1234 err = dev_get_valid_name(net, dev, newname); 1235 write_sequnlock_bh(&netdev_rename_lock); 1236 1237 if (err < 0) { 1238 up_write(&devnet_rename_sem); 1239 return err; 1240 } 1241 1242 if (oldname[0] && !strchr(oldname, '%')) 1243 netdev_info(dev, "renamed from %s%s\n", oldname, 1244 dev->flags & IFF_UP ? " (while UP)" : ""); 1245 1246 old_assign_type = dev->name_assign_type; 1247 WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED); 1248 1249 rollback: 1250 ret = device_rename(&dev->dev, dev->name); 1251 if (ret) { 1252 memcpy(dev->name, oldname, IFNAMSIZ); 1253 WRITE_ONCE(dev->name_assign_type, old_assign_type); 1254 up_write(&devnet_rename_sem); 1255 return ret; 1256 } 1257 1258 up_write(&devnet_rename_sem); 1259 1260 netdev_adjacent_rename_links(dev, oldname); 1261 1262 netdev_name_node_del(dev->name_node); 1263 1264 synchronize_net(); 1265 1266 netdev_name_node_add(net, dev->name_node); 1267 1268 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1269 ret = notifier_to_errno(ret); 1270 1271 if (ret) { 1272 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1273 if (err >= 0) { 1274 err = ret; 1275 down_write(&devnet_rename_sem); 1276 write_seqlock_bh(&netdev_rename_lock); 1277 memcpy(dev->name, oldname, IFNAMSIZ); 1278 write_sequnlock_bh(&netdev_rename_lock); 1279 memcpy(oldname, newname, IFNAMSIZ); 1280 WRITE_ONCE(dev->name_assign_type, old_assign_type); 1281 old_assign_type = NET_NAME_RENAMED; 1282 goto rollback; 1283 } else { 1284 netdev_err(dev, "name change rollback failed: %d\n", 1285 ret); 1286 } 1287 } 1288 1289 return err; 1290 } 1291 1292 /** 1293 * dev_set_alias - change ifalias of a device 1294 * @dev: device 1295 * @alias: name up to IFALIASZ 1296 * @len: limit of bytes to copy from info 1297 * 1298 * Set ifalias for a device, 1299 */ 1300 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1301 { 1302 struct dev_ifalias *new_alias = NULL; 1303 1304 if (len >= IFALIASZ) 1305 return -EINVAL; 1306 1307 if (len) { 1308 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1309 if (!new_alias) 1310 return -ENOMEM; 1311 1312 memcpy(new_alias->ifalias, alias, len); 1313 new_alias->ifalias[len] = 0; 1314 } 1315 1316 mutex_lock(&ifalias_mutex); 1317 new_alias = rcu_replace_pointer(dev->ifalias, new_alias, 1318 mutex_is_locked(&ifalias_mutex)); 1319 mutex_unlock(&ifalias_mutex); 1320 1321 if (new_alias) 1322 kfree_rcu(new_alias, rcuhead); 1323 1324 return len; 1325 } 1326 EXPORT_SYMBOL(dev_set_alias); 1327 1328 /** 1329 * dev_get_alias - get ifalias of a device 1330 * @dev: device 1331 * @name: buffer to store name of ifalias 1332 * @len: size of buffer 1333 * 1334 * get ifalias for a device. Caller must make sure dev cannot go 1335 * away, e.g. rcu read lock or own a reference count to device. 
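 *
 * A minimal usage sketch (illustrative only), with the caller holding a
 * reference on @dev:
 *
 *	char buf[IFALIASZ];
 *
 *	if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
 *		... buf now holds the NUL-terminated alias ...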
1336 */ 1337 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1338 { 1339 const struct dev_ifalias *alias; 1340 int ret = 0; 1341 1342 rcu_read_lock(); 1343 alias = rcu_dereference(dev->ifalias); 1344 if (alias) 1345 ret = snprintf(name, len, "%s", alias->ifalias); 1346 rcu_read_unlock(); 1347 1348 return ret; 1349 } 1350 1351 /** 1352 * netdev_features_change - device changes features 1353 * @dev: device to cause notification 1354 * 1355 * Called to indicate a device has changed features. 1356 */ 1357 void netdev_features_change(struct net_device *dev) 1358 { 1359 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1360 } 1361 EXPORT_SYMBOL(netdev_features_change); 1362 1363 /** 1364 * netdev_state_change - device changes state 1365 * @dev: device to cause notification 1366 * 1367 * Called to indicate a device has changed state. This function calls 1368 * the notifier chains for netdev_chain and sends a NEWLINK message 1369 * to the routing socket. 1370 */ 1371 void netdev_state_change(struct net_device *dev) 1372 { 1373 if (dev->flags & IFF_UP) { 1374 struct netdev_notifier_change_info change_info = { 1375 .info.dev = dev, 1376 }; 1377 1378 call_netdevice_notifiers_info(NETDEV_CHANGE, 1379 &change_info.info); 1380 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL); 1381 } 1382 } 1383 EXPORT_SYMBOL(netdev_state_change); 1384 1385 /** 1386 * __netdev_notify_peers - notify network peers about existence of @dev, 1387 * to be called when rtnl lock is already held. 1388 * @dev: network device 1389 * 1390 * Generate traffic such that interested network peers are aware of 1391 * @dev, such as by generating a gratuitous ARP. This may be used when 1392 * a device wants to inform the rest of the network about some sort of 1393 * reconfiguration such as a failover event or virtual machine 1394 * migration. 1395 */ 1396 void __netdev_notify_peers(struct net_device *dev) 1397 { 1398 ASSERT_RTNL(); 1399 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1400 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1401 } 1402 EXPORT_SYMBOL(__netdev_notify_peers); 1403 1404 /** 1405 * netdev_notify_peers - notify network peers about existence of @dev 1406 * @dev: network device 1407 * 1408 * Generate traffic such that interested network peers are aware of 1409 * @dev, such as by generating a gratuitous ARP. This may be used when 1410 * a device wants to inform the rest of the network about some sort of 1411 * reconfiguration such as a failover event or virtual machine 1412 * migration. 1413 */ 1414 void netdev_notify_peers(struct net_device *dev) 1415 { 1416 rtnl_lock(); 1417 __netdev_notify_peers(dev); 1418 rtnl_unlock(); 1419 } 1420 EXPORT_SYMBOL(netdev_notify_peers); 1421 1422 static int napi_threaded_poll(void *data); 1423 1424 static int napi_kthread_create(struct napi_struct *n) 1425 { 1426 int err = 0; 1427 1428 /* Create and wake up the kthread once to put it in 1429 * TASK_INTERRUPTIBLE mode to avoid the blocked task 1430 * warning and work with loadavg. 
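 *
 * (Threaded NAPI itself is switched on per device via dev_set_threaded(),
 * typically from the netdev "threaded" sysfs attribute.)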
1431 */ 1432 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", 1433 n->dev->name, n->napi_id); 1434 if (IS_ERR(n->thread)) { 1435 err = PTR_ERR(n->thread); 1436 pr_err("kthread_run failed with err %d\n", err); 1437 n->thread = NULL; 1438 } 1439 1440 return err; 1441 } 1442 1443 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1444 { 1445 const struct net_device_ops *ops = dev->netdev_ops; 1446 int ret; 1447 1448 ASSERT_RTNL(); 1449 dev_addr_check(dev); 1450 1451 if (!netif_device_present(dev)) { 1452 /* may be detached because parent is runtime-suspended */ 1453 if (dev->dev.parent) 1454 pm_runtime_resume(dev->dev.parent); 1455 if (!netif_device_present(dev)) 1456 return -ENODEV; 1457 } 1458 1459 /* Block netpoll from trying to do any rx path servicing. 1460 * If we don't do this there is a chance ndo_poll_controller 1461 * or ndo_poll may be running while we open the device 1462 */ 1463 netpoll_poll_disable(dev); 1464 1465 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1466 ret = notifier_to_errno(ret); 1467 if (ret) 1468 return ret; 1469 1470 set_bit(__LINK_STATE_START, &dev->state); 1471 1472 if (ops->ndo_validate_addr) 1473 ret = ops->ndo_validate_addr(dev); 1474 1475 if (!ret && ops->ndo_open) 1476 ret = ops->ndo_open(dev); 1477 1478 netpoll_poll_enable(dev); 1479 1480 if (ret) 1481 clear_bit(__LINK_STATE_START, &dev->state); 1482 else { 1483 dev->flags |= IFF_UP; 1484 dev_set_rx_mode(dev); 1485 dev_activate(dev); 1486 add_device_randomness(dev->dev_addr, dev->addr_len); 1487 } 1488 1489 return ret; 1490 } 1491 1492 /** 1493 * dev_open - prepare an interface for use. 1494 * @dev: device to open 1495 * @extack: netlink extended ack 1496 * 1497 * Takes a device from down to up state. The device's private open 1498 * function is invoked and then the multicast lists are loaded. Finally 1499 * the device is moved into the up state and a %NETDEV_UP message is 1500 * sent to the netdev notifier chain. 1501 * 1502 * Calling this function on an active interface is a nop. On a failure 1503 * a negative errno code is returned. 1504 */ 1505 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1506 { 1507 int ret; 1508 1509 if (dev->flags & IFF_UP) 1510 return 0; 1511 1512 ret = __dev_open(dev, extack); 1513 if (ret < 0) 1514 return ret; 1515 1516 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); 1517 call_netdevice_notifiers(NETDEV_UP, dev); 1518 1519 return ret; 1520 } 1521 EXPORT_SYMBOL(dev_open); 1522 1523 static void __dev_close_many(struct list_head *head) 1524 { 1525 struct net_device *dev; 1526 1527 ASSERT_RTNL(); 1528 might_sleep(); 1529 1530 list_for_each_entry(dev, head, close_list) { 1531 /* Temporarily disable netpoll until the interface is down */ 1532 netpoll_poll_disable(dev); 1533 1534 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1535 1536 clear_bit(__LINK_STATE_START, &dev->state); 1537 1538 /* Synchronize to scheduled poll. We cannot touch poll list, it 1539 * can be even on different cpu. So just clear netif_running(). 1540 * 1541 * dev->stop() will invoke napi_disable() on all of it's 1542 * napi_struct instances on this device. 1543 */ 1544 smp_mb__after_atomic(); /* Commit netif_running(). */ 1545 } 1546 1547 dev_deactivate_many(head); 1548 1549 list_for_each_entry(dev, head, close_list) { 1550 const struct net_device_ops *ops = dev->netdev_ops; 1551 1552 /* 1553 * Call the device specific close. This cannot fail. 
1554 * Only if device is UP 1555 * 1556 * We allow it to be called even after a DETACH hot-plug 1557 * event. 1558 */ 1559 if (ops->ndo_stop) 1560 ops->ndo_stop(dev); 1561 1562 dev->flags &= ~IFF_UP; 1563 netpoll_poll_enable(dev); 1564 } 1565 } 1566 1567 static void __dev_close(struct net_device *dev) 1568 { 1569 LIST_HEAD(single); 1570 1571 list_add(&dev->close_list, &single); 1572 __dev_close_many(&single); 1573 list_del(&single); 1574 } 1575 1576 void dev_close_many(struct list_head *head, bool unlink) 1577 { 1578 struct net_device *dev, *tmp; 1579 1580 /* Remove the devices that don't need to be closed */ 1581 list_for_each_entry_safe(dev, tmp, head, close_list) 1582 if (!(dev->flags & IFF_UP)) 1583 list_del_init(&dev->close_list); 1584 1585 __dev_close_many(head); 1586 1587 list_for_each_entry_safe(dev, tmp, head, close_list) { 1588 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); 1589 call_netdevice_notifiers(NETDEV_DOWN, dev); 1590 if (unlink) 1591 list_del_init(&dev->close_list); 1592 } 1593 } 1594 EXPORT_SYMBOL(dev_close_many); 1595 1596 /** 1597 * dev_close - shutdown an interface. 1598 * @dev: device to shutdown 1599 * 1600 * This function moves an active device into down state. A 1601 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1602 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1603 * chain. 1604 */ 1605 void dev_close(struct net_device *dev) 1606 { 1607 if (dev->flags & IFF_UP) { 1608 LIST_HEAD(single); 1609 1610 list_add(&dev->close_list, &single); 1611 dev_close_many(&single, true); 1612 list_del(&single); 1613 } 1614 } 1615 EXPORT_SYMBOL(dev_close); 1616 1617 1618 /** 1619 * dev_disable_lro - disable Large Receive Offload on a device 1620 * @dev: device 1621 * 1622 * Disable Large Receive Offload (LRO) on a net device. Must be 1623 * called under RTNL. This is needed if received packets may be 1624 * forwarded to another interface. 1625 */ 1626 void dev_disable_lro(struct net_device *dev) 1627 { 1628 struct net_device *lower_dev; 1629 struct list_head *iter; 1630 1631 dev->wanted_features &= ~NETIF_F_LRO; 1632 netdev_update_features(dev); 1633 1634 if (unlikely(dev->features & NETIF_F_LRO)) 1635 netdev_WARN(dev, "failed to disable LRO!\n"); 1636 1637 netdev_for_each_lower_dev(dev, lower_dev, iter) 1638 dev_disable_lro(lower_dev); 1639 } 1640 EXPORT_SYMBOL(dev_disable_lro); 1641 1642 /** 1643 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1644 * @dev: device 1645 * 1646 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1647 * called under RTNL. This is needed if Generic XDP is installed on 1648 * the device. 
1649 */ 1650 static void dev_disable_gro_hw(struct net_device *dev) 1651 { 1652 dev->wanted_features &= ~NETIF_F_GRO_HW; 1653 netdev_update_features(dev); 1654 1655 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1656 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1657 } 1658 1659 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1660 { 1661 #define N(val) \ 1662 case NETDEV_##val: \ 1663 return "NETDEV_" __stringify(val); 1664 switch (cmd) { 1665 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1666 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1667 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1668 N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) 1669 N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) 1670 N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE) 1671 N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1672 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1673 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1674 N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) 1675 N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) 1676 N(XDP_FEAT_CHANGE) 1677 } 1678 #undef N 1679 return "UNKNOWN_NETDEV_EVENT"; 1680 } 1681 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1682 1683 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1684 struct net_device *dev) 1685 { 1686 struct netdev_notifier_info info = { 1687 .dev = dev, 1688 }; 1689 1690 return nb->notifier_call(nb, val, &info); 1691 } 1692 1693 static int call_netdevice_register_notifiers(struct notifier_block *nb, 1694 struct net_device *dev) 1695 { 1696 int err; 1697 1698 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1699 err = notifier_to_errno(err); 1700 if (err) 1701 return err; 1702 1703 if (!(dev->flags & IFF_UP)) 1704 return 0; 1705 1706 call_netdevice_notifier(nb, NETDEV_UP, dev); 1707 return 0; 1708 } 1709 1710 static void call_netdevice_unregister_notifiers(struct notifier_block *nb, 1711 struct net_device *dev) 1712 { 1713 if (dev->flags & IFF_UP) { 1714 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1715 dev); 1716 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1717 } 1718 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1719 } 1720 1721 static int call_netdevice_register_net_notifiers(struct notifier_block *nb, 1722 struct net *net) 1723 { 1724 struct net_device *dev; 1725 int err; 1726 1727 for_each_netdev(net, dev) { 1728 err = call_netdevice_register_notifiers(nb, dev); 1729 if (err) 1730 goto rollback; 1731 } 1732 return 0; 1733 1734 rollback: 1735 for_each_netdev_continue_reverse(net, dev) 1736 call_netdevice_unregister_notifiers(nb, dev); 1737 return err; 1738 } 1739 1740 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb, 1741 struct net *net) 1742 { 1743 struct net_device *dev; 1744 1745 for_each_netdev(net, dev) 1746 call_netdevice_unregister_notifiers(nb, dev); 1747 } 1748 1749 static int dev_boot_phase = 1; 1750 1751 /** 1752 * register_netdevice_notifier - register a network notifier block 1753 * @nb: notifier 1754 * 1755 * Register a notifier to be called when network device events occur. 1756 * The notifier passed is linked into the kernel structures and must 1757 * not be reused until it has been unregistered. A negative errno code 1758 * is returned on a failure. 
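 *
 * A minimal usage sketch (hypothetical notifier, illustrative only):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);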
1759 * 1760 * When registered all registration and up events are replayed 1761 * to the new notifier to allow device to have a race free 1762 * view of the network device list. 1763 */ 1764 1765 int register_netdevice_notifier(struct notifier_block *nb) 1766 { 1767 struct net *net; 1768 int err; 1769 1770 /* Close race with setup_net() and cleanup_net() */ 1771 down_write(&pernet_ops_rwsem); 1772 rtnl_lock(); 1773 err = raw_notifier_chain_register(&netdev_chain, nb); 1774 if (err) 1775 goto unlock; 1776 if (dev_boot_phase) 1777 goto unlock; 1778 for_each_net(net) { 1779 err = call_netdevice_register_net_notifiers(nb, net); 1780 if (err) 1781 goto rollback; 1782 } 1783 1784 unlock: 1785 rtnl_unlock(); 1786 up_write(&pernet_ops_rwsem); 1787 return err; 1788 1789 rollback: 1790 for_each_net_continue_reverse(net) 1791 call_netdevice_unregister_net_notifiers(nb, net); 1792 1793 raw_notifier_chain_unregister(&netdev_chain, nb); 1794 goto unlock; 1795 } 1796 EXPORT_SYMBOL(register_netdevice_notifier); 1797 1798 /** 1799 * unregister_netdevice_notifier - unregister a network notifier block 1800 * @nb: notifier 1801 * 1802 * Unregister a notifier previously registered by 1803 * register_netdevice_notifier(). The notifier is unlinked into the 1804 * kernel structures and may then be reused. A negative errno code 1805 * is returned on a failure. 1806 * 1807 * After unregistering unregister and down device events are synthesized 1808 * for all devices on the device list to the removed notifier to remove 1809 * the need for special case cleanup code. 1810 */ 1811 1812 int unregister_netdevice_notifier(struct notifier_block *nb) 1813 { 1814 struct net *net; 1815 int err; 1816 1817 /* Close race with setup_net() and cleanup_net() */ 1818 down_write(&pernet_ops_rwsem); 1819 rtnl_lock(); 1820 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1821 if (err) 1822 goto unlock; 1823 1824 for_each_net(net) 1825 call_netdevice_unregister_net_notifiers(nb, net); 1826 1827 unlock: 1828 rtnl_unlock(); 1829 up_write(&pernet_ops_rwsem); 1830 return err; 1831 } 1832 EXPORT_SYMBOL(unregister_netdevice_notifier); 1833 1834 static int __register_netdevice_notifier_net(struct net *net, 1835 struct notifier_block *nb, 1836 bool ignore_call_fail) 1837 { 1838 int err; 1839 1840 err = raw_notifier_chain_register(&net->netdev_chain, nb); 1841 if (err) 1842 return err; 1843 if (dev_boot_phase) 1844 return 0; 1845 1846 err = call_netdevice_register_net_notifiers(nb, net); 1847 if (err && !ignore_call_fail) 1848 goto chain_unregister; 1849 1850 return 0; 1851 1852 chain_unregister: 1853 raw_notifier_chain_unregister(&net->netdev_chain, nb); 1854 return err; 1855 } 1856 1857 static int __unregister_netdevice_notifier_net(struct net *net, 1858 struct notifier_block *nb) 1859 { 1860 int err; 1861 1862 err = raw_notifier_chain_unregister(&net->netdev_chain, nb); 1863 if (err) 1864 return err; 1865 1866 call_netdevice_unregister_net_notifiers(nb, net); 1867 return 0; 1868 } 1869 1870 /** 1871 * register_netdevice_notifier_net - register a per-netns network notifier block 1872 * @net: network namespace 1873 * @nb: notifier 1874 * 1875 * Register a notifier to be called when network device events occur. 1876 * The notifier passed is linked into the kernel structures and must 1877 * not be reused until it has been unregistered. A negative errno code 1878 * is returned on a failure. 
1879 * 1880 * When registered all registration and up events are replayed 1881 * to the new notifier to allow device to have a race free 1882 * view of the network device list. 1883 */ 1884 1885 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) 1886 { 1887 int err; 1888 1889 rtnl_lock(); 1890 err = __register_netdevice_notifier_net(net, nb, false); 1891 rtnl_unlock(); 1892 return err; 1893 } 1894 EXPORT_SYMBOL(register_netdevice_notifier_net); 1895 1896 /** 1897 * unregister_netdevice_notifier_net - unregister a per-netns 1898 * network notifier block 1899 * @net: network namespace 1900 * @nb: notifier 1901 * 1902 * Unregister a notifier previously registered by 1903 * register_netdevice_notifier_net(). The notifier is unlinked from the 1904 * kernel structures and may then be reused. A negative errno code 1905 * is returned on a failure. 1906 * 1907 * After unregistering unregister and down device events are synthesized 1908 * for all devices on the device list to the removed notifier to remove 1909 * the need for special case cleanup code. 1910 */ 1911 1912 int unregister_netdevice_notifier_net(struct net *net, 1913 struct notifier_block *nb) 1914 { 1915 int err; 1916 1917 rtnl_lock(); 1918 err = __unregister_netdevice_notifier_net(net, nb); 1919 rtnl_unlock(); 1920 return err; 1921 } 1922 EXPORT_SYMBOL(unregister_netdevice_notifier_net); 1923 1924 static void __move_netdevice_notifier_net(struct net *src_net, 1925 struct net *dst_net, 1926 struct notifier_block *nb) 1927 { 1928 __unregister_netdevice_notifier_net(src_net, nb); 1929 __register_netdevice_notifier_net(dst_net, nb, true); 1930 } 1931 1932 int register_netdevice_notifier_dev_net(struct net_device *dev, 1933 struct notifier_block *nb, 1934 struct netdev_net_notifier *nn) 1935 { 1936 int err; 1937 1938 rtnl_lock(); 1939 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); 1940 if (!err) { 1941 nn->nb = nb; 1942 list_add(&nn->list, &dev->net_notifier_list); 1943 } 1944 rtnl_unlock(); 1945 return err; 1946 } 1947 EXPORT_SYMBOL(register_netdevice_notifier_dev_net); 1948 1949 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 1950 struct notifier_block *nb, 1951 struct netdev_net_notifier *nn) 1952 { 1953 int err; 1954 1955 rtnl_lock(); 1956 list_del(&nn->list); 1957 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); 1958 rtnl_unlock(); 1959 return err; 1960 } 1961 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); 1962 1963 static void move_netdevice_notifiers_dev_net(struct net_device *dev, 1964 struct net *net) 1965 { 1966 struct netdev_net_notifier *nn; 1967 1968 list_for_each_entry(nn, &dev->net_notifier_list, list) 1969 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb); 1970 } 1971 1972 /** 1973 * call_netdevice_notifiers_info - call all network notifier blocks 1974 * @val: value passed unmodified to notifier function 1975 * @info: notifier information data 1976 * 1977 * Call all network notifier blocks. Parameters and return value 1978 * are as for raw_notifier_call_chain(). 1979 */ 1980 1981 int call_netdevice_notifiers_info(unsigned long val, 1982 struct netdev_notifier_info *info) 1983 { 1984 struct net *net = dev_net(info->dev); 1985 int ret; 1986 1987 ASSERT_RTNL(); 1988 1989 /* Run per-netns notifier block chain first, then run the global one. 1990 * Hopefully, one day, the global one is going to be removed after 1991 * all notifier block registrators get converted to be per-netns. 
1992 */ 1993 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1994 if (ret & NOTIFY_STOP_MASK) 1995 return ret; 1996 return raw_notifier_call_chain(&netdev_chain, val, info); 1997 } 1998 1999 /** 2000 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks 2001 * for and rollback on error 2002 * @val_up: value passed unmodified to notifier function 2003 * @val_down: value passed unmodified to the notifier function when 2004 * recovering from an error on @val_up 2005 * @info: notifier information data 2006 * 2007 * Call all per-netns network notifier blocks, but not notifier blocks on 2008 * the global notifier chain. Parameters and return value are as for 2009 * raw_notifier_call_chain_robust(). 2010 */ 2011 2012 static int 2013 call_netdevice_notifiers_info_robust(unsigned long val_up, 2014 unsigned long val_down, 2015 struct netdev_notifier_info *info) 2016 { 2017 struct net *net = dev_net(info->dev); 2018 2019 ASSERT_RTNL(); 2020 2021 return raw_notifier_call_chain_robust(&net->netdev_chain, 2022 val_up, val_down, info); 2023 } 2024 2025 static int call_netdevice_notifiers_extack(unsigned long val, 2026 struct net_device *dev, 2027 struct netlink_ext_ack *extack) 2028 { 2029 struct netdev_notifier_info info = { 2030 .dev = dev, 2031 .extack = extack, 2032 }; 2033 2034 return call_netdevice_notifiers_info(val, &info); 2035 } 2036 2037 /** 2038 * call_netdevice_notifiers - call all network notifier blocks 2039 * @val: value passed unmodified to notifier function 2040 * @dev: net_device pointer passed unmodified to notifier function 2041 * 2042 * Call all network notifier blocks. Parameters and return value 2043 * are as for raw_notifier_call_chain(). 2044 */ 2045 2046 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 2047 { 2048 return call_netdevice_notifiers_extack(val, dev, NULL); 2049 } 2050 EXPORT_SYMBOL(call_netdevice_notifiers); 2051 2052 /** 2053 * call_netdevice_notifiers_mtu - call all network notifier blocks 2054 * @val: value passed unmodified to notifier function 2055 * @dev: net_device pointer passed unmodified to notifier function 2056 * @arg: additional u32 argument passed to the notifier function 2057 * 2058 * Call all network notifier blocks. Parameters and return value 2059 * are as for raw_notifier_call_chain(). 
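 *
 * Because the embedded info is the first member of
 * struct netdev_notifier_info_ext (see the BUILD_BUG_ON() below), a
 * listener can recover the extra argument from the notifier data. An
 * illustrative sketch only; the callback name is made up:
 *
 *	// Hypothetical listener for MTU pre-change events.
 *	static int my_mtu_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct netdev_notifier_info_ext *ext = ptr;
 *
 *		if (event == NETDEV_PRECHANGEMTU)
 *			pr_info("%s: mtu change to %u requested\n",
 *				ext->info.dev->name, ext->ext.mtu);
 *		return NOTIFY_DONE;
 *	}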
2060 */ 2061 static int call_netdevice_notifiers_mtu(unsigned long val, 2062 struct net_device *dev, u32 arg) 2063 { 2064 struct netdev_notifier_info_ext info = { 2065 .info.dev = dev, 2066 .ext.mtu = arg, 2067 }; 2068 2069 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2070 2071 return call_netdevice_notifiers_info(val, &info.info); 2072 } 2073 2074 #ifdef CONFIG_NET_INGRESS 2075 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2076 2077 void net_inc_ingress_queue(void) 2078 { 2079 static_branch_inc(&ingress_needed_key); 2080 } 2081 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2082 2083 void net_dec_ingress_queue(void) 2084 { 2085 static_branch_dec(&ingress_needed_key); 2086 } 2087 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2088 #endif 2089 2090 #ifdef CONFIG_NET_EGRESS 2091 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2092 2093 void net_inc_egress_queue(void) 2094 { 2095 static_branch_inc(&egress_needed_key); 2096 } 2097 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2098 2099 void net_dec_egress_queue(void) 2100 { 2101 static_branch_dec(&egress_needed_key); 2102 } 2103 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2104 #endif 2105 2106 #ifdef CONFIG_NET_CLS_ACT 2107 DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key); 2108 EXPORT_SYMBOL(tcf_bypass_check_needed_key); 2109 #endif 2110 2111 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2112 EXPORT_SYMBOL(netstamp_needed_key); 2113 #ifdef CONFIG_JUMP_LABEL 2114 static atomic_t netstamp_needed_deferred; 2115 static atomic_t netstamp_wanted; 2116 static void netstamp_clear(struct work_struct *work) 2117 { 2118 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2119 int wanted; 2120 2121 wanted = atomic_add_return(deferred, &netstamp_wanted); 2122 if (wanted > 0) 2123 static_branch_enable(&netstamp_needed_key); 2124 else 2125 static_branch_disable(&netstamp_needed_key); 2126 } 2127 static DECLARE_WORK(netstamp_work, netstamp_clear); 2128 #endif 2129 2130 void net_enable_timestamp(void) 2131 { 2132 #ifdef CONFIG_JUMP_LABEL 2133 int wanted = atomic_read(&netstamp_wanted); 2134 2135 while (wanted > 0) { 2136 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1)) 2137 return; 2138 } 2139 atomic_inc(&netstamp_needed_deferred); 2140 schedule_work(&netstamp_work); 2141 #else 2142 static_branch_inc(&netstamp_needed_key); 2143 #endif 2144 } 2145 EXPORT_SYMBOL(net_enable_timestamp); 2146 2147 void net_disable_timestamp(void) 2148 { 2149 #ifdef CONFIG_JUMP_LABEL 2150 int wanted = atomic_read(&netstamp_wanted); 2151 2152 while (wanted > 1) { 2153 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1)) 2154 return; 2155 } 2156 atomic_dec(&netstamp_needed_deferred); 2157 schedule_work(&netstamp_work); 2158 #else 2159 static_branch_dec(&netstamp_needed_key); 2160 #endif 2161 } 2162 EXPORT_SYMBOL(net_disable_timestamp); 2163 2164 static inline void net_timestamp_set(struct sk_buff *skb) 2165 { 2166 skb->tstamp = 0; 2167 skb->tstamp_type = SKB_CLOCK_REALTIME; 2168 if (static_branch_unlikely(&netstamp_needed_key)) 2169 skb->tstamp = ktime_get_real(); 2170 } 2171 2172 #define net_timestamp_check(COND, SKB) \ 2173 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2174 if ((COND) && !(SKB)->tstamp) \ 2175 (SKB)->tstamp = ktime_get_real(); \ 2176 } \ 2177 2178 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2179 { 2180 return __is_skb_forwardable(dev, skb, true); 2181 } 2182 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2183 2184 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff 
*skb, 2185 bool check_mtu) 2186 { 2187 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2188 2189 if (likely(!ret)) { 2190 skb->protocol = eth_type_trans(skb, dev); 2191 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2192 } 2193 2194 return ret; 2195 } 2196 2197 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2198 { 2199 return __dev_forward_skb2(dev, skb, true); 2200 } 2201 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2202 2203 /** 2204 * dev_forward_skb - loopback an skb to another netif 2205 * 2206 * @dev: destination network device 2207 * @skb: buffer to forward 2208 * 2209 * return values: 2210 * NET_RX_SUCCESS (no congestion) 2211 * NET_RX_DROP (packet was dropped, but freed) 2212 * 2213 * dev_forward_skb can be used for injecting an skb from the 2214 * start_xmit function of one device into the receive queue 2215 * of another device. 2216 * 2217 * The receiving device may be in another namespace, so 2218 * we have to clear all information in the skb that could 2219 * impact namespace isolation. 2220 */ 2221 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2222 { 2223 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2224 } 2225 EXPORT_SYMBOL_GPL(dev_forward_skb); 2226 2227 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2228 { 2229 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2230 } 2231 2232 static inline int deliver_skb(struct sk_buff *skb, 2233 struct packet_type *pt_prev, 2234 struct net_device *orig_dev) 2235 { 2236 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2237 return -ENOMEM; 2238 refcount_inc(&skb->users); 2239 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2240 } 2241 2242 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2243 struct packet_type **pt, 2244 struct net_device *orig_dev, 2245 __be16 type, 2246 struct list_head *ptype_list) 2247 { 2248 struct packet_type *ptype, *pt_prev = *pt; 2249 2250 list_for_each_entry_rcu(ptype, ptype_list, list) { 2251 if (ptype->type != type) 2252 continue; 2253 if (pt_prev) 2254 deliver_skb(skb, pt_prev, orig_dev); 2255 pt_prev = ptype; 2256 } 2257 *pt = pt_prev; 2258 } 2259 2260 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2261 { 2262 if (!ptype->af_packet_priv || !skb->sk) 2263 return false; 2264 2265 if (ptype->id_match) 2266 return ptype->id_match(ptype, skb->sk); 2267 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2268 return true; 2269 2270 return false; 2271 } 2272 2273 /** 2274 * dev_nit_active - return true if any network interface taps are in use 2275 * 2276 * @dev: network device to check for the presence of taps 2277 */ 2278 bool dev_nit_active(struct net_device *dev) 2279 { 2280 return !list_empty(&net_hotdata.ptype_all) || 2281 !list_empty(&dev->ptype_all); 2282 } 2283 EXPORT_SYMBOL_GPL(dev_nit_active); 2284 2285 /* 2286 * Support routine. Sends outgoing frames to any network 2287 * taps currently in use. 
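 * A "tap" here is a protocol handler registered for all packet types, such
 * as an AF_PACKET socket: each interested tap receives a reference to a
 * single clone of the outgoing skb, and that clone is created at most once
 * per call.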
2288 */ 2289 2290 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2291 { 2292 struct list_head *ptype_list = &net_hotdata.ptype_all; 2293 struct packet_type *ptype, *pt_prev = NULL; 2294 struct sk_buff *skb2 = NULL; 2295 2296 rcu_read_lock(); 2297 again: 2298 list_for_each_entry_rcu(ptype, ptype_list, list) { 2299 if (READ_ONCE(ptype->ignore_outgoing)) 2300 continue; 2301 2302 /* Never send packets back to the socket 2303 * they originated from - MvS (miquels@drinkel.ow.org) 2304 */ 2305 if (skb_loop_sk(ptype, skb)) 2306 continue; 2307 2308 if (pt_prev) { 2309 deliver_skb(skb2, pt_prev, skb->dev); 2310 pt_prev = ptype; 2311 continue; 2312 } 2313 2314 /* need to clone skb, done only once */ 2315 skb2 = skb_clone(skb, GFP_ATOMIC); 2316 if (!skb2) 2317 goto out_unlock; 2318 2319 net_timestamp_set(skb2); 2320 2321 /* skb->nh should be correctly 2322 * set by sender, so that the second statement is 2323 * just protection against buggy protocols. 2324 */ 2325 skb_reset_mac_header(skb2); 2326 2327 if (skb_network_header(skb2) < skb2->data || 2328 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2329 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2330 ntohs(skb2->protocol), 2331 dev->name); 2332 skb_reset_network_header(skb2); 2333 } 2334 2335 skb2->transport_header = skb2->network_header; 2336 skb2->pkt_type = PACKET_OUTGOING; 2337 pt_prev = ptype; 2338 } 2339 2340 if (ptype_list == &net_hotdata.ptype_all) { 2341 ptype_list = &dev->ptype_all; 2342 goto again; 2343 } 2344 out_unlock: 2345 if (pt_prev) { 2346 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2347 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2348 else 2349 kfree_skb(skb2); 2350 } 2351 rcu_read_unlock(); 2352 } 2353 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2354 2355 /** 2356 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2357 * @dev: Network device 2358 * @txq: number of queues available 2359 * 2360 * If real_num_tx_queues is changed the tc mappings may no longer be 2361 * valid. To resolve this verify the tc mapping remains valid and if 2362 * not NULL the mapping. With no priorities mapping to this 2363 * offset/count pair it will no longer be used. In the worst case TC0 2364 * is invalid nothing can be done so disable priority mappings. If is 2365 * expected that drivers will fix this mapping if they can before 2366 * calling netif_set_real_num_tx_queues. 2367 */ 2368 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2369 { 2370 int i; 2371 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2372 2373 /* If TC0 is invalidated disable TC mapping */ 2374 if (tc->offset + tc->count > txq) { 2375 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2376 dev->num_tc = 0; 2377 return; 2378 } 2379 2380 /* Invalidated prio to tc mappings set to TC0 */ 2381 for (i = 1; i < TC_BITMASK + 1; i++) { 2382 int q = netdev_get_prio_tc_map(dev, i); 2383 2384 tc = &dev->tc_to_txq[q]; 2385 if (tc->offset + tc->count > txq) { 2386 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2387 i, q); 2388 netdev_set_prio_tc_map(dev, i, 0); 2389 } 2390 } 2391 } 2392 2393 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2394 { 2395 if (dev->num_tc) { 2396 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2397 int i; 2398 2399 /* walk through the TCs and see if it falls into any of them */ 2400 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2401 if ((txq - tc->offset) < tc->count) 2402 return i; 2403 } 2404 2405 /* didn't find it, just return -1 to indicate no match */ 2406 return -1; 2407 } 2408 2409 return 0; 2410 } 2411 EXPORT_SYMBOL(netdev_txq_to_tc); 2412 2413 #ifdef CONFIG_XPS 2414 static struct static_key xps_needed __read_mostly; 2415 static struct static_key xps_rxqs_needed __read_mostly; 2416 static DEFINE_MUTEX(xps_map_mutex); 2417 #define xmap_dereference(P) \ 2418 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2419 2420 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2421 struct xps_dev_maps *old_maps, int tci, u16 index) 2422 { 2423 struct xps_map *map = NULL; 2424 int pos; 2425 2426 map = xmap_dereference(dev_maps->attr_map[tci]); 2427 if (!map) 2428 return false; 2429 2430 for (pos = map->len; pos--;) { 2431 if (map->queues[pos] != index) 2432 continue; 2433 2434 if (map->len > 1) { 2435 map->queues[pos] = map->queues[--map->len]; 2436 break; 2437 } 2438 2439 if (old_maps) 2440 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2441 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2442 kfree_rcu(map, rcu); 2443 return false; 2444 } 2445 2446 return true; 2447 } 2448 2449 static bool remove_xps_queue_cpu(struct net_device *dev, 2450 struct xps_dev_maps *dev_maps, 2451 int cpu, u16 offset, u16 count) 2452 { 2453 int num_tc = dev_maps->num_tc; 2454 bool active = false; 2455 int tci; 2456 2457 for (tci = cpu * num_tc; num_tc--; tci++) { 2458 int i, j; 2459 2460 for (i = count, j = offset; i--; j++) { 2461 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2462 break; 2463 } 2464 2465 active |= i < 0; 2466 } 2467 2468 return active; 2469 } 2470 2471 static void reset_xps_maps(struct net_device *dev, 2472 struct xps_dev_maps *dev_maps, 2473 enum xps_map_type type) 2474 { 2475 static_key_slow_dec_cpuslocked(&xps_needed); 2476 if (type == XPS_RXQS) 2477 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2478 2479 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2480 2481 kfree_rcu(dev_maps, rcu); 2482 } 2483 2484 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2485 u16 offset, u16 count) 2486 { 2487 struct xps_dev_maps *dev_maps; 2488 bool active = false; 2489 int i, j; 2490 2491 dev_maps = xmap_dereference(dev->xps_maps[type]); 2492 if (!dev_maps) 2493 return; 2494 2495 for (j = 0; j < dev_maps->nr_ids; j++) 2496 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2497 if (!active) 2498 reset_xps_maps(dev, dev_maps, type); 2499 2500 if (type == XPS_CPUS) { 2501 for (i = offset + (count - 1); count--; i--) 2502 netdev_queue_numa_node_write( 2503 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2504 } 2505 } 2506 2507 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2508 u16 count) 2509 { 2510 if (!static_key_false(&xps_needed)) 2511 return; 2512 2513 cpus_read_lock(); 2514 mutex_lock(&xps_map_mutex); 2515 2516 if (static_key_false(&xps_rxqs_needed)) 2517 clean_xps_maps(dev, XPS_RXQS, offset, count); 2518 2519 clean_xps_maps(dev, XPS_CPUS, offset, count); 2520 2521 mutex_unlock(&xps_map_mutex); 2522 cpus_read_unlock(); 2523 } 2524 2525 static void 
netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2526 { 2527 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2528 } 2529 2530 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2531 u16 index, bool is_rxqs_map) 2532 { 2533 struct xps_map *new_map; 2534 int alloc_len = XPS_MIN_MAP_ALLOC; 2535 int i, pos; 2536 2537 for (pos = 0; map && pos < map->len; pos++) { 2538 if (map->queues[pos] != index) 2539 continue; 2540 return map; 2541 } 2542 2543 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2544 if (map) { 2545 if (pos < map->alloc_len) 2546 return map; 2547 2548 alloc_len = map->alloc_len * 2; 2549 } 2550 2551 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2552 * map 2553 */ 2554 if (is_rxqs_map) 2555 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2556 else 2557 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2558 cpu_to_node(attr_index)); 2559 if (!new_map) 2560 return NULL; 2561 2562 for (i = 0; i < pos; i++) 2563 new_map->queues[i] = map->queues[i]; 2564 new_map->alloc_len = alloc_len; 2565 new_map->len = pos; 2566 2567 return new_map; 2568 } 2569 2570 /* Copy xps maps at a given index */ 2571 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2572 struct xps_dev_maps *new_dev_maps, int index, 2573 int tc, bool skip_tc) 2574 { 2575 int i, tci = index * dev_maps->num_tc; 2576 struct xps_map *map; 2577 2578 /* copy maps belonging to foreign traffic classes */ 2579 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2580 if (i == tc && skip_tc) 2581 continue; 2582 2583 /* fill in the new device map from the old device map */ 2584 map = xmap_dereference(dev_maps->attr_map[tci]); 2585 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2586 } 2587 } 2588 2589 /* Must be called under cpus_read_lock */ 2590 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2591 u16 index, enum xps_map_type type) 2592 { 2593 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2594 const unsigned long *online_mask = NULL; 2595 bool active = false, copy = false; 2596 int i, j, tci, numa_node_id = -2; 2597 int maps_sz, num_tc = 1, tc = 0; 2598 struct xps_map *map, *new_map; 2599 unsigned int nr_ids; 2600 2601 WARN_ON_ONCE(index >= dev->num_tx_queues); 2602 2603 if (dev->num_tc) { 2604 /* Do not allow XPS on subordinate device directly */ 2605 num_tc = dev->num_tc; 2606 if (num_tc < 0) 2607 return -EINVAL; 2608 2609 /* If queue belongs to subordinate dev use its map */ 2610 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2611 2612 tc = netdev_txq_to_tc(dev, index); 2613 if (tc < 0) 2614 return -EINVAL; 2615 } 2616 2617 mutex_lock(&xps_map_mutex); 2618 2619 dev_maps = xmap_dereference(dev->xps_maps[type]); 2620 if (type == XPS_RXQS) { 2621 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2622 nr_ids = dev->num_rx_queues; 2623 } else { 2624 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2625 if (num_possible_cpus() > 1) 2626 online_mask = cpumask_bits(cpu_online_mask); 2627 nr_ids = nr_cpu_ids; 2628 } 2629 2630 if (maps_sz < L1_CACHE_BYTES) 2631 maps_sz = L1_CACHE_BYTES; 2632 2633 /* The old dev_maps could be larger or smaller than the one we're 2634 * setting up now, as dev->num_tc or nr_ids could have been updated in 2635 * between. We could try to be smart, but let's be safe instead and only 2636 * copy foreign traffic classes if the two map sizes match. 
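 * (Entries are indexed as tci = id * num_tc + tc, so a change in either
 * num_tc or nr_ids would shift every index and leave the old entries in
 * the wrong slots anyway.)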
2637 */ 2638 if (dev_maps && 2639 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2640 copy = true; 2641 2642 /* allocate memory for queue storage */ 2643 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2644 j < nr_ids;) { 2645 if (!new_dev_maps) { 2646 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2647 if (!new_dev_maps) { 2648 mutex_unlock(&xps_map_mutex); 2649 return -ENOMEM; 2650 } 2651 2652 new_dev_maps->nr_ids = nr_ids; 2653 new_dev_maps->num_tc = num_tc; 2654 } 2655 2656 tci = j * num_tc + tc; 2657 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2658 2659 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2660 if (!map) 2661 goto error; 2662 2663 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2664 } 2665 2666 if (!new_dev_maps) 2667 goto out_no_new_maps; 2668 2669 if (!dev_maps) { 2670 /* Increment static keys at most once per type */ 2671 static_key_slow_inc_cpuslocked(&xps_needed); 2672 if (type == XPS_RXQS) 2673 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2674 } 2675 2676 for (j = 0; j < nr_ids; j++) { 2677 bool skip_tc = false; 2678 2679 tci = j * num_tc + tc; 2680 if (netif_attr_test_mask(j, mask, nr_ids) && 2681 netif_attr_test_online(j, online_mask, nr_ids)) { 2682 /* add tx-queue to CPU/rx-queue maps */ 2683 int pos = 0; 2684 2685 skip_tc = true; 2686 2687 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2688 while ((pos < map->len) && (map->queues[pos] != index)) 2689 pos++; 2690 2691 if (pos == map->len) 2692 map->queues[map->len++] = index; 2693 #ifdef CONFIG_NUMA 2694 if (type == XPS_CPUS) { 2695 if (numa_node_id == -2) 2696 numa_node_id = cpu_to_node(j); 2697 else if (numa_node_id != cpu_to_node(j)) 2698 numa_node_id = -1; 2699 } 2700 #endif 2701 } 2702 2703 if (copy) 2704 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2705 skip_tc); 2706 } 2707 2708 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2709 2710 /* Cleanup old maps */ 2711 if (!dev_maps) 2712 goto out_no_old_maps; 2713 2714 for (j = 0; j < dev_maps->nr_ids; j++) { 2715 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2716 map = xmap_dereference(dev_maps->attr_map[tci]); 2717 if (!map) 2718 continue; 2719 2720 if (copy) { 2721 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2722 if (map == new_map) 2723 continue; 2724 } 2725 2726 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2727 kfree_rcu(map, rcu); 2728 } 2729 } 2730 2731 old_dev_maps = dev_maps; 2732 2733 out_no_old_maps: 2734 dev_maps = new_dev_maps; 2735 active = true; 2736 2737 out_no_new_maps: 2738 if (type == XPS_CPUS) 2739 /* update Tx queue numa node */ 2740 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2741 (numa_node_id >= 0) ? 2742 numa_node_id : NUMA_NO_NODE); 2743 2744 if (!dev_maps) 2745 goto out_no_maps; 2746 2747 /* removes tx-queue from unused CPUs/rx-queues */ 2748 for (j = 0; j < dev_maps->nr_ids; j++) { 2749 tci = j * dev_maps->num_tc; 2750 2751 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2752 if (i == tc && 2753 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2754 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2755 continue; 2756 2757 active |= remove_xps_queue(dev_maps, 2758 copy ? 
old_dev_maps : NULL, 2759 tci, index); 2760 } 2761 } 2762 2763 if (old_dev_maps) 2764 kfree_rcu(old_dev_maps, rcu); 2765 2766 /* free map if not active */ 2767 if (!active) 2768 reset_xps_maps(dev, dev_maps, type); 2769 2770 out_no_maps: 2771 mutex_unlock(&xps_map_mutex); 2772 2773 return 0; 2774 error: 2775 /* remove any maps that we added */ 2776 for (j = 0; j < nr_ids; j++) { 2777 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2778 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2779 map = copy ? 2780 xmap_dereference(dev_maps->attr_map[tci]) : 2781 NULL; 2782 if (new_map && new_map != map) 2783 kfree(new_map); 2784 } 2785 } 2786 2787 mutex_unlock(&xps_map_mutex); 2788 2789 kfree(new_dev_maps); 2790 return -ENOMEM; 2791 } 2792 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2793 2794 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2795 u16 index) 2796 { 2797 int ret; 2798 2799 cpus_read_lock(); 2800 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2801 cpus_read_unlock(); 2802 2803 return ret; 2804 } 2805 EXPORT_SYMBOL(netif_set_xps_queue); 2806 2807 #endif 2808 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2809 { 2810 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2811 2812 /* Unbind any subordinate channels */ 2813 while (txq-- != &dev->_tx[0]) { 2814 if (txq->sb_dev) 2815 netdev_unbind_sb_channel(dev, txq->sb_dev); 2816 } 2817 } 2818 2819 void netdev_reset_tc(struct net_device *dev) 2820 { 2821 #ifdef CONFIG_XPS 2822 netif_reset_xps_queues_gt(dev, 0); 2823 #endif 2824 netdev_unbind_all_sb_channels(dev); 2825 2826 /* Reset TC configuration of device */ 2827 dev->num_tc = 0; 2828 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2829 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2830 } 2831 EXPORT_SYMBOL(netdev_reset_tc); 2832 2833 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2834 { 2835 if (tc >= dev->num_tc) 2836 return -EINVAL; 2837 2838 #ifdef CONFIG_XPS 2839 netif_reset_xps_queues(dev, offset, count); 2840 #endif 2841 dev->tc_to_txq[tc].count = count; 2842 dev->tc_to_txq[tc].offset = offset; 2843 return 0; 2844 } 2845 EXPORT_SYMBOL(netdev_set_tc_queue); 2846 2847 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2848 { 2849 if (num_tc > TC_MAX_QUEUE) 2850 return -EINVAL; 2851 2852 #ifdef CONFIG_XPS 2853 netif_reset_xps_queues_gt(dev, 0); 2854 #endif 2855 netdev_unbind_all_sb_channels(dev); 2856 2857 dev->num_tc = num_tc; 2858 return 0; 2859 } 2860 EXPORT_SYMBOL(netdev_set_num_tc); 2861 2862 void netdev_unbind_sb_channel(struct net_device *dev, 2863 struct net_device *sb_dev) 2864 { 2865 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2866 2867 #ifdef CONFIG_XPS 2868 netif_reset_xps_queues_gt(sb_dev, 0); 2869 #endif 2870 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2871 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2872 2873 while (txq-- != &dev->_tx[0]) { 2874 if (txq->sb_dev == sb_dev) 2875 txq->sb_dev = NULL; 2876 } 2877 } 2878 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2879 2880 int netdev_bind_sb_channel_queue(struct net_device *dev, 2881 struct net_device *sb_dev, 2882 u8 tc, u16 count, u16 offset) 2883 { 2884 /* Make certain the sb_dev and dev are already configured */ 2885 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2886 return -EINVAL; 2887 2888 /* We cannot hand out queues we don't have */ 2889 if ((offset + count) > dev->real_num_tx_queues) 2890 return -EINVAL; 2891 2892 /* Record the mapping */ 2893 
sb_dev->tc_to_txq[tc].count = count; 2894 sb_dev->tc_to_txq[tc].offset = offset; 2895 2896 /* Provide a way for Tx queue to find the tc_to_txq map or 2897 * XPS map for itself. 2898 */ 2899 while (count--) 2900 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2901 2902 return 0; 2903 } 2904 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2905 2906 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2907 { 2908 /* Do not use a multiqueue device to represent a subordinate channel */ 2909 if (netif_is_multiqueue(dev)) 2910 return -ENODEV; 2911 2912 /* We allow channels 1 - 32767 to be used for subordinate channels. 2913 * Channel 0 is meant to be "native" mode and used only to represent 2914 * the main root device. We allow writing 0 to reset the device back 2915 * to normal mode after being used as a subordinate channel. 2916 */ 2917 if (channel > S16_MAX) 2918 return -EINVAL; 2919 2920 dev->num_tc = -channel; 2921 2922 return 0; 2923 } 2924 EXPORT_SYMBOL(netdev_set_sb_channel); 2925 2926 /* 2927 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2928 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2929 */ 2930 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2931 { 2932 bool disabling; 2933 int rc; 2934 2935 disabling = txq < dev->real_num_tx_queues; 2936 2937 if (txq < 1 || txq > dev->num_tx_queues) 2938 return -EINVAL; 2939 2940 if (dev->reg_state == NETREG_REGISTERED || 2941 dev->reg_state == NETREG_UNREGISTERING) { 2942 ASSERT_RTNL(); 2943 2944 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2945 txq); 2946 if (rc) 2947 return rc; 2948 2949 if (dev->num_tc) 2950 netif_setup_tc(dev, txq); 2951 2952 net_shaper_set_real_num_tx_queues(dev, txq); 2953 2954 dev_qdisc_change_real_num_tx(dev, txq); 2955 2956 dev->real_num_tx_queues = txq; 2957 2958 if (disabling) { 2959 synchronize_net(); 2960 qdisc_reset_all_tx_gt(dev, txq); 2961 #ifdef CONFIG_XPS 2962 netif_reset_xps_queues_gt(dev, txq); 2963 #endif 2964 } 2965 } else { 2966 dev->real_num_tx_queues = txq; 2967 } 2968 2969 return 0; 2970 } 2971 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2972 2973 #ifdef CONFIG_SYSFS 2974 /** 2975 * netif_set_real_num_rx_queues - set actual number of RX queues used 2976 * @dev: Network device 2977 * @rxq: Actual number of RX queues 2978 * 2979 * This must be called either with the rtnl_lock held or before 2980 * registration of the net device. Returns 0 on success, or a 2981 * negative error code. If called before registration, it always 2982 * succeeds. 2983 */ 2984 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2985 { 2986 int rc; 2987 2988 if (rxq < 1 || rxq > dev->num_rx_queues) 2989 return -EINVAL; 2990 2991 if (dev->reg_state == NETREG_REGISTERED) { 2992 ASSERT_RTNL(); 2993 2994 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2995 rxq); 2996 if (rc) 2997 return rc; 2998 } 2999 3000 dev->real_num_rx_queues = rxq; 3001 return 0; 3002 } 3003 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 3004 #endif 3005 3006 /** 3007 * netif_set_real_num_queues - set actual number of RX and TX queues used 3008 * @dev: Network device 3009 * @txq: Actual number of TX queues 3010 * @rxq: Actual number of RX queues 3011 * 3012 * Set the real number of both TX and RX queues. 3013 * Does nothing if the number of queues is already correct. 
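 *
 * An illustrative driver-side sketch (identifiers invented for the
 * example), typically run with the rtnl lock held during a channel
 * reconfiguration:
 *
 *	// Hypothetical reconfiguration path.
 *	err = netif_set_real_num_queues(netdev, new_channels, new_channels);
 *	if (err)
 *		goto err_restore_old_queues;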
3014 */ 3015 int netif_set_real_num_queues(struct net_device *dev, 3016 unsigned int txq, unsigned int rxq) 3017 { 3018 unsigned int old_rxq = dev->real_num_rx_queues; 3019 int err; 3020 3021 if (txq < 1 || txq > dev->num_tx_queues || 3022 rxq < 1 || rxq > dev->num_rx_queues) 3023 return -EINVAL; 3024 3025 /* Start from increases, so the error path only does decreases - 3026 * decreases can't fail. 3027 */ 3028 if (rxq > dev->real_num_rx_queues) { 3029 err = netif_set_real_num_rx_queues(dev, rxq); 3030 if (err) 3031 return err; 3032 } 3033 if (txq > dev->real_num_tx_queues) { 3034 err = netif_set_real_num_tx_queues(dev, txq); 3035 if (err) 3036 goto undo_rx; 3037 } 3038 if (rxq < dev->real_num_rx_queues) 3039 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 3040 if (txq < dev->real_num_tx_queues) 3041 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 3042 3043 return 0; 3044 undo_rx: 3045 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 3046 return err; 3047 } 3048 EXPORT_SYMBOL(netif_set_real_num_queues); 3049 3050 /** 3051 * netif_set_tso_max_size() - set the max size of TSO frames supported 3052 * @dev: netdev to update 3053 * @size: max skb->len of a TSO frame 3054 * 3055 * Set the limit on the size of TSO super-frames the device can handle. 3056 * Unless explicitly set the stack will assume the value of 3057 * %GSO_LEGACY_MAX_SIZE. 3058 */ 3059 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) 3060 { 3061 dev->tso_max_size = min(GSO_MAX_SIZE, size); 3062 if (size < READ_ONCE(dev->gso_max_size)) 3063 netif_set_gso_max_size(dev, size); 3064 if (size < READ_ONCE(dev->gso_ipv4_max_size)) 3065 netif_set_gso_ipv4_max_size(dev, size); 3066 } 3067 EXPORT_SYMBOL(netif_set_tso_max_size); 3068 3069 /** 3070 * netif_set_tso_max_segs() - set the max number of segs supported for TSO 3071 * @dev: netdev to update 3072 * @segs: max number of TCP segments 3073 * 3074 * Set the limit on the number of TCP segments the device can generate from 3075 * a single TSO super-frame. 3076 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. 3077 */ 3078 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) 3079 { 3080 dev->tso_max_segs = segs; 3081 if (segs < READ_ONCE(dev->gso_max_segs)) 3082 netif_set_gso_max_segs(dev, segs); 3083 } 3084 EXPORT_SYMBOL(netif_set_tso_max_segs); 3085 3086 /** 3087 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper 3088 * @to: netdev to update 3089 * @from: netdev from which to copy the limits 3090 */ 3091 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) 3092 { 3093 netif_set_tso_max_size(to, from->tso_max_size); 3094 netif_set_tso_max_segs(to, from->tso_max_segs); 3095 } 3096 EXPORT_SYMBOL(netif_inherit_tso_max); 3097 3098 /** 3099 * netif_get_num_default_rss_queues - default number of RSS queues 3100 * 3101 * Default value is the number of physical cores if there are only 1 or 2, or 3102 * divided by 2 if there are more. 3103 */ 3104 int netif_get_num_default_rss_queues(void) 3105 { 3106 cpumask_var_t cpus; 3107 int cpu, count = 0; 3108 3109 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) 3110 return 1; 3111 3112 cpumask_copy(cpus, cpu_online_mask); 3113 for_each_cpu(cpu, cpus) { 3114 ++count; 3115 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); 3116 } 3117 free_cpumask_var(cpus); 3118 3119 return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; 3120 } 3121 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3122 3123 static void __netif_reschedule(struct Qdisc *q) 3124 { 3125 struct softnet_data *sd; 3126 unsigned long flags; 3127 3128 local_irq_save(flags); 3129 sd = this_cpu_ptr(&softnet_data); 3130 q->next_sched = NULL; 3131 *sd->output_queue_tailp = q; 3132 sd->output_queue_tailp = &q->next_sched; 3133 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3134 local_irq_restore(flags); 3135 } 3136 3137 void __netif_schedule(struct Qdisc *q) 3138 { 3139 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3140 __netif_reschedule(q); 3141 } 3142 EXPORT_SYMBOL(__netif_schedule); 3143 3144 struct dev_kfree_skb_cb { 3145 enum skb_drop_reason reason; 3146 }; 3147 3148 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3149 { 3150 return (struct dev_kfree_skb_cb *)skb->cb; 3151 } 3152 3153 void netif_schedule_queue(struct netdev_queue *txq) 3154 { 3155 rcu_read_lock(); 3156 if (!netif_xmit_stopped(txq)) { 3157 struct Qdisc *q = rcu_dereference(txq->qdisc); 3158 3159 __netif_schedule(q); 3160 } 3161 rcu_read_unlock(); 3162 } 3163 EXPORT_SYMBOL(netif_schedule_queue); 3164 3165 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3166 { 3167 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3168 struct Qdisc *q; 3169 3170 rcu_read_lock(); 3171 q = rcu_dereference(dev_queue->qdisc); 3172 __netif_schedule(q); 3173 rcu_read_unlock(); 3174 } 3175 } 3176 EXPORT_SYMBOL(netif_tx_wake_queue); 3177 3178 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3179 { 3180 unsigned long flags; 3181 3182 if (unlikely(!skb)) 3183 return; 3184 3185 if (likely(refcount_read(&skb->users) == 1)) { 3186 smp_rmb(); 3187 refcount_set(&skb->users, 0); 3188 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3189 return; 3190 } 3191 get_kfree_skb_cb(skb)->reason = reason; 3192 local_irq_save(flags); 3193 skb->next = __this_cpu_read(softnet_data.completion_queue); 3194 __this_cpu_write(softnet_data.completion_queue, skb); 3195 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3196 local_irq_restore(flags); 3197 } 3198 EXPORT_SYMBOL(dev_kfree_skb_irq_reason); 3199 3200 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3201 { 3202 if (in_hardirq() || irqs_disabled()) 3203 dev_kfree_skb_irq_reason(skb, reason); 3204 else 3205 kfree_skb_reason(skb, reason); 3206 } 3207 EXPORT_SYMBOL(dev_kfree_skb_any_reason); 3208 3209 3210 /** 3211 * netif_device_detach - mark device as removed 3212 * @dev: network device 3213 * 3214 * Mark device as removed from system and therefore no longer available. 3215 */ 3216 void netif_device_detach(struct net_device *dev) 3217 { 3218 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3219 netif_running(dev)) { 3220 netif_tx_stop_all_queues(dev); 3221 } 3222 } 3223 EXPORT_SYMBOL(netif_device_detach); 3224 3225 /** 3226 * netif_device_attach - mark device as attached 3227 * @dev: network device 3228 * 3229 * Mark device as attached from system and restart if needed. 3230 */ 3231 void netif_device_attach(struct net_device *dev) 3232 { 3233 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3234 netif_running(dev)) { 3235 netif_tx_wake_all_queues(dev); 3236 __netdev_watchdog_up(dev); 3237 } 3238 } 3239 EXPORT_SYMBOL(netif_device_attach); 3240 3241 /* 3242 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3243 * to be used as a distribution range. 
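 * In other words: choose a queue inside the offset/count range that the
 * skb's traffic class maps to, reusing a recorded Rx queue when one is
 * present and falling back to a scaled flow hash otherwise.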
3244 */ 3245 static u16 skb_tx_hash(const struct net_device *dev, 3246 const struct net_device *sb_dev, 3247 struct sk_buff *skb) 3248 { 3249 u32 hash; 3250 u16 qoffset = 0; 3251 u16 qcount = dev->real_num_tx_queues; 3252 3253 if (dev->num_tc) { 3254 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3255 3256 qoffset = sb_dev->tc_to_txq[tc].offset; 3257 qcount = sb_dev->tc_to_txq[tc].count; 3258 if (unlikely(!qcount)) { 3259 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3260 sb_dev->name, qoffset, tc); 3261 qoffset = 0; 3262 qcount = dev->real_num_tx_queues; 3263 } 3264 } 3265 3266 if (skb_rx_queue_recorded(skb)) { 3267 DEBUG_NET_WARN_ON_ONCE(qcount == 0); 3268 hash = skb_get_rx_queue(skb); 3269 if (hash >= qoffset) 3270 hash -= qoffset; 3271 while (unlikely(hash >= qcount)) 3272 hash -= qcount; 3273 return hash + qoffset; 3274 } 3275 3276 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3277 } 3278 3279 void skb_warn_bad_offload(const struct sk_buff *skb) 3280 { 3281 static const netdev_features_t null_features; 3282 struct net_device *dev = skb->dev; 3283 const char *name = ""; 3284 3285 if (!net_ratelimit()) 3286 return; 3287 3288 if (dev) { 3289 if (dev->dev.parent) 3290 name = dev_driver_string(dev->dev.parent); 3291 else 3292 name = netdev_name(dev); 3293 } 3294 skb_dump(KERN_WARNING, skb, false); 3295 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3296 name, dev ? &dev->features : &null_features, 3297 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3298 } 3299 3300 /* 3301 * Invalidate hardware checksum when packet is to be mangled, and 3302 * complete checksum manually on outgoing path. 3303 */ 3304 int skb_checksum_help(struct sk_buff *skb) 3305 { 3306 __wsum csum; 3307 int ret = 0, offset; 3308 3309 if (skb->ip_summed == CHECKSUM_COMPLETE) 3310 goto out_set_summed; 3311 3312 if (unlikely(skb_is_gso(skb))) { 3313 skb_warn_bad_offload(skb); 3314 return -EINVAL; 3315 } 3316 3317 if (!skb_frags_readable(skb)) { 3318 return -EFAULT; 3319 } 3320 3321 /* Before computing a checksum, we should make sure no frag could 3322 * be modified by an external entity : checksum could be wrong. 
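 * (Linearizing copies the fragment data into the skb head, which is
 * private to us, so the checksum computed below stays valid.)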
3323 */ 3324 if (skb_has_shared_frag(skb)) { 3325 ret = __skb_linearize(skb); 3326 if (ret) 3327 goto out; 3328 } 3329 3330 offset = skb_checksum_start_offset(skb); 3331 ret = -EINVAL; 3332 if (unlikely(offset >= skb_headlen(skb))) { 3333 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3334 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n", 3335 offset, skb_headlen(skb)); 3336 goto out; 3337 } 3338 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3339 3340 offset += skb->csum_offset; 3341 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) { 3342 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3343 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n", 3344 offset + sizeof(__sum16), skb_headlen(skb)); 3345 goto out; 3346 } 3347 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3348 if (ret) 3349 goto out; 3350 3351 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3352 out_set_summed: 3353 skb->ip_summed = CHECKSUM_NONE; 3354 out: 3355 return ret; 3356 } 3357 EXPORT_SYMBOL(skb_checksum_help); 3358 3359 int skb_crc32c_csum_help(struct sk_buff *skb) 3360 { 3361 __le32 crc32c_csum; 3362 int ret = 0, offset, start; 3363 3364 if (skb->ip_summed != CHECKSUM_PARTIAL) 3365 goto out; 3366 3367 if (unlikely(skb_is_gso(skb))) 3368 goto out; 3369 3370 /* Before computing a checksum, we should make sure no frag could 3371 * be modified by an external entity : checksum could be wrong. 3372 */ 3373 if (unlikely(skb_has_shared_frag(skb))) { 3374 ret = __skb_linearize(skb); 3375 if (ret) 3376 goto out; 3377 } 3378 start = skb_checksum_start_offset(skb); 3379 offset = start + offsetof(struct sctphdr, checksum); 3380 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3381 ret = -EINVAL; 3382 goto out; 3383 } 3384 3385 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3386 if (ret) 3387 goto out; 3388 3389 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3390 skb->len - start, ~(__u32)0, 3391 crc32c_csum_stub)); 3392 *(__le32 *)(skb->data + offset) = crc32c_csum; 3393 skb_reset_csum_not_inet(skb); 3394 out: 3395 return ret; 3396 } 3397 EXPORT_SYMBOL(skb_crc32c_csum_help); 3398 3399 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3400 { 3401 __be16 type = skb->protocol; 3402 3403 /* Tunnel gso handlers can set protocol to ethernet. */ 3404 if (type == htons(ETH_P_TEB)) { 3405 struct ethhdr *eth; 3406 3407 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3408 return 0; 3409 3410 eth = (struct ethhdr *)skb->data; 3411 type = eth->h_proto; 3412 } 3413 3414 return vlan_get_protocol_and_depth(skb, type, depth); 3415 } 3416 3417 3418 /* Take action when hardware reception checksum errors are detected. */ 3419 #ifdef CONFIG_BUG 3420 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3421 { 3422 netdev_err(dev, "hw csum failure\n"); 3423 skb_dump(KERN_ERR, skb, true); 3424 dump_stack(); 3425 } 3426 3427 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3428 { 3429 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3430 } 3431 EXPORT_SYMBOL(netdev_rx_csum_fault); 3432 #endif 3433 3434 /* XXX: check that highmem exists at all on the given machine. 
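 * illegal_highdma() returns nonzero when the device lacks NETIF_F_HIGHDMA
 * and at least one page fragment of the skb lives in highmem; the caller
 * (harmonize_features()) then clears NETIF_F_SG for this skb.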
*/ 3435 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3436 { 3437 #ifdef CONFIG_HIGHMEM 3438 int i; 3439 3440 if (!(dev->features & NETIF_F_HIGHDMA)) { 3441 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3442 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3443 struct page *page = skb_frag_page(frag); 3444 3445 if (page && PageHighMem(page)) 3446 return 1; 3447 } 3448 } 3449 #endif 3450 return 0; 3451 } 3452 3453 /* If MPLS offload request, verify we are testing hardware MPLS features 3454 * instead of standard features for the netdev. 3455 */ 3456 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3457 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3458 netdev_features_t features, 3459 __be16 type) 3460 { 3461 if (eth_p_mpls(type)) 3462 features &= skb->dev->mpls_features; 3463 3464 return features; 3465 } 3466 #else 3467 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3468 netdev_features_t features, 3469 __be16 type) 3470 { 3471 return features; 3472 } 3473 #endif 3474 3475 static netdev_features_t harmonize_features(struct sk_buff *skb, 3476 netdev_features_t features) 3477 { 3478 __be16 type; 3479 3480 type = skb_network_protocol(skb, NULL); 3481 features = net_mpls_features(skb, features, type); 3482 3483 if (skb->ip_summed != CHECKSUM_NONE && 3484 !can_checksum_protocol(features, type)) { 3485 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3486 } 3487 if (illegal_highdma(skb->dev, skb)) 3488 features &= ~NETIF_F_SG; 3489 3490 return features; 3491 } 3492 3493 netdev_features_t passthru_features_check(struct sk_buff *skb, 3494 struct net_device *dev, 3495 netdev_features_t features) 3496 { 3497 return features; 3498 } 3499 EXPORT_SYMBOL(passthru_features_check); 3500 3501 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3502 struct net_device *dev, 3503 netdev_features_t features) 3504 { 3505 return vlan_features_check(skb, features); 3506 } 3507 3508 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3509 struct net_device *dev, 3510 netdev_features_t features) 3511 { 3512 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3513 3514 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3515 return features & ~NETIF_F_GSO_MASK; 3516 3517 if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb))) 3518 return features & ~NETIF_F_GSO_MASK; 3519 3520 if (!skb_shinfo(skb)->gso_type) { 3521 skb_warn_bad_offload(skb); 3522 return features & ~NETIF_F_GSO_MASK; 3523 } 3524 3525 /* Support for GSO partial features requires software 3526 * intervention before we can actually process the packets 3527 * so we need to strip support for any partial features now 3528 * and we can pull them back in after we have partially 3529 * segmented the frame. 3530 */ 3531 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3532 features &= ~dev->gso_partial_features; 3533 3534 /* Make sure to clear the IPv4 ID mangling feature if the 3535 * IPv4 header has the potential to be fragmented. 3536 */ 3537 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3538 struct iphdr *iph = skb->encapsulation ? 
3539 inner_ip_hdr(skb) : ip_hdr(skb); 3540 3541 if (!(iph->frag_off & htons(IP_DF))) 3542 features &= ~NETIF_F_TSO_MANGLEID; 3543 } 3544 3545 return features; 3546 } 3547 3548 netdev_features_t netif_skb_features(struct sk_buff *skb) 3549 { 3550 struct net_device *dev = skb->dev; 3551 netdev_features_t features = dev->features; 3552 3553 if (skb_is_gso(skb)) 3554 features = gso_features_check(skb, dev, features); 3555 3556 /* If encapsulation offload request, verify we are testing 3557 * hardware encapsulation features instead of standard 3558 * features for the netdev 3559 */ 3560 if (skb->encapsulation) 3561 features &= dev->hw_enc_features; 3562 3563 if (skb_vlan_tagged(skb)) 3564 features = netdev_intersect_features(features, 3565 dev->vlan_features | 3566 NETIF_F_HW_VLAN_CTAG_TX | 3567 NETIF_F_HW_VLAN_STAG_TX); 3568 3569 if (dev->netdev_ops->ndo_features_check) 3570 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3571 features); 3572 else 3573 features &= dflt_features_check(skb, dev, features); 3574 3575 return harmonize_features(skb, features); 3576 } 3577 EXPORT_SYMBOL(netif_skb_features); 3578 3579 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3580 struct netdev_queue *txq, bool more) 3581 { 3582 unsigned int len; 3583 int rc; 3584 3585 if (dev_nit_active(dev)) 3586 dev_queue_xmit_nit(skb, dev); 3587 3588 len = skb->len; 3589 trace_net_dev_start_xmit(skb, dev); 3590 rc = netdev_start_xmit(skb, dev, txq, more); 3591 trace_net_dev_xmit(skb, rc, dev, len); 3592 3593 return rc; 3594 } 3595 3596 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3597 struct netdev_queue *txq, int *ret) 3598 { 3599 struct sk_buff *skb = first; 3600 int rc = NETDEV_TX_OK; 3601 3602 while (skb) { 3603 struct sk_buff *next = skb->next; 3604 3605 skb_mark_not_on_list(skb); 3606 rc = xmit_one(skb, dev, txq, next != NULL); 3607 if (unlikely(!dev_xmit_complete(rc))) { 3608 skb->next = next; 3609 goto out; 3610 } 3611 3612 skb = next; 3613 if (netif_tx_queue_stopped(txq) && skb) { 3614 rc = NETDEV_TX_BUSY; 3615 break; 3616 } 3617 } 3618 3619 out: 3620 *ret = rc; 3621 return skb; 3622 } 3623 3624 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3625 netdev_features_t features) 3626 { 3627 if (skb_vlan_tag_present(skb) && 3628 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3629 skb = __vlan_hwaccel_push_inside(skb); 3630 return skb; 3631 } 3632 3633 int skb_csum_hwoffload_help(struct sk_buff *skb, 3634 const netdev_features_t features) 3635 { 3636 if (unlikely(skb_csum_is_sctp(skb))) 3637 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3638 skb_crc32c_csum_help(skb); 3639 3640 if (features & NETIF_F_HW_CSUM) 3641 return 0; 3642 3643 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3644 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) && 3645 skb_network_header_len(skb) != sizeof(struct ipv6hdr)) 3646 goto sw_checksum; 3647 switch (skb->csum_offset) { 3648 case offsetof(struct tcphdr, check): 3649 case offsetof(struct udphdr, check): 3650 return 0; 3651 } 3652 } 3653 3654 sw_checksum: 3655 return skb_checksum_help(skb); 3656 } 3657 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3658 3659 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3660 { 3661 netdev_features_t features; 3662 3663 features = netif_skb_features(skb); 3664 skb = validate_xmit_vlan(skb, features); 3665 if (unlikely(!skb)) 3666 goto out_null; 3667 3668 skb = sk_validate_xmit_skb(skb, dev); 3669 if (unlikely(!skb)) 3670 goto out_null; 3671 3672 if (netif_needs_gso(skb, features)) { 3673 struct sk_buff *segs; 3674 3675 segs = skb_gso_segment(skb, features); 3676 if (IS_ERR(segs)) { 3677 goto out_kfree_skb; 3678 } else if (segs) { 3679 consume_skb(skb); 3680 skb = segs; 3681 } 3682 } else { 3683 if (skb_needs_linearize(skb, features) && 3684 __skb_linearize(skb)) 3685 goto out_kfree_skb; 3686 3687 /* If packet is not checksummed and device does not 3688 * support checksumming for this protocol, complete 3689 * checksumming here. 3690 */ 3691 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3692 if (skb->encapsulation) 3693 skb_set_inner_transport_header(skb, 3694 skb_checksum_start_offset(skb)); 3695 else 3696 skb_set_transport_header(skb, 3697 skb_checksum_start_offset(skb)); 3698 if (skb_csum_hwoffload_help(skb, features)) 3699 goto out_kfree_skb; 3700 } 3701 } 3702 3703 skb = validate_xmit_xfrm(skb, features, again); 3704 3705 return skb; 3706 3707 out_kfree_skb: 3708 kfree_skb(skb); 3709 out_null: 3710 dev_core_stats_tx_dropped_inc(dev); 3711 return NULL; 3712 } 3713 3714 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3715 { 3716 struct sk_buff *next, *head = NULL, *tail; 3717 3718 for (; skb != NULL; skb = next) { 3719 next = skb->next; 3720 skb_mark_not_on_list(skb); 3721 3722 /* in case skb won't be segmented, point to itself */ 3723 skb->prev = skb; 3724 3725 skb = validate_xmit_skb(skb, dev, again); 3726 if (!skb) 3727 continue; 3728 3729 if (!head) 3730 head = skb; 3731 else 3732 tail->next = skb; 3733 /* If skb was segmented, skb->prev points to 3734 * the last segment. If not, it still contains skb. 
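 * That is also why the running tail can be taken straight from skb->prev
 * below instead of walking the returned segment list.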
3735 */ 3736 tail = skb->prev; 3737 } 3738 return head; 3739 } 3740 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3741 3742 static void qdisc_pkt_len_init(struct sk_buff *skb) 3743 { 3744 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3745 3746 qdisc_skb_cb(skb)->pkt_len = skb->len; 3747 3748 /* To get more precise estimation of bytes sent on wire, 3749 * we add to pkt_len the headers size of all segments 3750 */ 3751 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3752 u16 gso_segs = shinfo->gso_segs; 3753 unsigned int hdr_len; 3754 3755 /* mac layer + network layer */ 3756 hdr_len = skb_transport_offset(skb); 3757 3758 /* + transport layer */ 3759 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3760 const struct tcphdr *th; 3761 struct tcphdr _tcphdr; 3762 3763 th = skb_header_pointer(skb, hdr_len, 3764 sizeof(_tcphdr), &_tcphdr); 3765 if (likely(th)) 3766 hdr_len += __tcp_hdrlen(th); 3767 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 3768 struct udphdr _udphdr; 3769 3770 if (skb_header_pointer(skb, hdr_len, 3771 sizeof(_udphdr), &_udphdr)) 3772 hdr_len += sizeof(struct udphdr); 3773 } 3774 3775 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) { 3776 int payload = skb->len - hdr_len; 3777 3778 /* Malicious packet. */ 3779 if (payload <= 0) 3780 return; 3781 gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size); 3782 } 3783 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3784 } 3785 } 3786 3787 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3788 struct sk_buff **to_free, 3789 struct netdev_queue *txq) 3790 { 3791 int rc; 3792 3793 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3794 if (rc == NET_XMIT_SUCCESS) 3795 trace_qdisc_enqueue(q, txq, skb); 3796 return rc; 3797 } 3798 3799 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3800 struct net_device *dev, 3801 struct netdev_queue *txq) 3802 { 3803 spinlock_t *root_lock = qdisc_lock(q); 3804 struct sk_buff *to_free = NULL; 3805 bool contended; 3806 int rc; 3807 3808 qdisc_calculate_pkt_len(skb, q); 3809 3810 tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP); 3811 3812 if (q->flags & TCQ_F_NOLOCK) { 3813 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3814 qdisc_run_begin(q)) { 3815 /* Retest nolock_qdisc_is_empty() within the protection 3816 * of q->seqlock to protect from racing with requeuing. 3817 */ 3818 if (unlikely(!nolock_qdisc_is_empty(q))) { 3819 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3820 __qdisc_run(q); 3821 qdisc_run_end(q); 3822 3823 goto no_lock_out; 3824 } 3825 3826 qdisc_bstats_cpu_update(q, skb); 3827 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3828 !nolock_qdisc_is_empty(q)) 3829 __qdisc_run(q); 3830 3831 qdisc_run_end(q); 3832 return NET_XMIT_SUCCESS; 3833 } 3834 3835 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3836 qdisc_run(q); 3837 3838 no_lock_out: 3839 if (unlikely(to_free)) 3840 kfree_skb_list_reason(to_free, 3841 tcf_get_drop_reason(to_free)); 3842 return rc; 3843 } 3844 3845 if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) { 3846 kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP); 3847 return NET_XMIT_DROP; 3848 } 3849 /* 3850 * Heuristic to force contended enqueues to serialize on a 3851 * separate lock before trying to get qdisc main lock. 3852 * This permits qdisc->running owner to get the lock more 3853 * often and dequeue packets faster. 3854 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 3855 * and then other tasks will only enqueue packets. 
The packets will be 3856 * sent after the qdisc owner is scheduled again. To prevent this 3857 * scenario the task always serialize on the lock. 3858 */ 3859 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 3860 if (unlikely(contended)) 3861 spin_lock(&q->busylock); 3862 3863 spin_lock(root_lock); 3864 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3865 __qdisc_drop(skb, &to_free); 3866 rc = NET_XMIT_DROP; 3867 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3868 qdisc_run_begin(q)) { 3869 /* 3870 * This is a work-conserving queue; there are no old skbs 3871 * waiting to be sent out; and the qdisc is not running - 3872 * xmit the skb directly. 3873 */ 3874 3875 qdisc_bstats_update(q, skb); 3876 3877 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3878 if (unlikely(contended)) { 3879 spin_unlock(&q->busylock); 3880 contended = false; 3881 } 3882 __qdisc_run(q); 3883 } 3884 3885 qdisc_run_end(q); 3886 rc = NET_XMIT_SUCCESS; 3887 } else { 3888 WRITE_ONCE(q->owner, smp_processor_id()); 3889 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3890 WRITE_ONCE(q->owner, -1); 3891 if (qdisc_run_begin(q)) { 3892 if (unlikely(contended)) { 3893 spin_unlock(&q->busylock); 3894 contended = false; 3895 } 3896 __qdisc_run(q); 3897 qdisc_run_end(q); 3898 } 3899 } 3900 spin_unlock(root_lock); 3901 if (unlikely(to_free)) 3902 kfree_skb_list_reason(to_free, 3903 tcf_get_drop_reason(to_free)); 3904 if (unlikely(contended)) 3905 spin_unlock(&q->busylock); 3906 return rc; 3907 } 3908 3909 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3910 static void skb_update_prio(struct sk_buff *skb) 3911 { 3912 const struct netprio_map *map; 3913 const struct sock *sk; 3914 unsigned int prioidx; 3915 3916 if (skb->priority) 3917 return; 3918 map = rcu_dereference_bh(skb->dev->priomap); 3919 if (!map) 3920 return; 3921 sk = skb_to_full_sk(skb); 3922 if (!sk) 3923 return; 3924 3925 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3926 3927 if (prioidx < map->priomap_len) 3928 skb->priority = map->priomap[prioidx]; 3929 } 3930 #else 3931 #define skb_update_prio(skb) 3932 #endif 3933 3934 /** 3935 * dev_loopback_xmit - loop back @skb 3936 * @net: network namespace this loopback is happening in 3937 * @sk: sk needed to be a netfilter okfn 3938 * @skb: buffer to transmit 3939 */ 3940 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3941 { 3942 skb_reset_mac_header(skb); 3943 __skb_pull(skb, skb_network_offset(skb)); 3944 skb->pkt_type = PACKET_LOOPBACK; 3945 if (skb->ip_summed == CHECKSUM_NONE) 3946 skb->ip_summed = CHECKSUM_UNNECESSARY; 3947 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb)); 3948 skb_dst_force(skb); 3949 netif_rx(skb); 3950 return 0; 3951 } 3952 EXPORT_SYMBOL(dev_loopback_xmit); 3953 3954 #ifdef CONFIG_NET_EGRESS 3955 static struct netdev_queue * 3956 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) 3957 { 3958 int qm = skb_get_queue_mapping(skb); 3959 3960 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 3961 } 3962 3963 #ifndef CONFIG_PREEMPT_RT 3964 static bool netdev_xmit_txqueue_skipped(void) 3965 { 3966 return __this_cpu_read(softnet_data.xmit.skip_txqueue); 3967 } 3968 3969 void netdev_xmit_skip_txqueue(bool skip) 3970 { 3971 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 3972 } 3973 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3974 3975 #else 3976 static bool netdev_xmit_txqueue_skipped(void) 3977 { 3978 return current->net_xmit.skip_txqueue; 3979 } 3980 3981 void netdev_xmit_skip_txqueue(bool skip) 
3982 { 3983 current->net_xmit.skip_txqueue = skip; 3984 } 3985 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3986 #endif 3987 #endif /* CONFIG_NET_EGRESS */ 3988 3989 #ifdef CONFIG_NET_XGRESS 3990 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb, 3991 enum skb_drop_reason *drop_reason) 3992 { 3993 int ret = TC_ACT_UNSPEC; 3994 #ifdef CONFIG_NET_CLS_ACT 3995 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq); 3996 struct tcf_result res; 3997 3998 if (!miniq) 3999 return ret; 4000 4001 if (static_branch_unlikely(&tcf_bypass_check_needed_key)) { 4002 if (tcf_block_bypass_sw(miniq->block)) 4003 return ret; 4004 } 4005 4006 tc_skb_cb(skb)->mru = 0; 4007 tc_skb_cb(skb)->post_ct = false; 4008 tcf_set_drop_reason(skb, *drop_reason); 4009 4010 mini_qdisc_bstats_cpu_update(miniq, skb); 4011 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false); 4012 /* Only tcf related quirks below. */ 4013 switch (ret) { 4014 case TC_ACT_SHOT: 4015 *drop_reason = tcf_get_drop_reason(skb); 4016 mini_qdisc_qstats_cpu_drop(miniq); 4017 break; 4018 case TC_ACT_OK: 4019 case TC_ACT_RECLASSIFY: 4020 skb->tc_index = TC_H_MIN(res.classid); 4021 break; 4022 } 4023 #endif /* CONFIG_NET_CLS_ACT */ 4024 return ret; 4025 } 4026 4027 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key); 4028 4029 void tcx_inc(void) 4030 { 4031 static_branch_inc(&tcx_needed_key); 4032 } 4033 4034 void tcx_dec(void) 4035 { 4036 static_branch_dec(&tcx_needed_key); 4037 } 4038 4039 static __always_inline enum tcx_action_base 4040 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, 4041 const bool needs_mac) 4042 { 4043 const struct bpf_mprog_fp *fp; 4044 const struct bpf_prog *prog; 4045 int ret = TCX_NEXT; 4046 4047 if (needs_mac) 4048 __skb_push(skb, skb->mac_len); 4049 bpf_mprog_foreach_prog(entry, fp, prog) { 4050 bpf_compute_data_pointers(skb); 4051 ret = bpf_prog_run(prog, skb); 4052 if (ret != TCX_NEXT) 4053 break; 4054 } 4055 if (needs_mac) 4056 __skb_pull(skb, skb->mac_len); 4057 return tcx_action_code(skb, ret); 4058 } 4059 4060 static __always_inline struct sk_buff * 4061 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4062 struct net_device *orig_dev, bool *another) 4063 { 4064 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress); 4065 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS; 4066 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 4067 int sch_ret; 4068 4069 if (!entry) 4070 return skb; 4071 4072 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 4073 if (*pt_prev) { 4074 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4075 *pt_prev = NULL; 4076 } 4077 4078 qdisc_skb_cb(skb)->pkt_len = skb->len; 4079 tcx_set_ingress(skb, true); 4080 4081 if (static_branch_unlikely(&tcx_needed_key)) { 4082 sch_ret = tcx_run(entry, skb, true); 4083 if (sch_ret != TC_ACT_UNSPEC) 4084 goto ingress_verdict; 4085 } 4086 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason); 4087 ingress_verdict: 4088 switch (sch_ret) { 4089 case TC_ACT_REDIRECT: 4090 /* skb_mac_header check was done by BPF, so we can safely 4091 * push the L2 header back before redirecting to another 4092 * netdev. 
4093 */ 4094 __skb_push(skb, skb->mac_len); 4095 if (skb_do_redirect(skb) == -EAGAIN) { 4096 __skb_pull(skb, skb->mac_len); 4097 *another = true; 4098 break; 4099 } 4100 *ret = NET_RX_SUCCESS; 4101 bpf_net_ctx_clear(bpf_net_ctx); 4102 return NULL; 4103 case TC_ACT_SHOT: 4104 kfree_skb_reason(skb, drop_reason); 4105 *ret = NET_RX_DROP; 4106 bpf_net_ctx_clear(bpf_net_ctx); 4107 return NULL; 4108 /* used by tc_run */ 4109 case TC_ACT_STOLEN: 4110 case TC_ACT_QUEUED: 4111 case TC_ACT_TRAP: 4112 consume_skb(skb); 4113 fallthrough; 4114 case TC_ACT_CONSUMED: 4115 *ret = NET_RX_SUCCESS; 4116 bpf_net_ctx_clear(bpf_net_ctx); 4117 return NULL; 4118 } 4119 bpf_net_ctx_clear(bpf_net_ctx); 4120 4121 return skb; 4122 } 4123 4124 static __always_inline struct sk_buff * 4125 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 4126 { 4127 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress); 4128 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS; 4129 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 4130 int sch_ret; 4131 4132 if (!entry) 4133 return skb; 4134 4135 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 4136 4137 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was 4138 * already set by the caller. 4139 */ 4140 if (static_branch_unlikely(&tcx_needed_key)) { 4141 sch_ret = tcx_run(entry, skb, false); 4142 if (sch_ret != TC_ACT_UNSPEC) 4143 goto egress_verdict; 4144 } 4145 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason); 4146 egress_verdict: 4147 switch (sch_ret) { 4148 case TC_ACT_REDIRECT: 4149 /* No need to push/pop skb's mac_header here on egress! */ 4150 skb_do_redirect(skb); 4151 *ret = NET_XMIT_SUCCESS; 4152 bpf_net_ctx_clear(bpf_net_ctx); 4153 return NULL; 4154 case TC_ACT_SHOT: 4155 kfree_skb_reason(skb, drop_reason); 4156 *ret = NET_XMIT_DROP; 4157 bpf_net_ctx_clear(bpf_net_ctx); 4158 return NULL; 4159 /* used by tc_run */ 4160 case TC_ACT_STOLEN: 4161 case TC_ACT_QUEUED: 4162 case TC_ACT_TRAP: 4163 consume_skb(skb); 4164 fallthrough; 4165 case TC_ACT_CONSUMED: 4166 *ret = NET_XMIT_SUCCESS; 4167 bpf_net_ctx_clear(bpf_net_ctx); 4168 return NULL; 4169 } 4170 bpf_net_ctx_clear(bpf_net_ctx); 4171 4172 return skb; 4173 } 4174 #else 4175 static __always_inline struct sk_buff * 4176 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4177 struct net_device *orig_dev, bool *another) 4178 { 4179 return skb; 4180 } 4181 4182 static __always_inline struct sk_buff * 4183 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 4184 { 4185 return skb; 4186 } 4187 #endif /* CONFIG_NET_XGRESS */ 4188 4189 #ifdef CONFIG_XPS 4190 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 4191 struct xps_dev_maps *dev_maps, unsigned int tci) 4192 { 4193 int tc = netdev_get_prio_tc_map(dev, skb->priority); 4194 struct xps_map *map; 4195 int queue_index = -1; 4196 4197 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 4198 return queue_index; 4199 4200 tci *= dev_maps->num_tc; 4201 tci += tc; 4202 4203 map = rcu_dereference(dev_maps->attr_map[tci]); 4204 if (map) { 4205 if (map->len == 1) 4206 queue_index = map->queues[0]; 4207 else 4208 queue_index = map->queues[reciprocal_scale( 4209 skb_get_hash(skb), map->len)]; 4210 if (unlikely(queue_index >= dev->real_num_tx_queues)) 4211 queue_index = -1; 4212 } 4213 return queue_index; 4214 } 4215 #endif 4216 4217 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 4218 struct sk_buff *skb) 4219 { 4220 #ifdef CONFIG_XPS 
4221 struct xps_dev_maps *dev_maps; 4222 struct sock *sk = skb->sk; 4223 int queue_index = -1; 4224 4225 if (!static_key_false(&xps_needed)) 4226 return -1; 4227 4228 rcu_read_lock(); 4229 if (!static_key_false(&xps_rxqs_needed)) 4230 goto get_cpus_map; 4231 4232 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4233 if (dev_maps) { 4234 int tci = sk_rx_queue_get(sk); 4235 4236 if (tci >= 0) 4237 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4238 tci); 4239 } 4240 4241 get_cpus_map: 4242 if (queue_index < 0) { 4243 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4244 if (dev_maps) { 4245 unsigned int tci = skb->sender_cpu - 1; 4246 4247 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4248 tci); 4249 } 4250 } 4251 rcu_read_unlock(); 4252 4253 return queue_index; 4254 #else 4255 return -1; 4256 #endif 4257 } 4258 4259 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4260 struct net_device *sb_dev) 4261 { 4262 return 0; 4263 } 4264 EXPORT_SYMBOL(dev_pick_tx_zero); 4265 4266 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4267 struct net_device *sb_dev) 4268 { 4269 struct sock *sk = skb->sk; 4270 int queue_index = sk_tx_queue_get(sk); 4271 4272 sb_dev = sb_dev ? : dev; 4273 4274 if (queue_index < 0 || skb->ooo_okay || 4275 queue_index >= dev->real_num_tx_queues) { 4276 int new_index = get_xps_queue(dev, sb_dev, skb); 4277 4278 if (new_index < 0) 4279 new_index = skb_tx_hash(dev, sb_dev, skb); 4280 4281 if (queue_index != new_index && sk && 4282 sk_fullsock(sk) && 4283 rcu_access_pointer(sk->sk_dst_cache)) 4284 sk_tx_queue_set(sk, new_index); 4285 4286 queue_index = new_index; 4287 } 4288 4289 return queue_index; 4290 } 4291 EXPORT_SYMBOL(netdev_pick_tx); 4292 4293 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 4294 struct sk_buff *skb, 4295 struct net_device *sb_dev) 4296 { 4297 int queue_index = 0; 4298 4299 #ifdef CONFIG_XPS 4300 u32 sender_cpu = skb->sender_cpu - 1; 4301 4302 if (sender_cpu >= (u32)NR_CPUS) 4303 skb->sender_cpu = raw_smp_processor_id() + 1; 4304 #endif 4305 4306 if (dev->real_num_tx_queues != 1) { 4307 const struct net_device_ops *ops = dev->netdev_ops; 4308 4309 if (ops->ndo_select_queue) 4310 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4311 else 4312 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4313 4314 queue_index = netdev_cap_txqueue(dev, queue_index); 4315 } 4316 4317 skb_set_queue_mapping(skb, queue_index); 4318 return netdev_get_tx_queue(dev, queue_index); 4319 } 4320 4321 /** 4322 * __dev_queue_xmit() - transmit a buffer 4323 * @skb: buffer to transmit 4324 * @sb_dev: suboordinate device used for L2 forwarding offload 4325 * 4326 * Queue a buffer for transmission to a network device. The caller must 4327 * have set the device and priority and built the buffer before calling 4328 * this function. The function can be called from an interrupt. 4329 * 4330 * When calling this method, interrupts MUST be enabled. This is because 4331 * the BH enable code must have IRQs enabled so that it will not deadlock. 4332 * 4333 * Regardless of the return value, the skb is consumed, so it is currently 4334 * difficult to retry a send to this method. (You can bump the ref count 4335 * before sending to hold a reference for retry if you are careful.) 4336 * 4337 * Return: 4338 * * 0 - buffer successfully transmitted 4339 * * positive qdisc return code - NET_XMIT_DROP etc. 
4340 * * negative errno - other errors
4341 */
4342 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4343 {
4344 struct net_device *dev = skb->dev;
4345 struct netdev_queue *txq = NULL;
4346 struct Qdisc *q;
4347 int rc = -ENOMEM;
4348 bool again = false;
4349
4350 skb_reset_mac_header(skb);
4351 skb_assert_len(skb);
4352
4353 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4354 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4355
4356 /* Disable soft irqs for various locks below. Also
4357 * stops preemption for RCU.
4358 */
4359 rcu_read_lock_bh();
4360
4361 skb_update_prio(skb);
4362
4363 qdisc_pkt_len_init(skb);
4364 tcx_set_ingress(skb, false);
4365 #ifdef CONFIG_NET_EGRESS
4366 if (static_branch_unlikely(&egress_needed_key)) {
4367 if (nf_hook_egress_active()) {
4368 skb = nf_hook_egress(skb, &rc, dev);
4369 if (!skb)
4370 goto out;
4371 }
4372
4373 netdev_xmit_skip_txqueue(false);
4374
4375 nf_skip_egress(skb, true);
4376 skb = sch_handle_egress(skb, &rc, dev);
4377 if (!skb)
4378 goto out;
4379 nf_skip_egress(skb, false);
4380
4381 if (netdev_xmit_txqueue_skipped())
4382 txq = netdev_tx_queue_mapping(dev, skb);
4383 }
4384 #endif
4385 /* If device/qdisc don't need skb->dst, release it right now while
4386 * it's hot in this cpu cache.
4387 */
4388 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4389 skb_dst_drop(skb);
4390 else
4391 skb_dst_force(skb);
4392
4393 if (!txq)
4394 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4395
4396 q = rcu_dereference_bh(txq->qdisc);
4397
4398 trace_net_dev_queue(skb);
4399 if (q->enqueue) {
4400 rc = __dev_xmit_skb(skb, q, dev, txq);
4401 goto out;
4402 }
4403
4404 /* The device has no queue. Common case for software devices:
4405 * loopback, all the sorts of tunnels...
4406 *
4407 * Really, it is unlikely that netif_tx_lock protection is necessary
4408 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
4409 * counters.)
4410 * However, it is possible that they rely on protection
4411 * made by us here.
4412 *
4413 * Check this and take the lock: it is not prone to deadlocks.
4414 * Alternatively take the noqueue qdisc path, which is even simpler 8)
4415 */
4416 if (dev->flags & IFF_UP) {
4417 int cpu = smp_processor_id(); /* ok because BHs are off */
4418
4419 /* Other cpus might concurrently change txq->xmit_lock_owner
4420 * to -1 or to their cpu id, but not to our id.
4421 */
4422 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4423 if (dev_xmit_recursion())
4424 goto recursion_alert;
4425
4426 skb = validate_xmit_skb(skb, dev, &again);
4427 if (!skb)
4428 goto out;
4429
4430 HARD_TX_LOCK(dev, txq, cpu);
4431
4432 if (!netif_xmit_stopped(txq)) {
4433 dev_xmit_recursion_inc();
4434 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4435 dev_xmit_recursion_dec();
4436 if (dev_xmit_complete(rc)) {
4437 HARD_TX_UNLOCK(dev, txq);
4438 goto out;
4439 }
4440 }
4441 HARD_TX_UNLOCK(dev, txq);
4442 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4443 dev->name);
4444 } else {
4445 /* Recursion is detected!
It is possible, 4446 * unfortunately 4447 */ 4448 recursion_alert: 4449 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4450 dev->name); 4451 } 4452 } 4453 4454 rc = -ENETDOWN; 4455 rcu_read_unlock_bh(); 4456 4457 dev_core_stats_tx_dropped_inc(dev); 4458 kfree_skb_list(skb); 4459 return rc; 4460 out: 4461 rcu_read_unlock_bh(); 4462 return rc; 4463 } 4464 EXPORT_SYMBOL(__dev_queue_xmit); 4465 4466 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4467 { 4468 struct net_device *dev = skb->dev; 4469 struct sk_buff *orig_skb = skb; 4470 struct netdev_queue *txq; 4471 int ret = NETDEV_TX_BUSY; 4472 bool again = false; 4473 4474 if (unlikely(!netif_running(dev) || 4475 !netif_carrier_ok(dev))) 4476 goto drop; 4477 4478 skb = validate_xmit_skb_list(skb, dev, &again); 4479 if (skb != orig_skb) 4480 goto drop; 4481 4482 skb_set_queue_mapping(skb, queue_id); 4483 txq = skb_get_tx_queue(dev, skb); 4484 4485 local_bh_disable(); 4486 4487 dev_xmit_recursion_inc(); 4488 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4489 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4490 ret = netdev_start_xmit(skb, dev, txq, false); 4491 HARD_TX_UNLOCK(dev, txq); 4492 dev_xmit_recursion_dec(); 4493 4494 local_bh_enable(); 4495 return ret; 4496 drop: 4497 dev_core_stats_tx_dropped_inc(dev); 4498 kfree_skb_list(skb); 4499 return NET_XMIT_DROP; 4500 } 4501 EXPORT_SYMBOL(__dev_direct_xmit); 4502 4503 /************************************************************************* 4504 * Receiver routines 4505 *************************************************************************/ 4506 static DEFINE_PER_CPU(struct task_struct *, backlog_napi); 4507 4508 int weight_p __read_mostly = 64; /* old backlog weight */ 4509 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4510 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4511 4512 /* Called with irq disabled */ 4513 static inline void ____napi_schedule(struct softnet_data *sd, 4514 struct napi_struct *napi) 4515 { 4516 struct task_struct *thread; 4517 4518 lockdep_assert_irqs_disabled(); 4519 4520 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4521 /* Paired with smp_mb__before_atomic() in 4522 * napi_enable()/dev_set_threaded(). 4523 * Use READ_ONCE() to guarantee a complete 4524 * read on napi->thread. Only call 4525 * wake_up_process() when it's not NULL. 4526 */ 4527 thread = READ_ONCE(napi->thread); 4528 if (thread) { 4529 if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi)) 4530 goto use_local_napi; 4531 4532 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4533 wake_up_process(thread); 4534 return; 4535 } 4536 } 4537 4538 use_local_napi: 4539 list_add_tail(&napi->poll_list, &sd->poll_list); 4540 WRITE_ONCE(napi->list_owner, smp_processor_id()); 4541 /* If not called from net_rx_action() 4542 * we have to raise NET_RX_SOFTIRQ. 
4543 */ 4544 if (!sd->in_net_rx_action) 4545 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4546 } 4547 4548 #ifdef CONFIG_RPS 4549 4550 struct static_key_false rps_needed __read_mostly; 4551 EXPORT_SYMBOL(rps_needed); 4552 struct static_key_false rfs_needed __read_mostly; 4553 EXPORT_SYMBOL(rfs_needed); 4554 4555 static struct rps_dev_flow * 4556 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4557 struct rps_dev_flow *rflow, u16 next_cpu) 4558 { 4559 if (next_cpu < nr_cpu_ids) { 4560 u32 head; 4561 #ifdef CONFIG_RFS_ACCEL 4562 struct netdev_rx_queue *rxqueue; 4563 struct rps_dev_flow_table *flow_table; 4564 struct rps_dev_flow *old_rflow; 4565 u16 rxq_index; 4566 u32 flow_id; 4567 int rc; 4568 4569 /* Should we steer this flow to a different hardware queue? */ 4570 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4571 !(dev->features & NETIF_F_NTUPLE)) 4572 goto out; 4573 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4574 if (rxq_index == skb_get_rx_queue(skb)) 4575 goto out; 4576 4577 rxqueue = dev->_rx + rxq_index; 4578 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4579 if (!flow_table) 4580 goto out; 4581 flow_id = skb_get_hash(skb) & flow_table->mask; 4582 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4583 rxq_index, flow_id); 4584 if (rc < 0) 4585 goto out; 4586 old_rflow = rflow; 4587 rflow = &flow_table->flows[flow_id]; 4588 WRITE_ONCE(rflow->filter, rc); 4589 if (old_rflow->filter == rc) 4590 WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER); 4591 out: 4592 #endif 4593 head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head); 4594 rps_input_queue_tail_save(&rflow->last_qtail, head); 4595 } 4596 4597 WRITE_ONCE(rflow->cpu, next_cpu); 4598 return rflow; 4599 } 4600 4601 /* 4602 * get_rps_cpu is called from netif_receive_skb and returns the target 4603 * CPU from the RPS map of the receiving queue for a given skb. 4604 * rcu_read_lock must be held on entry. 4605 */ 4606 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4607 struct rps_dev_flow **rflowp) 4608 { 4609 const struct rps_sock_flow_table *sock_flow_table; 4610 struct netdev_rx_queue *rxqueue = dev->_rx; 4611 struct rps_dev_flow_table *flow_table; 4612 struct rps_map *map; 4613 int cpu = -1; 4614 u32 tcpu; 4615 u32 hash; 4616 4617 if (skb_rx_queue_recorded(skb)) { 4618 u16 index = skb_get_rx_queue(skb); 4619 4620 if (unlikely(index >= dev->real_num_rx_queues)) { 4621 WARN_ONCE(dev->real_num_rx_queues > 1, 4622 "%s received packet on queue %u, but number " 4623 "of RX queues is %u\n", 4624 dev->name, index, dev->real_num_rx_queues); 4625 goto done; 4626 } 4627 rxqueue += index; 4628 } 4629 4630 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4631 4632 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4633 map = rcu_dereference(rxqueue->rps_map); 4634 if (!flow_table && !map) 4635 goto done; 4636 4637 skb_reset_network_header(skb); 4638 hash = skb_get_hash(skb); 4639 if (!hash) 4640 goto done; 4641 4642 sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table); 4643 if (flow_table && sock_flow_table) { 4644 struct rps_dev_flow *rflow; 4645 u32 next_cpu; 4646 u32 ident; 4647 4648 /* First check into global flow table if there is a match. 4649 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). 
4650 */ 4651 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); 4652 if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask) 4653 goto try_rps; 4654 4655 next_cpu = ident & net_hotdata.rps_cpu_mask; 4656 4657 /* OK, now we know there is a match, 4658 * we can look at the local (per receive queue) flow table 4659 */ 4660 rflow = &flow_table->flows[hash & flow_table->mask]; 4661 tcpu = rflow->cpu; 4662 4663 /* 4664 * If the desired CPU (where last recvmsg was done) is 4665 * different from current CPU (one in the rx-queue flow 4666 * table entry), switch if one of the following holds: 4667 * - Current CPU is unset (>= nr_cpu_ids). 4668 * - Current CPU is offline. 4669 * - The current CPU's queue tail has advanced beyond the 4670 * last packet that was enqueued using this table entry. 4671 * This guarantees that all previous packets for the flow 4672 * have been dequeued, thus preserving in order delivery. 4673 */ 4674 if (unlikely(tcpu != next_cpu) && 4675 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4676 ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) - 4677 rflow->last_qtail)) >= 0)) { 4678 tcpu = next_cpu; 4679 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4680 } 4681 4682 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4683 *rflowp = rflow; 4684 cpu = tcpu; 4685 goto done; 4686 } 4687 } 4688 4689 try_rps: 4690 4691 if (map) { 4692 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4693 if (cpu_online(tcpu)) { 4694 cpu = tcpu; 4695 goto done; 4696 } 4697 } 4698 4699 done: 4700 return cpu; 4701 } 4702 4703 #ifdef CONFIG_RFS_ACCEL 4704 4705 /** 4706 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4707 * @dev: Device on which the filter was set 4708 * @rxq_index: RX queue index 4709 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4710 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4711 * 4712 * Drivers that implement ndo_rx_flow_steer() should periodically call 4713 * this function for each installed filter and remove the filters for 4714 * which it returns %true. 
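 *
 * As an illustrative sketch only (the per-driver bookkeeping names below are
 * hypothetical and not part of this file), such a periodic scan could look
 * like:
 *
 *	for (i = 0; i < priv->n_hw_filters; i++)
 *		if (rps_may_expire_flow(dev, priv->filter_rxq[i],
 *					priv->filter_flow_id[i],
 *					priv->filter_id[i]))
 *			my_drv_remove_hw_filter(priv, i);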
4715 */ 4716 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4717 u32 flow_id, u16 filter_id) 4718 { 4719 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4720 struct rps_dev_flow_table *flow_table; 4721 struct rps_dev_flow *rflow; 4722 bool expire = true; 4723 unsigned int cpu; 4724 4725 rcu_read_lock(); 4726 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4727 if (flow_table && flow_id <= flow_table->mask) { 4728 rflow = &flow_table->flows[flow_id]; 4729 cpu = READ_ONCE(rflow->cpu); 4730 if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids && 4731 ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) - 4732 READ_ONCE(rflow->last_qtail)) < 4733 (int)(10 * flow_table->mask))) 4734 expire = false; 4735 } 4736 rcu_read_unlock(); 4737 return expire; 4738 } 4739 EXPORT_SYMBOL(rps_may_expire_flow); 4740 4741 #endif /* CONFIG_RFS_ACCEL */ 4742 4743 /* Called from hardirq (IPI) context */ 4744 static void rps_trigger_softirq(void *data) 4745 { 4746 struct softnet_data *sd = data; 4747 4748 ____napi_schedule(sd, &sd->backlog); 4749 sd->received_rps++; 4750 } 4751 4752 #endif /* CONFIG_RPS */ 4753 4754 /* Called from hardirq (IPI) context */ 4755 static void trigger_rx_softirq(void *data) 4756 { 4757 struct softnet_data *sd = data; 4758 4759 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4760 smp_store_release(&sd->defer_ipi_scheduled, 0); 4761 } 4762 4763 /* 4764 * After we queued a packet into sd->input_pkt_queue, 4765 * we need to make sure this queue is serviced soon. 4766 * 4767 * - If this is another cpu queue, link it to our rps_ipi_list, 4768 * and make sure we will process rps_ipi_list from net_rx_action(). 4769 * 4770 * - If this is our own queue, NAPI schedule our backlog. 4771 * Note that this also raises NET_RX_SOFTIRQ. 4772 */ 4773 static void napi_schedule_rps(struct softnet_data *sd) 4774 { 4775 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4776 4777 #ifdef CONFIG_RPS 4778 if (sd != mysd) { 4779 if (use_backlog_threads()) { 4780 __napi_schedule_irqoff(&sd->backlog); 4781 return; 4782 } 4783 4784 sd->rps_ipi_next = mysd->rps_ipi_list; 4785 mysd->rps_ipi_list = sd; 4786 4787 /* If not called from net_rx_action() or napi_threaded_poll() 4788 * we have to raise NET_RX_SOFTIRQ. 
4789 */ 4790 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll) 4791 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4792 return; 4793 } 4794 #endif /* CONFIG_RPS */ 4795 __napi_schedule_irqoff(&mysd->backlog); 4796 } 4797 4798 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) 4799 { 4800 unsigned long flags; 4801 4802 if (use_backlog_threads()) { 4803 backlog_lock_irq_save(sd, &flags); 4804 4805 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 4806 __napi_schedule_irqoff(&sd->backlog); 4807 4808 backlog_unlock_irq_restore(sd, &flags); 4809 4810 } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) { 4811 smp_call_function_single_async(cpu, &sd->defer_csd); 4812 } 4813 } 4814 4815 #ifdef CONFIG_NET_FLOW_LIMIT 4816 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4817 #endif 4818 4819 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4820 { 4821 #ifdef CONFIG_NET_FLOW_LIMIT 4822 struct sd_flow_limit *fl; 4823 struct softnet_data *sd; 4824 unsigned int old_flow, new_flow; 4825 4826 if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1)) 4827 return false; 4828 4829 sd = this_cpu_ptr(&softnet_data); 4830 4831 rcu_read_lock(); 4832 fl = rcu_dereference(sd->flow_limit); 4833 if (fl) { 4834 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4835 old_flow = fl->history[fl->history_head]; 4836 fl->history[fl->history_head] = new_flow; 4837 4838 fl->history_head++; 4839 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4840 4841 if (likely(fl->buckets[old_flow])) 4842 fl->buckets[old_flow]--; 4843 4844 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4845 fl->count++; 4846 rcu_read_unlock(); 4847 return true; 4848 } 4849 } 4850 rcu_read_unlock(); 4851 #endif 4852 return false; 4853 } 4854 4855 /* 4856 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4857 * queue (may be a remote CPU queue). 4858 */ 4859 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4860 unsigned int *qtail) 4861 { 4862 enum skb_drop_reason reason; 4863 struct softnet_data *sd; 4864 unsigned long flags; 4865 unsigned int qlen; 4866 int max_backlog; 4867 u32 tail; 4868 4869 reason = SKB_DROP_REASON_DEV_READY; 4870 if (!netif_running(skb->dev)) 4871 goto bad_dev; 4872 4873 reason = SKB_DROP_REASON_CPU_BACKLOG; 4874 sd = &per_cpu(softnet_data, cpu); 4875 4876 qlen = skb_queue_len_lockless(&sd->input_pkt_queue); 4877 max_backlog = READ_ONCE(net_hotdata.max_backlog); 4878 if (unlikely(qlen > max_backlog)) 4879 goto cpu_backlog_drop; 4880 backlog_lock_irq_save(sd, &flags); 4881 qlen = skb_queue_len(&sd->input_pkt_queue); 4882 if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) { 4883 if (!qlen) { 4884 /* Schedule NAPI for backlog device. We can use 4885 * non atomic operation as we own the queue lock. 
4886 */ 4887 if (!__test_and_set_bit(NAPI_STATE_SCHED, 4888 &sd->backlog.state)) 4889 napi_schedule_rps(sd); 4890 } 4891 __skb_queue_tail(&sd->input_pkt_queue, skb); 4892 tail = rps_input_queue_tail_incr(sd); 4893 backlog_unlock_irq_restore(sd, &flags); 4894 4895 /* save the tail outside of the critical section */ 4896 rps_input_queue_tail_save(qtail, tail); 4897 return NET_RX_SUCCESS; 4898 } 4899 4900 backlog_unlock_irq_restore(sd, &flags); 4901 4902 cpu_backlog_drop: 4903 atomic_inc(&sd->dropped); 4904 bad_dev: 4905 dev_core_stats_rx_dropped_inc(skb->dev); 4906 kfree_skb_reason(skb, reason); 4907 return NET_RX_DROP; 4908 } 4909 4910 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4911 { 4912 struct net_device *dev = skb->dev; 4913 struct netdev_rx_queue *rxqueue; 4914 4915 rxqueue = dev->_rx; 4916 4917 if (skb_rx_queue_recorded(skb)) { 4918 u16 index = skb_get_rx_queue(skb); 4919 4920 if (unlikely(index >= dev->real_num_rx_queues)) { 4921 WARN_ONCE(dev->real_num_rx_queues > 1, 4922 "%s received packet on queue %u, but number " 4923 "of RX queues is %u\n", 4924 dev->name, index, dev->real_num_rx_queues); 4925 4926 return rxqueue; /* Return first rxqueue */ 4927 } 4928 rxqueue += index; 4929 } 4930 return rxqueue; 4931 } 4932 4933 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4934 struct bpf_prog *xdp_prog) 4935 { 4936 void *orig_data, *orig_data_end, *hard_start; 4937 struct netdev_rx_queue *rxqueue; 4938 bool orig_bcast, orig_host; 4939 u32 mac_len, frame_sz; 4940 __be16 orig_eth_type; 4941 struct ethhdr *eth; 4942 u32 metalen, act; 4943 int off; 4944 4945 /* The XDP program wants to see the packet starting at the MAC 4946 * header. 4947 */ 4948 mac_len = skb->data - skb_mac_header(skb); 4949 hard_start = skb->data - skb_headroom(skb); 4950 4951 /* SKB "head" area always have tailroom for skb_shared_info */ 4952 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4953 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4954 4955 rxqueue = netif_get_rxqueue(skb); 4956 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4957 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4958 skb_headlen(skb) + mac_len, true); 4959 if (skb_is_nonlinear(skb)) { 4960 skb_shinfo(skb)->xdp_frags_size = skb->data_len; 4961 xdp_buff_set_frags_flag(xdp); 4962 } else { 4963 xdp_buff_clear_frags_flag(xdp); 4964 } 4965 4966 orig_data_end = xdp->data_end; 4967 orig_data = xdp->data; 4968 eth = (struct ethhdr *)xdp->data; 4969 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4970 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4971 orig_eth_type = eth->h_proto; 4972 4973 act = bpf_prog_run_xdp(xdp_prog, xdp); 4974 4975 /* check if bpf_xdp_adjust_head was used */ 4976 off = xdp->data - orig_data; 4977 if (off) { 4978 if (off > 0) 4979 __skb_pull(skb, off); 4980 else if (off < 0) 4981 __skb_push(skb, -off); 4982 4983 skb->mac_header += off; 4984 skb_reset_network_header(skb); 4985 } 4986 4987 /* check if bpf_xdp_adjust_tail was used */ 4988 off = xdp->data_end - orig_data_end; 4989 if (off != 0) { 4990 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4991 skb->len += off; /* positive on grow, negative on shrink */ 4992 } 4993 4994 /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers 4995 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here. 
4996 */ 4997 if (xdp_buff_has_frags(xdp)) 4998 skb->data_len = skb_shinfo(skb)->xdp_frags_size; 4999 else 5000 skb->data_len = 0; 5001 5002 /* check if XDP changed eth hdr such SKB needs update */ 5003 eth = (struct ethhdr *)xdp->data; 5004 if ((orig_eth_type != eth->h_proto) || 5005 (orig_host != ether_addr_equal_64bits(eth->h_dest, 5006 skb->dev->dev_addr)) || 5007 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 5008 __skb_push(skb, ETH_HLEN); 5009 skb->pkt_type = PACKET_HOST; 5010 skb->protocol = eth_type_trans(skb, skb->dev); 5011 } 5012 5013 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 5014 * before calling us again on redirect path. We do not call do_redirect 5015 * as we leave that up to the caller. 5016 * 5017 * Caller is responsible for managing lifetime of skb (i.e. calling 5018 * kfree_skb in response to actions it cannot handle/XDP_DROP). 5019 */ 5020 switch (act) { 5021 case XDP_REDIRECT: 5022 case XDP_TX: 5023 __skb_push(skb, mac_len); 5024 break; 5025 case XDP_PASS: 5026 metalen = xdp->data - xdp->data_meta; 5027 if (metalen) 5028 skb_metadata_set(skb, metalen); 5029 break; 5030 } 5031 5032 return act; 5033 } 5034 5035 static int 5036 netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog) 5037 { 5038 struct sk_buff *skb = *pskb; 5039 int err, hroom, troom; 5040 5041 if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog)) 5042 return 0; 5043 5044 /* In case we have to go down the path and also linearize, 5045 * then lets do the pskb_expand_head() work just once here. 5046 */ 5047 hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 5048 troom = skb->tail + skb->data_len - skb->end; 5049 err = pskb_expand_head(skb, 5050 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 5051 troom > 0 ? troom + 128 : 0, GFP_ATOMIC); 5052 if (err) 5053 return err; 5054 5055 return skb_linearize(skb); 5056 } 5057 5058 static u32 netif_receive_generic_xdp(struct sk_buff **pskb, 5059 struct xdp_buff *xdp, 5060 struct bpf_prog *xdp_prog) 5061 { 5062 struct sk_buff *skb = *pskb; 5063 u32 mac_len, act = XDP_DROP; 5064 5065 /* Reinjected packets coming from act_mirred or similar should 5066 * not get XDP generic processing. 5067 */ 5068 if (skb_is_redirected(skb)) 5069 return XDP_PASS; 5070 5071 /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM 5072 * bytes. This is the guarantee that also native XDP provides, 5073 * thus we need to do it here as well. 5074 */ 5075 mac_len = skb->data - skb_mac_header(skb); 5076 __skb_push(skb, mac_len); 5077 5078 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 5079 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 5080 if (netif_skb_check_for_xdp(pskb, xdp_prog)) 5081 goto do_drop; 5082 } 5083 5084 __skb_pull(*pskb, mac_len); 5085 5086 act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog); 5087 switch (act) { 5088 case XDP_REDIRECT: 5089 case XDP_TX: 5090 case XDP_PASS: 5091 break; 5092 default: 5093 bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act); 5094 fallthrough; 5095 case XDP_ABORTED: 5096 trace_xdp_exception((*pskb)->dev, xdp_prog, act); 5097 fallthrough; 5098 case XDP_DROP: 5099 do_drop: 5100 kfree_skb(*pskb); 5101 break; 5102 } 5103 5104 return act; 5105 } 5106 5107 /* When doing generic XDP we have to bypass the qdisc layer and the 5108 * network taps in order to match in-driver-XDP behavior. This also means 5109 * that XDP packets are able to starve other packets going through a qdisc, 5110 * and DDOS attacks will be more effective. 
In-driver-XDP use dedicated TX 5111 * queues, so they do not have this starvation issue. 5112 */ 5113 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 5114 { 5115 struct net_device *dev = skb->dev; 5116 struct netdev_queue *txq; 5117 bool free_skb = true; 5118 int cpu, rc; 5119 5120 txq = netdev_core_pick_tx(dev, skb, NULL); 5121 cpu = smp_processor_id(); 5122 HARD_TX_LOCK(dev, txq, cpu); 5123 if (!netif_xmit_frozen_or_drv_stopped(txq)) { 5124 rc = netdev_start_xmit(skb, dev, txq, 0); 5125 if (dev_xmit_complete(rc)) 5126 free_skb = false; 5127 } 5128 HARD_TX_UNLOCK(dev, txq); 5129 if (free_skb) { 5130 trace_xdp_exception(dev, xdp_prog, XDP_TX); 5131 dev_core_stats_tx_dropped_inc(dev); 5132 kfree_skb(skb); 5133 } 5134 } 5135 5136 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 5137 5138 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb) 5139 { 5140 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 5141 5142 if (xdp_prog) { 5143 struct xdp_buff xdp; 5144 u32 act; 5145 int err; 5146 5147 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 5148 act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog); 5149 if (act != XDP_PASS) { 5150 switch (act) { 5151 case XDP_REDIRECT: 5152 err = xdp_do_generic_redirect((*pskb)->dev, *pskb, 5153 &xdp, xdp_prog); 5154 if (err) 5155 goto out_redir; 5156 break; 5157 case XDP_TX: 5158 generic_xdp_tx(*pskb, xdp_prog); 5159 break; 5160 } 5161 bpf_net_ctx_clear(bpf_net_ctx); 5162 return XDP_DROP; 5163 } 5164 bpf_net_ctx_clear(bpf_net_ctx); 5165 } 5166 return XDP_PASS; 5167 out_redir: 5168 bpf_net_ctx_clear(bpf_net_ctx); 5169 kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP); 5170 return XDP_DROP; 5171 } 5172 EXPORT_SYMBOL_GPL(do_xdp_generic); 5173 5174 static int netif_rx_internal(struct sk_buff *skb) 5175 { 5176 int ret; 5177 5178 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb); 5179 5180 trace_netif_rx(skb); 5181 5182 #ifdef CONFIG_RPS 5183 if (static_branch_unlikely(&rps_needed)) { 5184 struct rps_dev_flow voidflow, *rflow = &voidflow; 5185 int cpu; 5186 5187 rcu_read_lock(); 5188 5189 cpu = get_rps_cpu(skb->dev, skb, &rflow); 5190 if (cpu < 0) 5191 cpu = smp_processor_id(); 5192 5193 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5194 5195 rcu_read_unlock(); 5196 } else 5197 #endif 5198 { 5199 unsigned int qtail; 5200 5201 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 5202 } 5203 return ret; 5204 } 5205 5206 /** 5207 * __netif_rx - Slightly optimized version of netif_rx 5208 * @skb: buffer to post 5209 * 5210 * This behaves as netif_rx except that it does not disable bottom halves. 5211 * As a result this function may only be invoked from the interrupt context 5212 * (either hard or soft interrupt). 5213 */ 5214 int __netif_rx(struct sk_buff *skb) 5215 { 5216 int ret; 5217 5218 lockdep_assert_once(hardirq_count() | softirq_count()); 5219 5220 trace_netif_rx_entry(skb); 5221 ret = netif_rx_internal(skb); 5222 trace_netif_rx_exit(ret); 5223 return ret; 5224 } 5225 EXPORT_SYMBOL(__netif_rx); 5226 5227 /** 5228 * netif_rx - post buffer to the network code 5229 * @skb: buffer to post 5230 * 5231 * This function receives a packet from a device driver and queues it for 5232 * the upper (protocol) levels to process via the backlog NAPI device. It 5233 * always succeeds. The buffer may be dropped during processing for 5234 * congestion control or by the protocol layers. 5235 * The network buffer is passed via the backlog NAPI device. Modern NIC 5236 * driver should use NAPI and GRO. 
5237 * This function can be used from interrupt and from process context. The
5238 * caller from process context must not disable interrupts before invoking
5239 * this function.
5240 *
5241 * return values:
5242 * NET_RX_SUCCESS (no congestion)
5243 * NET_RX_DROP (packet was dropped)
5244 *
5245 */
5246 int netif_rx(struct sk_buff *skb)
5247 {
5248 bool need_bh_off = !(hardirq_count() | softirq_count());
5249 int ret;
5250
5251 if (need_bh_off)
5252 local_bh_disable();
5253 trace_netif_rx_entry(skb);
5254 ret = netif_rx_internal(skb);
5255 trace_netif_rx_exit(ret);
5256 if (need_bh_off)
5257 local_bh_enable();
5258 return ret;
5259 }
5260 EXPORT_SYMBOL(netif_rx);
5261
5262 static __latent_entropy void net_tx_action(void)
5263 {
5264 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5265
5266 if (sd->completion_queue) {
5267 struct sk_buff *clist;
5268
5269 local_irq_disable();
5270 clist = sd->completion_queue;
5271 sd->completion_queue = NULL;
5272 local_irq_enable();
5273
5274 while (clist) {
5275 struct sk_buff *skb = clist;
5276
5277 clist = clist->next;
5278
5279 WARN_ON(refcount_read(&skb->users));
5280 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5281 trace_consume_skb(skb, net_tx_action);
5282 else
5283 trace_kfree_skb(skb, net_tx_action,
5284 get_kfree_skb_cb(skb)->reason, NULL);
5285
5286 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5287 __kfree_skb(skb);
5288 else
5289 __napi_kfree_skb(skb,
5290 get_kfree_skb_cb(skb)->reason);
5291 }
5292 }
5293
5294 if (sd->output_queue) {
5295 struct Qdisc *head;
5296
5297 local_irq_disable();
5298 head = sd->output_queue;
5299 sd->output_queue = NULL;
5300 sd->output_queue_tailp = &sd->output_queue;
5301 local_irq_enable();
5302
5303 rcu_read_lock();
5304
5305 while (head) {
5306 struct Qdisc *q = head;
5307 spinlock_t *root_lock = NULL;
5308
5309 head = head->next_sched;
5310
5311 /* We need to make sure head->next_sched is read
5312 * before clearing __QDISC_STATE_SCHED
5313 */
5314 smp_mb__before_atomic();
5315
5316 if (!(q->flags & TCQ_F_NOLOCK)) {
5317 root_lock = qdisc_lock(q);
5318 spin_lock(root_lock);
5319 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5320 &q->state))) {
5321 /* There is a synchronize_net() between
5322 * STATE_DEACTIVATED flag being set and
5323 * qdisc_reset()/some_qdisc_is_busy() in
5324 * dev_deactivate(), so we can safely bail out
5325 * early here to avoid data race between
5326 * qdisc_deactivate() and some_qdisc_is_busy()
5327 * for lockless qdisc.
5328 */
5329 clear_bit(__QDISC_STATE_SCHED, &q->state);
5330 continue;
5331 }
5332
5333 clear_bit(__QDISC_STATE_SCHED, &q->state);
5334 qdisc_run(q);
5335 if (root_lock)
5336 spin_unlock(root_lock);
5337 }
5338
5339 rcu_read_unlock();
5340 }
5341
5342 xfrm_dev_backlog(sd);
5343 }
5344
5345 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5346 /* This hook is defined here for ATM LANE */
5347 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5348 unsigned char *addr) __read_mostly;
5349 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5350 #endif
5351
5352 /**
5353 * netdev_is_rx_handler_busy - check if receive handler is registered
5354 * @dev: device to check
5355 *
5356 * Check if a receive handler is already registered for a given device.
5357 * Return true if there is one.
5358 *
5359 * The caller must hold the rtnl_mutex.
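 *
 * A minimal usage sketch, not part of this file (my_handler and priv are
 * hypothetical): a caller would typically check this under RTNL before
 * installing its own handler, e.g.
 *
 *	rtnl_lock();
 *	if (!netdev_is_rx_handler_busy(dev))
 *		err = netdev_rx_handler_register(dev, my_handler, priv);
 *	rtnl_unlock();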
5360 */ 5361 bool netdev_is_rx_handler_busy(struct net_device *dev) 5362 { 5363 ASSERT_RTNL(); 5364 return dev && rtnl_dereference(dev->rx_handler); 5365 } 5366 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5367 5368 /** 5369 * netdev_rx_handler_register - register receive handler 5370 * @dev: device to register a handler for 5371 * @rx_handler: receive handler to register 5372 * @rx_handler_data: data pointer that is used by rx handler 5373 * 5374 * Register a receive handler for a device. This handler will then be 5375 * called from __netif_receive_skb. A negative errno code is returned 5376 * on a failure. 5377 * 5378 * The caller must hold the rtnl_mutex. 5379 * 5380 * For a general description of rx_handler, see enum rx_handler_result. 5381 */ 5382 int netdev_rx_handler_register(struct net_device *dev, 5383 rx_handler_func_t *rx_handler, 5384 void *rx_handler_data) 5385 { 5386 if (netdev_is_rx_handler_busy(dev)) 5387 return -EBUSY; 5388 5389 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5390 return -EINVAL; 5391 5392 /* Note: rx_handler_data must be set before rx_handler */ 5393 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5394 rcu_assign_pointer(dev->rx_handler, rx_handler); 5395 5396 return 0; 5397 } 5398 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5399 5400 /** 5401 * netdev_rx_handler_unregister - unregister receive handler 5402 * @dev: device to unregister a handler from 5403 * 5404 * Unregister a receive handler from a device. 5405 * 5406 * The caller must hold the rtnl_mutex. 5407 */ 5408 void netdev_rx_handler_unregister(struct net_device *dev) 5409 { 5410 5411 ASSERT_RTNL(); 5412 RCU_INIT_POINTER(dev->rx_handler, NULL); 5413 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5414 * section has a guarantee to see a non NULL rx_handler_data 5415 * as well. 5416 */ 5417 synchronize_net(); 5418 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5419 } 5420 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5421 5422 /* 5423 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5424 * the special handling of PFMEMALLOC skbs. 
5425 */ 5426 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5427 { 5428 switch (skb->protocol) { 5429 case htons(ETH_P_ARP): 5430 case htons(ETH_P_IP): 5431 case htons(ETH_P_IPV6): 5432 case htons(ETH_P_8021Q): 5433 case htons(ETH_P_8021AD): 5434 return true; 5435 default: 5436 return false; 5437 } 5438 } 5439 5440 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5441 int *ret, struct net_device *orig_dev) 5442 { 5443 if (nf_hook_ingress_active(skb)) { 5444 int ingress_retval; 5445 5446 if (*pt_prev) { 5447 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5448 *pt_prev = NULL; 5449 } 5450 5451 rcu_read_lock(); 5452 ingress_retval = nf_hook_ingress(skb); 5453 rcu_read_unlock(); 5454 return ingress_retval; 5455 } 5456 return 0; 5457 } 5458 5459 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5460 struct packet_type **ppt_prev) 5461 { 5462 struct packet_type *ptype, *pt_prev; 5463 rx_handler_func_t *rx_handler; 5464 struct sk_buff *skb = *pskb; 5465 struct net_device *orig_dev; 5466 bool deliver_exact = false; 5467 int ret = NET_RX_DROP; 5468 __be16 type; 5469 5470 net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb); 5471 5472 trace_netif_receive_skb(skb); 5473 5474 orig_dev = skb->dev; 5475 5476 skb_reset_network_header(skb); 5477 if (!skb_transport_header_was_set(skb)) 5478 skb_reset_transport_header(skb); 5479 skb_reset_mac_len(skb); 5480 5481 pt_prev = NULL; 5482 5483 another_round: 5484 skb->skb_iif = skb->dev->ifindex; 5485 5486 __this_cpu_inc(softnet_data.processed); 5487 5488 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5489 int ret2; 5490 5491 migrate_disable(); 5492 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), 5493 &skb); 5494 migrate_enable(); 5495 5496 if (ret2 != XDP_PASS) { 5497 ret = NET_RX_DROP; 5498 goto out; 5499 } 5500 } 5501 5502 if (eth_type_vlan(skb->protocol)) { 5503 skb = skb_vlan_untag(skb); 5504 if (unlikely(!skb)) 5505 goto out; 5506 } 5507 5508 if (skb_skip_tc_classify(skb)) 5509 goto skip_classify; 5510 5511 if (pfmemalloc) 5512 goto skip_taps; 5513 5514 list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) { 5515 if (pt_prev) 5516 ret = deliver_skb(skb, pt_prev, orig_dev); 5517 pt_prev = ptype; 5518 } 5519 5520 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5521 if (pt_prev) 5522 ret = deliver_skb(skb, pt_prev, orig_dev); 5523 pt_prev = ptype; 5524 } 5525 5526 skip_taps: 5527 #ifdef CONFIG_NET_INGRESS 5528 if (static_branch_unlikely(&ingress_needed_key)) { 5529 bool another = false; 5530 5531 nf_skip_egress(skb, true); 5532 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5533 &another); 5534 if (another) 5535 goto another_round; 5536 if (!skb) 5537 goto out; 5538 5539 nf_skip_egress(skb, false); 5540 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5541 goto out; 5542 } 5543 #endif 5544 skb_reset_redirect(skb); 5545 skip_classify: 5546 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5547 goto drop; 5548 5549 if (skb_vlan_tag_present(skb)) { 5550 if (pt_prev) { 5551 ret = deliver_skb(skb, pt_prev, orig_dev); 5552 pt_prev = NULL; 5553 } 5554 if (vlan_do_receive(&skb)) 5555 goto another_round; 5556 else if (unlikely(!skb)) 5557 goto out; 5558 } 5559 5560 rx_handler = rcu_dereference(skb->dev->rx_handler); 5561 if (rx_handler) { 5562 if (pt_prev) { 5563 ret = deliver_skb(skb, pt_prev, orig_dev); 5564 pt_prev = NULL; 5565 } 5566 switch (rx_handler(&skb)) { 5567 case RX_HANDLER_CONSUMED: 5568 ret = NET_RX_SUCCESS; 5569 goto out; 5570 
case RX_HANDLER_ANOTHER: 5571 goto another_round; 5572 case RX_HANDLER_EXACT: 5573 deliver_exact = true; 5574 break; 5575 case RX_HANDLER_PASS: 5576 break; 5577 default: 5578 BUG(); 5579 } 5580 } 5581 5582 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5583 check_vlan_id: 5584 if (skb_vlan_tag_get_id(skb)) { 5585 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5586 * find vlan device. 5587 */ 5588 skb->pkt_type = PACKET_OTHERHOST; 5589 } else if (eth_type_vlan(skb->protocol)) { 5590 /* Outer header is 802.1P with vlan 0, inner header is 5591 * 802.1Q or 802.1AD and vlan_do_receive() above could 5592 * not find vlan dev for vlan id 0. 5593 */ 5594 __vlan_hwaccel_clear_tag(skb); 5595 skb = skb_vlan_untag(skb); 5596 if (unlikely(!skb)) 5597 goto out; 5598 if (vlan_do_receive(&skb)) 5599 /* After stripping off 802.1P header with vlan 0 5600 * vlan dev is found for inner header. 5601 */ 5602 goto another_round; 5603 else if (unlikely(!skb)) 5604 goto out; 5605 else 5606 /* We have stripped outer 802.1P vlan 0 header. 5607 * But could not find vlan dev. 5608 * check again for vlan id to set OTHERHOST. 5609 */ 5610 goto check_vlan_id; 5611 } 5612 /* Note: we might in the future use prio bits 5613 * and set skb->priority like in vlan_do_receive() 5614 * For the time being, just ignore Priority Code Point 5615 */ 5616 __vlan_hwaccel_clear_tag(skb); 5617 } 5618 5619 type = skb->protocol; 5620 5621 /* deliver only exact match when indicated */ 5622 if (likely(!deliver_exact)) { 5623 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5624 &ptype_base[ntohs(type) & 5625 PTYPE_HASH_MASK]); 5626 } 5627 5628 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5629 &orig_dev->ptype_specific); 5630 5631 if (unlikely(skb->dev != orig_dev)) { 5632 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5633 &skb->dev->ptype_specific); 5634 } 5635 5636 if (pt_prev) { 5637 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5638 goto drop; 5639 *ppt_prev = pt_prev; 5640 } else { 5641 drop: 5642 if (!deliver_exact) 5643 dev_core_stats_rx_dropped_inc(skb->dev); 5644 else 5645 dev_core_stats_rx_nohandler_inc(skb->dev); 5646 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); 5647 /* Jamal, now you will not able to escape explaining 5648 * me how you were going to use this. :-) 5649 */ 5650 ret = NET_RX_DROP; 5651 } 5652 5653 out: 5654 /* The invariant here is that if *ppt_prev is not NULL 5655 * then skb should also be non-NULL. 5656 * 5657 * Apparently *ppt_prev assignment above holds this invariant due to 5658 * skb dereferencing near it. 5659 */ 5660 *pskb = skb; 5661 return ret; 5662 } 5663 5664 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5665 { 5666 struct net_device *orig_dev = skb->dev; 5667 struct packet_type *pt_prev = NULL; 5668 int ret; 5669 5670 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5671 if (pt_prev) 5672 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5673 skb->dev, pt_prev, orig_dev); 5674 return ret; 5675 } 5676 5677 /** 5678 * netif_receive_skb_core - special purpose version of netif_receive_skb 5679 * @skb: buffer to process 5680 * 5681 * More direct receive version of netif_receive_skb(). It should 5682 * only be used by callers that have a need to skip RPS and Generic XDP. 5683 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5684 * 5685 * This function may only be called from softirq context and interrupts 5686 * should be enabled. 
5687 * 5688 * Return values (usually ignored): 5689 * NET_RX_SUCCESS: no congestion 5690 * NET_RX_DROP: packet was dropped 5691 */ 5692 int netif_receive_skb_core(struct sk_buff *skb) 5693 { 5694 int ret; 5695 5696 rcu_read_lock(); 5697 ret = __netif_receive_skb_one_core(skb, false); 5698 rcu_read_unlock(); 5699 5700 return ret; 5701 } 5702 EXPORT_SYMBOL(netif_receive_skb_core); 5703 5704 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5705 struct packet_type *pt_prev, 5706 struct net_device *orig_dev) 5707 { 5708 struct sk_buff *skb, *next; 5709 5710 if (!pt_prev) 5711 return; 5712 if (list_empty(head)) 5713 return; 5714 if (pt_prev->list_func != NULL) 5715 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5716 ip_list_rcv, head, pt_prev, orig_dev); 5717 else 5718 list_for_each_entry_safe(skb, next, head, list) { 5719 skb_list_del_init(skb); 5720 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5721 } 5722 } 5723 5724 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5725 { 5726 /* Fast-path assumptions: 5727 * - There is no RX handler. 5728 * - Only one packet_type matches. 5729 * If either of these fails, we will end up doing some per-packet 5730 * processing in-line, then handling the 'last ptype' for the whole 5731 * sublist. This can't cause out-of-order delivery to any single ptype, 5732 * because the 'last ptype' must be constant across the sublist, and all 5733 * other ptypes are handled per-packet. 5734 */ 5735 /* Current (common) ptype of sublist */ 5736 struct packet_type *pt_curr = NULL; 5737 /* Current (common) orig_dev of sublist */ 5738 struct net_device *od_curr = NULL; 5739 struct sk_buff *skb, *next; 5740 LIST_HEAD(sublist); 5741 5742 list_for_each_entry_safe(skb, next, head, list) { 5743 struct net_device *orig_dev = skb->dev; 5744 struct packet_type *pt_prev = NULL; 5745 5746 skb_list_del_init(skb); 5747 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5748 if (!pt_prev) 5749 continue; 5750 if (pt_curr != pt_prev || od_curr != orig_dev) { 5751 /* dispatch old sublist */ 5752 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5753 /* start new sublist */ 5754 INIT_LIST_HEAD(&sublist); 5755 pt_curr = pt_prev; 5756 od_curr = orig_dev; 5757 } 5758 list_add_tail(&skb->list, &sublist); 5759 } 5760 5761 /* dispatch final sublist */ 5762 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5763 } 5764 5765 static int __netif_receive_skb(struct sk_buff *skb) 5766 { 5767 int ret; 5768 5769 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5770 unsigned int noreclaim_flag; 5771 5772 /* 5773 * PFMEMALLOC skbs are special, they should 5774 * - be delivered to SOCK_MEMALLOC sockets only 5775 * - stay away from userspace 5776 * - have bounded memory usage 5777 * 5778 * Use PF_MEMALLOC as this saves us from propagating the allocation 5779 * context down to all allocation sites. 5780 */ 5781 noreclaim_flag = memalloc_noreclaim_save(); 5782 ret = __netif_receive_skb_one_core(skb, true); 5783 memalloc_noreclaim_restore(noreclaim_flag); 5784 } else 5785 ret = __netif_receive_skb_one_core(skb, false); 5786 5787 return ret; 5788 } 5789 5790 static void __netif_receive_skb_list(struct list_head *head) 5791 { 5792 unsigned long noreclaim_flag = 0; 5793 struct sk_buff *skb, *next; 5794 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5795 5796 list_for_each_entry_safe(skb, next, head, list) { 5797 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5798 struct list_head sublist; 5799 5800 /* Handle the previous sublist */ 5801 list_cut_before(&sublist, head, &skb->list); 5802 if (!list_empty(&sublist)) 5803 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5804 pfmemalloc = !pfmemalloc; 5805 /* See comments in __netif_receive_skb */ 5806 if (pfmemalloc) 5807 noreclaim_flag = memalloc_noreclaim_save(); 5808 else 5809 memalloc_noreclaim_restore(noreclaim_flag); 5810 } 5811 } 5812 /* Handle the remaining sublist */ 5813 if (!list_empty(head)) 5814 __netif_receive_skb_list_core(head, pfmemalloc); 5815 /* Restore pflags */ 5816 if (pfmemalloc) 5817 memalloc_noreclaim_restore(noreclaim_flag); 5818 } 5819 5820 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5821 { 5822 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5823 struct bpf_prog *new = xdp->prog; 5824 int ret = 0; 5825 5826 switch (xdp->command) { 5827 case XDP_SETUP_PROG: 5828 rcu_assign_pointer(dev->xdp_prog, new); 5829 if (old) 5830 bpf_prog_put(old); 5831 5832 if (old && !new) { 5833 static_branch_dec(&generic_xdp_needed_key); 5834 } else if (new && !old) { 5835 static_branch_inc(&generic_xdp_needed_key); 5836 dev_disable_lro(dev); 5837 dev_disable_gro_hw(dev); 5838 } 5839 break; 5840 5841 default: 5842 ret = -EINVAL; 5843 break; 5844 } 5845 5846 return ret; 5847 } 5848 5849 static int netif_receive_skb_internal(struct sk_buff *skb) 5850 { 5851 int ret; 5852 5853 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb); 5854 5855 if (skb_defer_rx_timestamp(skb)) 5856 return NET_RX_SUCCESS; 5857 5858 rcu_read_lock(); 5859 #ifdef CONFIG_RPS 5860 if (static_branch_unlikely(&rps_needed)) { 5861 struct rps_dev_flow voidflow, *rflow = &voidflow; 5862 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5863 5864 if (cpu >= 0) { 5865 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5866 rcu_read_unlock(); 5867 return ret; 5868 } 5869 } 5870 #endif 5871 ret = __netif_receive_skb(skb); 5872 rcu_read_unlock(); 5873 return ret; 5874 } 5875 5876 void netif_receive_skb_list_internal(struct list_head *head) 5877 { 5878 struct sk_buff *skb, *next; 5879 LIST_HEAD(sublist); 5880 5881 list_for_each_entry_safe(skb, next, head, list) { 5882 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), 5883 skb); 5884 skb_list_del_init(skb); 5885 if (!skb_defer_rx_timestamp(skb)) 5886 list_add_tail(&skb->list, &sublist); 5887 } 5888 list_splice_init(&sublist, head); 5889 5890 rcu_read_lock(); 5891 #ifdef CONFIG_RPS 5892 if (static_branch_unlikely(&rps_needed)) { 5893 list_for_each_entry_safe(skb, next, head, list) { 5894 struct rps_dev_flow voidflow, *rflow = &voidflow; 5895 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5896 5897 if (cpu >= 0) { 5898 /* Will be handled, remove from list */ 5899 skb_list_del_init(skb); 5900 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5901 } 5902 } 5903 } 5904 #endif 5905 __netif_receive_skb_list(head); 5906 rcu_read_unlock(); 5907 } 5908 5909 /** 5910 * netif_receive_skb - process receive buffer from network 5911 * @skb: buffer to process 5912 * 5913 * netif_receive_skb() is the main receive data processing function. 5914 * It always succeeds. The buffer may be dropped during processing 5915 * for congestion control or by the protocol layers. 5916 * 5917 * This function may only be called from softirq context and interrupts 5918 * should be enabled. 
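 *
 * A minimal sketch of the typical caller, a NAPI poll routine handing a
 * completed buffer up the stack (the driver-side variables here are
 * hypothetical, not part of this file):
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_receive_skb(skb);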
5919 * 5920 * Return values (usually ignored): 5921 * NET_RX_SUCCESS: no congestion 5922 * NET_RX_DROP: packet was dropped 5923 */ 5924 int netif_receive_skb(struct sk_buff *skb) 5925 { 5926 int ret; 5927 5928 trace_netif_receive_skb_entry(skb); 5929 5930 ret = netif_receive_skb_internal(skb); 5931 trace_netif_receive_skb_exit(ret); 5932 5933 return ret; 5934 } 5935 EXPORT_SYMBOL(netif_receive_skb); 5936 5937 /** 5938 * netif_receive_skb_list - process many receive buffers from network 5939 * @head: list of skbs to process. 5940 * 5941 * Since return value of netif_receive_skb() is normally ignored, and 5942 * wouldn't be meaningful for a list, this function returns void. 5943 * 5944 * This function may only be called from softirq context and interrupts 5945 * should be enabled. 5946 */ 5947 void netif_receive_skb_list(struct list_head *head) 5948 { 5949 struct sk_buff *skb; 5950 5951 if (list_empty(head)) 5952 return; 5953 if (trace_netif_receive_skb_list_entry_enabled()) { 5954 list_for_each_entry(skb, head, list) 5955 trace_netif_receive_skb_list_entry(skb); 5956 } 5957 netif_receive_skb_list_internal(head); 5958 trace_netif_receive_skb_list_exit(0); 5959 } 5960 EXPORT_SYMBOL(netif_receive_skb_list); 5961 5962 static DEFINE_PER_CPU(struct work_struct, flush_works); 5963 5964 /* Network device is going away, flush any packets still pending */ 5965 static void flush_backlog(struct work_struct *work) 5966 { 5967 struct sk_buff *skb, *tmp; 5968 struct softnet_data *sd; 5969 5970 local_bh_disable(); 5971 sd = this_cpu_ptr(&softnet_data); 5972 5973 backlog_lock_irq_disable(sd); 5974 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5975 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5976 __skb_unlink(skb, &sd->input_pkt_queue); 5977 dev_kfree_skb_irq(skb); 5978 rps_input_queue_head_incr(sd); 5979 } 5980 } 5981 backlog_unlock_irq_enable(sd); 5982 5983 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 5984 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5985 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5986 __skb_unlink(skb, &sd->process_queue); 5987 kfree_skb(skb); 5988 rps_input_queue_head_incr(sd); 5989 } 5990 } 5991 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 5992 local_bh_enable(); 5993 } 5994 5995 static bool flush_required(int cpu) 5996 { 5997 #if IS_ENABLED(CONFIG_RPS) 5998 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5999 bool do_flush; 6000 6001 backlog_lock_irq_disable(sd); 6002 6003 /* as insertion into process_queue happens with the rps lock held, 6004 * process_queue access may race only with dequeue 6005 */ 6006 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 6007 !skb_queue_empty_lockless(&sd->process_queue); 6008 backlog_unlock_irq_enable(sd); 6009 6010 return do_flush; 6011 #endif 6012 /* without RPS we can't safely check input_pkt_queue: during a 6013 * concurrent remote skb_queue_splice() we can detect as empty both 6014 * input_pkt_queue and process_queue even if the latter could end-up 6015 * containing a lot of packets. 
6016 */ 6017 return true; 6018 } 6019 6020 static void flush_all_backlogs(void) 6021 { 6022 static cpumask_t flush_cpus; 6023 unsigned int cpu; 6024 6025 /* since we are under rtnl lock protection we can use static data 6026 * for the cpumask and avoid allocating on stack the possibly 6027 * large mask 6028 */ 6029 ASSERT_RTNL(); 6030 6031 cpus_read_lock(); 6032 6033 cpumask_clear(&flush_cpus); 6034 for_each_online_cpu(cpu) { 6035 if (flush_required(cpu)) { 6036 queue_work_on(cpu, system_highpri_wq, 6037 per_cpu_ptr(&flush_works, cpu)); 6038 cpumask_set_cpu(cpu, &flush_cpus); 6039 } 6040 } 6041 6042 /* we can have in flight packet[s] on the cpus we are not flushing, 6043 * synchronize_net() in unregister_netdevice_many() will take care of 6044 * them 6045 */ 6046 for_each_cpu(cpu, &flush_cpus) 6047 flush_work(per_cpu_ptr(&flush_works, cpu)); 6048 6049 cpus_read_unlock(); 6050 } 6051 6052 static void net_rps_send_ipi(struct softnet_data *remsd) 6053 { 6054 #ifdef CONFIG_RPS 6055 while (remsd) { 6056 struct softnet_data *next = remsd->rps_ipi_next; 6057 6058 if (cpu_online(remsd->cpu)) 6059 smp_call_function_single_async(remsd->cpu, &remsd->csd); 6060 remsd = next; 6061 } 6062 #endif 6063 } 6064 6065 /* 6066 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 6067 * Note: called with local irq disabled, but exits with local irq enabled. 6068 */ 6069 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 6070 { 6071 #ifdef CONFIG_RPS 6072 struct softnet_data *remsd = sd->rps_ipi_list; 6073 6074 if (!use_backlog_threads() && remsd) { 6075 sd->rps_ipi_list = NULL; 6076 6077 local_irq_enable(); 6078 6079 /* Send pending IPI's to kick RPS processing on remote cpus. */ 6080 net_rps_send_ipi(remsd); 6081 } else 6082 #endif 6083 local_irq_enable(); 6084 } 6085 6086 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 6087 { 6088 #ifdef CONFIG_RPS 6089 return !use_backlog_threads() && sd->rps_ipi_list; 6090 #else 6091 return false; 6092 #endif 6093 } 6094 6095 static int process_backlog(struct napi_struct *napi, int quota) 6096 { 6097 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 6098 bool again = true; 6099 int work = 0; 6100 6101 /* Check if we have pending ipi, its better to send them now, 6102 * not waiting net_rx_action() end. 6103 */ 6104 if (sd_has_rps_ipi_waiting(sd)) { 6105 local_irq_disable(); 6106 net_rps_action_and_irq_enable(sd); 6107 } 6108 6109 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight); 6110 while (again) { 6111 struct sk_buff *skb; 6112 6113 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6114 while ((skb = __skb_dequeue(&sd->process_queue))) { 6115 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6116 rcu_read_lock(); 6117 __netif_receive_skb(skb); 6118 rcu_read_unlock(); 6119 if (++work >= quota) { 6120 rps_input_queue_head_add(sd, work); 6121 return work; 6122 } 6123 6124 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6125 } 6126 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6127 6128 backlog_lock_irq_disable(sd); 6129 if (skb_queue_empty(&sd->input_pkt_queue)) { 6130 /* 6131 * Inline a custom version of __napi_complete(). 6132 * only current cpu owns and manipulates this napi, 6133 * and NAPI_STATE_SCHED is the only possible flag set 6134 * on backlog. 6135 * We can use a plain write instead of clear_bit(), 6136 * and we dont need an smp_mb() memory barrier. 
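 * The "napi->state &= NAPIF_STATE_THREADED" below is that plain write:
 * it keeps only the THREADED flag and so clears SCHED, the open-coded
 * equivalent of clear_bit(NAPI_STATE_SCHED, &napi->state) for this
 * single-owner case.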
6137 */ 6138 napi->state &= NAPIF_STATE_THREADED; 6139 again = false; 6140 } else { 6141 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6142 skb_queue_splice_tail_init(&sd->input_pkt_queue, 6143 &sd->process_queue); 6144 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6145 } 6146 backlog_unlock_irq_enable(sd); 6147 } 6148 6149 if (work) 6150 rps_input_queue_head_add(sd, work); 6151 return work; 6152 } 6153 6154 /** 6155 * __napi_schedule - schedule for receive 6156 * @n: entry to schedule 6157 * 6158 * The entry's receive function will be scheduled to run. 6159 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 6160 */ 6161 void __napi_schedule(struct napi_struct *n) 6162 { 6163 unsigned long flags; 6164 6165 local_irq_save(flags); 6166 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6167 local_irq_restore(flags); 6168 } 6169 EXPORT_SYMBOL(__napi_schedule); 6170 6171 /** 6172 * napi_schedule_prep - check if napi can be scheduled 6173 * @n: napi context 6174 * 6175 * Test if NAPI routine is already running, and if not mark 6176 * it as running. This is used as a condition variable to 6177 * insure only one NAPI poll instance runs. We also make 6178 * sure there is no pending NAPI disable. 6179 */ 6180 bool napi_schedule_prep(struct napi_struct *n) 6181 { 6182 unsigned long new, val = READ_ONCE(n->state); 6183 6184 do { 6185 if (unlikely(val & NAPIF_STATE_DISABLE)) 6186 return false; 6187 new = val | NAPIF_STATE_SCHED; 6188 6189 /* Sets STATE_MISSED bit if STATE_SCHED was already set 6190 * This was suggested by Alexander Duyck, as compiler 6191 * emits better code than : 6192 * if (val & NAPIF_STATE_SCHED) 6193 * new |= NAPIF_STATE_MISSED; 6194 */ 6195 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6196 NAPIF_STATE_MISSED; 6197 } while (!try_cmpxchg(&n->state, &val, new)); 6198 6199 return !(val & NAPIF_STATE_SCHED); 6200 } 6201 EXPORT_SYMBOL(napi_schedule_prep); 6202 6203 /** 6204 * __napi_schedule_irqoff - schedule for receive 6205 * @n: entry to schedule 6206 * 6207 * Variant of __napi_schedule() assuming hard irqs are masked. 6208 * 6209 * On PREEMPT_RT enabled kernels this maps to __napi_schedule() 6210 * because the interrupt disabled assumption might not be true 6211 * due to force-threaded interrupts and spinlock substitution. 6212 */ 6213 void __napi_schedule_irqoff(struct napi_struct *n) 6214 { 6215 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6216 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6217 else 6218 __napi_schedule(n); 6219 } 6220 EXPORT_SYMBOL(__napi_schedule_irqoff); 6221 6222 bool napi_complete_done(struct napi_struct *n, int work_done) 6223 { 6224 unsigned long flags, val, new, timeout = 0; 6225 bool ret = true; 6226 6227 /* 6228 * 1) Don't let napi dequeue from the cpu poll list 6229 * just in case its running on a different cpu. 6230 * 2) If we are busy polling, do nothing here, we have 6231 * the guarantee we will be called later. 
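 *
 * For reference, a minimal sketch of how a driver poll callback is
 * expected to pair with this function (foo_clean_rx() and
 * foo_irq_enable() are assumed, driver-specific helpers):
 *
 *    static int foo_poll(struct napi_struct *napi, int budget)
 *    {
 *        int work = foo_clean_rx(napi, budget);
 *
 *        if (work < budget && napi_complete_done(napi, work))
 *            foo_irq_enable(napi);
 *        return work;
 *    }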
6232 */ 6233 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6234 NAPIF_STATE_IN_BUSY_POLL))) 6235 return false; 6236 6237 if (work_done) { 6238 if (n->gro_bitmask) 6239 timeout = napi_get_gro_flush_timeout(n); 6240 n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n); 6241 } 6242 if (n->defer_hard_irqs_count > 0) { 6243 n->defer_hard_irqs_count--; 6244 timeout = napi_get_gro_flush_timeout(n); 6245 if (timeout) 6246 ret = false; 6247 } 6248 if (n->gro_bitmask) { 6249 /* When the NAPI instance uses a timeout and keeps postponing 6250 * it, we need to bound somehow the time packets are kept in 6251 * the GRO layer 6252 */ 6253 napi_gro_flush(n, !!timeout); 6254 } 6255 6256 gro_normal_list(n); 6257 6258 if (unlikely(!list_empty(&n->poll_list))) { 6259 /* If n->poll_list is not empty, we need to mask irqs */ 6260 local_irq_save(flags); 6261 list_del_init(&n->poll_list); 6262 local_irq_restore(flags); 6263 } 6264 WRITE_ONCE(n->list_owner, -1); 6265 6266 val = READ_ONCE(n->state); 6267 do { 6268 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6269 6270 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6271 NAPIF_STATE_SCHED_THREADED | 6272 NAPIF_STATE_PREFER_BUSY_POLL); 6273 6274 /* If STATE_MISSED was set, leave STATE_SCHED set, 6275 * because we will call napi->poll() one more time. 6276 * This C code was suggested by Alexander Duyck to help gcc. 6277 */ 6278 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6279 NAPIF_STATE_SCHED; 6280 } while (!try_cmpxchg(&n->state, &val, new)); 6281 6282 if (unlikely(val & NAPIF_STATE_MISSED)) { 6283 __napi_schedule(n); 6284 return false; 6285 } 6286 6287 if (timeout) 6288 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6289 HRTIMER_MODE_REL_PINNED); 6290 return ret; 6291 } 6292 EXPORT_SYMBOL(napi_complete_done); 6293 6294 /* must be called under rcu_read_lock(), as we dont take a reference */ 6295 struct napi_struct *napi_by_id(unsigned int napi_id) 6296 { 6297 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6298 struct napi_struct *napi; 6299 6300 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6301 if (napi->napi_id == napi_id) 6302 return napi; 6303 6304 return NULL; 6305 } 6306 6307 static void skb_defer_free_flush(struct softnet_data *sd) 6308 { 6309 struct sk_buff *skb, *next; 6310 6311 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ 6312 if (!READ_ONCE(sd->defer_list)) 6313 return; 6314 6315 spin_lock(&sd->defer_lock); 6316 skb = sd->defer_list; 6317 sd->defer_list = NULL; 6318 sd->defer_count = 0; 6319 spin_unlock(&sd->defer_lock); 6320 6321 while (skb != NULL) { 6322 next = skb->next; 6323 napi_consume_skb(skb, 1); 6324 skb = next; 6325 } 6326 } 6327 6328 #if defined(CONFIG_NET_RX_BUSY_POLL) 6329 6330 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6331 { 6332 if (!skip_schedule) { 6333 gro_normal_list(napi); 6334 __napi_schedule(napi); 6335 return; 6336 } 6337 6338 if (napi->gro_bitmask) { 6339 /* flush too old packets 6340 * If HZ < 1000, flush all packets. 
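 * (The second argument of napi_gro_flush() is "flush_old": with
 * HZ >= 1000 a jiffy is at most 1ms, so packets merged during the
 * current jiffy may be kept a little longer; with a coarser HZ
 * everything is flushed.)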
6341 */ 6342 napi_gro_flush(napi, HZ >= 1000); 6343 } 6344 6345 gro_normal_list(napi); 6346 clear_bit(NAPI_STATE_SCHED, &napi->state); 6347 } 6348 6349 enum { 6350 NAPI_F_PREFER_BUSY_POLL = 1, 6351 NAPI_F_END_ON_RESCHED = 2, 6352 }; 6353 6354 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, 6355 unsigned flags, u16 budget) 6356 { 6357 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 6358 bool skip_schedule = false; 6359 unsigned long timeout; 6360 int rc; 6361 6362 /* Busy polling means there is a high chance device driver hard irq 6363 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6364 * set in napi_schedule_prep(). 6365 * Since we are about to call napi->poll() once more, we can safely 6366 * clear NAPI_STATE_MISSED. 6367 * 6368 * Note: x86 could use a single "lock and ..." instruction 6369 * to perform these two clear_bit() 6370 */ 6371 clear_bit(NAPI_STATE_MISSED, &napi->state); 6372 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6373 6374 local_bh_disable(); 6375 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 6376 6377 if (flags & NAPI_F_PREFER_BUSY_POLL) { 6378 napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); 6379 timeout = napi_get_gro_flush_timeout(napi); 6380 if (napi->defer_hard_irqs_count && timeout) { 6381 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6382 skip_schedule = true; 6383 } 6384 } 6385 6386 /* All we really want here is to re-enable device interrupts. 6387 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6388 */ 6389 rc = napi->poll(napi, budget); 6390 /* We can't gro_normal_list() here, because napi->poll() might have 6391 * rearmed the napi (napi_complete_done()) in which case it could 6392 * already be running on another CPU. 6393 */ 6394 trace_napi_poll(napi, rc, budget); 6395 netpoll_poll_unlock(have_poll_lock); 6396 if (rc == budget) 6397 __busy_poll_stop(napi, skip_schedule); 6398 bpf_net_ctx_clear(bpf_net_ctx); 6399 local_bh_enable(); 6400 } 6401 6402 static void __napi_busy_loop(unsigned int napi_id, 6403 bool (*loop_end)(void *, unsigned long), 6404 void *loop_end_arg, unsigned flags, u16 budget) 6405 { 6406 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6407 int (*napi_poll)(struct napi_struct *napi, int budget); 6408 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 6409 void *have_poll_lock = NULL; 6410 struct napi_struct *napi; 6411 6412 WARN_ON_ONCE(!rcu_read_lock_held()); 6413 6414 restart: 6415 napi_poll = NULL; 6416 6417 napi = napi_by_id(napi_id); 6418 if (!napi) 6419 return; 6420 6421 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6422 preempt_disable(); 6423 for (;;) { 6424 int work = 0; 6425 6426 local_bh_disable(); 6427 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 6428 if (!napi_poll) { 6429 unsigned long val = READ_ONCE(napi->state); 6430 6431 /* If multiple threads are competing for this napi, 6432 * we avoid dirtying napi->state as much as we can. 
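 * The state is read first, and only if none of DISABLE, SCHED or
 * IN_BUSY_POLL is set do we attempt the single cmpxchg() below that
 * claims SCHED and IN_BUSY_POLL together.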
6433 */ 6434 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6435 NAPIF_STATE_IN_BUSY_POLL)) { 6436 if (flags & NAPI_F_PREFER_BUSY_POLL) 6437 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6438 goto count; 6439 } 6440 if (cmpxchg(&napi->state, val, 6441 val | NAPIF_STATE_IN_BUSY_POLL | 6442 NAPIF_STATE_SCHED) != val) { 6443 if (flags & NAPI_F_PREFER_BUSY_POLL) 6444 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6445 goto count; 6446 } 6447 have_poll_lock = netpoll_poll_lock(napi); 6448 napi_poll = napi->poll; 6449 } 6450 work = napi_poll(napi, budget); 6451 trace_napi_poll(napi, work, budget); 6452 gro_normal_list(napi); 6453 count: 6454 if (work > 0) 6455 __NET_ADD_STATS(dev_net(napi->dev), 6456 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6457 skb_defer_free_flush(this_cpu_ptr(&softnet_data)); 6458 bpf_net_ctx_clear(bpf_net_ctx); 6459 local_bh_enable(); 6460 6461 if (!loop_end || loop_end(loop_end_arg, start_time)) 6462 break; 6463 6464 if (unlikely(need_resched())) { 6465 if (flags & NAPI_F_END_ON_RESCHED) 6466 break; 6467 if (napi_poll) 6468 busy_poll_stop(napi, have_poll_lock, flags, budget); 6469 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6470 preempt_enable(); 6471 rcu_read_unlock(); 6472 cond_resched(); 6473 rcu_read_lock(); 6474 if (loop_end(loop_end_arg, start_time)) 6475 return; 6476 goto restart; 6477 } 6478 cpu_relax(); 6479 } 6480 if (napi_poll) 6481 busy_poll_stop(napi, have_poll_lock, flags, budget); 6482 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6483 preempt_enable(); 6484 } 6485 6486 void napi_busy_loop_rcu(unsigned int napi_id, 6487 bool (*loop_end)(void *, unsigned long), 6488 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6489 { 6490 unsigned flags = NAPI_F_END_ON_RESCHED; 6491 6492 if (prefer_busy_poll) 6493 flags |= NAPI_F_PREFER_BUSY_POLL; 6494 6495 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget); 6496 } 6497 6498 void napi_busy_loop(unsigned int napi_id, 6499 bool (*loop_end)(void *, unsigned long), 6500 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6501 { 6502 unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0; 6503 6504 rcu_read_lock(); 6505 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget); 6506 rcu_read_unlock(); 6507 } 6508 EXPORT_SYMBOL(napi_busy_loop); 6509 6510 void napi_suspend_irqs(unsigned int napi_id) 6511 { 6512 struct napi_struct *napi; 6513 6514 rcu_read_lock(); 6515 napi = napi_by_id(napi_id); 6516 if (napi) { 6517 unsigned long timeout = napi_get_irq_suspend_timeout(napi); 6518 6519 if (timeout) 6520 hrtimer_start(&napi->timer, ns_to_ktime(timeout), 6521 HRTIMER_MODE_REL_PINNED); 6522 } 6523 rcu_read_unlock(); 6524 } 6525 6526 void napi_resume_irqs(unsigned int napi_id) 6527 { 6528 struct napi_struct *napi; 6529 6530 rcu_read_lock(); 6531 napi = napi_by_id(napi_id); 6532 if (napi) { 6533 /* If irq_suspend_timeout is set to 0 between the call to 6534 * napi_suspend_irqs and now, the original value still 6535 * determines the safety timeout as intended and napi_watchdog 6536 * will resume irq processing. 
6537 */ 6538 if (napi_get_irq_suspend_timeout(napi)) { 6539 local_bh_disable(); 6540 napi_schedule(napi); 6541 local_bh_enable(); 6542 } 6543 } 6544 rcu_read_unlock(); 6545 } 6546 6547 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6548 6549 static void __napi_hash_add_with_id(struct napi_struct *napi, 6550 unsigned int napi_id) 6551 { 6552 napi->napi_id = napi_id; 6553 hlist_add_head_rcu(&napi->napi_hash_node, 6554 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6555 } 6556 6557 static void napi_hash_add_with_id(struct napi_struct *napi, 6558 unsigned int napi_id) 6559 { 6560 unsigned long flags; 6561 6562 spin_lock_irqsave(&napi_hash_lock, flags); 6563 WARN_ON_ONCE(napi_by_id(napi_id)); 6564 __napi_hash_add_with_id(napi, napi_id); 6565 spin_unlock_irqrestore(&napi_hash_lock, flags); 6566 } 6567 6568 static void napi_hash_add(struct napi_struct *napi) 6569 { 6570 unsigned long flags; 6571 6572 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6573 return; 6574 6575 spin_lock_irqsave(&napi_hash_lock, flags); 6576 6577 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6578 do { 6579 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6580 napi_gen_id = MIN_NAPI_ID; 6581 } while (napi_by_id(napi_gen_id)); 6582 6583 __napi_hash_add_with_id(napi, napi_gen_id); 6584 6585 spin_unlock_irqrestore(&napi_hash_lock, flags); 6586 } 6587 6588 /* Warning : caller is responsible to make sure rcu grace period 6589 * is respected before freeing memory containing @napi 6590 */ 6591 static void napi_hash_del(struct napi_struct *napi) 6592 { 6593 unsigned long flags; 6594 6595 spin_lock_irqsave(&napi_hash_lock, flags); 6596 6597 hlist_del_init_rcu(&napi->napi_hash_node); 6598 6599 spin_unlock_irqrestore(&napi_hash_lock, flags); 6600 } 6601 6602 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6603 { 6604 struct napi_struct *napi; 6605 6606 napi = container_of(timer, struct napi_struct, timer); 6607 6608 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6609 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 6610 */ 6611 if (!napi_disable_pending(napi) && 6612 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6613 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6614 __napi_schedule_irqoff(napi); 6615 } 6616 6617 return HRTIMER_NORESTART; 6618 } 6619 6620 static void init_gro_hash(struct napi_struct *napi) 6621 { 6622 int i; 6623 6624 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6625 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6626 napi->gro_hash[i].count = 0; 6627 } 6628 napi->gro_bitmask = 0; 6629 } 6630 6631 int dev_set_threaded(struct net_device *dev, bool threaded) 6632 { 6633 struct napi_struct *napi; 6634 int err = 0; 6635 6636 if (dev->threaded == threaded) 6637 return 0; 6638 6639 if (threaded) { 6640 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6641 if (!napi->thread) { 6642 err = napi_kthread_create(napi); 6643 if (err) { 6644 threaded = false; 6645 break; 6646 } 6647 } 6648 } 6649 } 6650 6651 WRITE_ONCE(dev->threaded, threaded); 6652 6653 /* Make sure kthread is created before THREADED bit 6654 * is set. 6655 */ 6656 smp_mb__before_atomic(); 6657 6658 /* Setting/unsetting threaded mode on a napi might not immediately 6659 * take effect, if the current napi instance is actively being 6660 * polled. In this case, the switch between threaded mode and 6661 * softirq mode will happen in the next round of napi_schedule(). 6662 * This should not cause hiccups/stalls to the live traffic. 
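 *
 * Threaded NAPI is normally toggled from user space, e.g.
 * "echo 1 > /sys/class/net/eth0/threaded" (eth0 being just an example
 * interface), which lands here; a driver can also opt in directly:
 *
 *    dev_set_threaded(dev, true);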
6663 */ 6664 list_for_each_entry(napi, &dev->napi_list, dev_list) 6665 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); 6666 6667 return err; 6668 } 6669 EXPORT_SYMBOL(dev_set_threaded); 6670 6671 /** 6672 * netif_queue_set_napi - Associate queue with the napi 6673 * @dev: device to which NAPI and queue belong 6674 * @queue_index: Index of queue 6675 * @type: queue type as RX or TX 6676 * @napi: NAPI context, pass NULL to clear previously set NAPI 6677 * 6678 * Set queue with its corresponding napi context. This should be done after 6679 * registering the NAPI handler for the queue-vector and the queues have been 6680 * mapped to the corresponding interrupt vector. 6681 */ 6682 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, 6683 enum netdev_queue_type type, struct napi_struct *napi) 6684 { 6685 struct netdev_rx_queue *rxq; 6686 struct netdev_queue *txq; 6687 6688 if (WARN_ON_ONCE(napi && !napi->dev)) 6689 return; 6690 if (dev->reg_state >= NETREG_REGISTERED) 6691 ASSERT_RTNL(); 6692 6693 switch (type) { 6694 case NETDEV_QUEUE_TYPE_RX: 6695 rxq = __netif_get_rx_queue(dev, queue_index); 6696 rxq->napi = napi; 6697 return; 6698 case NETDEV_QUEUE_TYPE_TX: 6699 txq = netdev_get_tx_queue(dev, queue_index); 6700 txq->napi = napi; 6701 return; 6702 default: 6703 return; 6704 } 6705 } 6706 EXPORT_SYMBOL(netif_queue_set_napi); 6707 6708 static void napi_restore_config(struct napi_struct *n) 6709 { 6710 n->defer_hard_irqs = n->config->defer_hard_irqs; 6711 n->gro_flush_timeout = n->config->gro_flush_timeout; 6712 n->irq_suspend_timeout = n->config->irq_suspend_timeout; 6713 /* a NAPI ID might be stored in the config, if so use it. if not, use 6714 * napi_hash_add to generate one for us. It will be saved to the config 6715 * in napi_disable. 6716 */ 6717 if (n->config->napi_id) 6718 napi_hash_add_with_id(n, n->config->napi_id); 6719 else 6720 napi_hash_add(n); 6721 } 6722 6723 static void napi_save_config(struct napi_struct *n) 6724 { 6725 n->config->defer_hard_irqs = n->defer_hard_irqs; 6726 n->config->gro_flush_timeout = n->gro_flush_timeout; 6727 n->config->irq_suspend_timeout = n->irq_suspend_timeout; 6728 n->config->napi_id = n->napi_id; 6729 napi_hash_del(n); 6730 } 6731 6732 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 6733 int (*poll)(struct napi_struct *, int), int weight) 6734 { 6735 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6736 return; 6737 6738 INIT_LIST_HEAD(&napi->poll_list); 6739 INIT_HLIST_NODE(&napi->napi_hash_node); 6740 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6741 napi->timer.function = napi_watchdog; 6742 init_gro_hash(napi); 6743 napi->skb = NULL; 6744 INIT_LIST_HEAD(&napi->rx_list); 6745 napi->rx_count = 0; 6746 napi->poll = poll; 6747 if (weight > NAPI_POLL_WEIGHT) 6748 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6749 weight); 6750 napi->weight = weight; 6751 napi->dev = dev; 6752 #ifdef CONFIG_NETPOLL 6753 napi->poll_owner = -1; 6754 #endif 6755 napi->list_owner = -1; 6756 set_bit(NAPI_STATE_SCHED, &napi->state); 6757 set_bit(NAPI_STATE_NPSVC, &napi->state); 6758 list_add_rcu(&napi->dev_list, &dev->napi_list); 6759 6760 /* default settings from sysfs are applied to all NAPIs. 
any per-NAPI 6761 * configuration will be loaded in napi_enable 6762 */ 6763 napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); 6764 napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); 6765 6766 napi_get_frags_check(napi); 6767 /* Create kthread for this napi if dev->threaded is set. 6768 * Clear dev->threaded if kthread creation failed so that 6769 * threaded mode will not be enabled in napi_enable(). 6770 */ 6771 if (dev->threaded && napi_kthread_create(napi)) 6772 dev->threaded = false; 6773 netif_napi_set_irq(napi, -1); 6774 } 6775 EXPORT_SYMBOL(netif_napi_add_weight); 6776 6777 void napi_disable(struct napi_struct *n) 6778 { 6779 unsigned long val, new; 6780 6781 might_sleep(); 6782 set_bit(NAPI_STATE_DISABLE, &n->state); 6783 6784 val = READ_ONCE(n->state); 6785 do { 6786 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 6787 usleep_range(20, 200); 6788 val = READ_ONCE(n->state); 6789 } 6790 6791 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 6792 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 6793 } while (!try_cmpxchg(&n->state, &val, new)); 6794 6795 hrtimer_cancel(&n->timer); 6796 6797 if (n->config) 6798 napi_save_config(n); 6799 else 6800 napi_hash_del(n); 6801 6802 clear_bit(NAPI_STATE_DISABLE, &n->state); 6803 } 6804 EXPORT_SYMBOL(napi_disable); 6805 6806 /** 6807 * napi_enable - enable NAPI scheduling 6808 * @n: NAPI context 6809 * 6810 * Resume NAPI from being scheduled on this context. 6811 * Must be paired with napi_disable. 6812 */ 6813 void napi_enable(struct napi_struct *n) 6814 { 6815 unsigned long new, val = READ_ONCE(n->state); 6816 6817 if (n->config) 6818 napi_restore_config(n); 6819 else 6820 napi_hash_add(n); 6821 6822 do { 6823 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6824 6825 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6826 if (n->dev->threaded && n->thread) 6827 new |= NAPIF_STATE_THREADED; 6828 } while (!try_cmpxchg(&n->state, &val, new)); 6829 } 6830 EXPORT_SYMBOL(napi_enable); 6831 6832 static void flush_gro_hash(struct napi_struct *napi) 6833 { 6834 int i; 6835 6836 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6837 struct sk_buff *skb, *n; 6838 6839 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6840 kfree_skb(skb); 6841 napi->gro_hash[i].count = 0; 6842 } 6843 } 6844 6845 /* Must be called in process context */ 6846 void __netif_napi_del(struct napi_struct *napi) 6847 { 6848 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6849 return; 6850 6851 if (napi->config) { 6852 napi->index = -1; 6853 napi->config = NULL; 6854 } 6855 6856 list_del_rcu(&napi->dev_list); 6857 napi_free_frags(napi); 6858 6859 flush_gro_hash(napi); 6860 napi->gro_bitmask = 0; 6861 6862 if (napi->thread) { 6863 kthread_stop(napi->thread); 6864 napi->thread = NULL; 6865 } 6866 } 6867 EXPORT_SYMBOL(__netif_napi_del); 6868 6869 static int __napi_poll(struct napi_struct *n, bool *repoll) 6870 { 6871 int work, weight; 6872 6873 weight = n->weight; 6874 6875 /* This NAPI_STATE_SCHED test is for avoiding a race 6876 * with netpoll's poll_napi(). Only the entity which 6877 * obtains the lock and sees NAPI_STATE_SCHED set will 6878 * actually make the ->poll() call. Therefore we avoid 6879 * accidentally calling ->poll() when NAPI is not scheduled. 
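 *
 * Note that "weight" is the budget handed to the driver for this poll
 * (typically NAPI_POLL_WEIGHT, i.e. 64 packets): the callback must not
 * report more work than that, and should only complete the NAPI
 * (napi_complete_done()) when it did strictly less.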
6880 */ 6881 work = 0; 6882 if (napi_is_scheduled(n)) { 6883 work = n->poll(n, weight); 6884 trace_napi_poll(n, work, weight); 6885 6886 xdp_do_check_flushed(n); 6887 } 6888 6889 if (unlikely(work > weight)) 6890 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6891 n->poll, work, weight); 6892 6893 if (likely(work < weight)) 6894 return work; 6895 6896 /* Drivers must not modify the NAPI state if they 6897 * consume the entire weight. In such cases this code 6898 * still "owns" the NAPI instance and therefore can 6899 * move the instance around on the list at-will. 6900 */ 6901 if (unlikely(napi_disable_pending(n))) { 6902 napi_complete(n); 6903 return work; 6904 } 6905 6906 /* The NAPI context has more processing work, but busy-polling 6907 * is preferred. Exit early. 6908 */ 6909 if (napi_prefer_busy_poll(n)) { 6910 if (napi_complete_done(n, work)) { 6911 /* If timeout is not set, we need to make sure 6912 * that the NAPI is re-scheduled. 6913 */ 6914 napi_schedule(n); 6915 } 6916 return work; 6917 } 6918 6919 if (n->gro_bitmask) { 6920 /* flush too old packets 6921 * If HZ < 1000, flush all packets. 6922 */ 6923 napi_gro_flush(n, HZ >= 1000); 6924 } 6925 6926 gro_normal_list(n); 6927 6928 /* Some drivers may have called napi_schedule 6929 * prior to exhausting their budget. 6930 */ 6931 if (unlikely(!list_empty(&n->poll_list))) { 6932 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6933 n->dev ? n->dev->name : "backlog"); 6934 return work; 6935 } 6936 6937 *repoll = true; 6938 6939 return work; 6940 } 6941 6942 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6943 { 6944 bool do_repoll = false; 6945 void *have; 6946 int work; 6947 6948 list_del_init(&n->poll_list); 6949 6950 have = netpoll_poll_lock(n); 6951 6952 work = __napi_poll(n, &do_repoll); 6953 6954 if (do_repoll) 6955 list_add_tail(&n->poll_list, repoll); 6956 6957 netpoll_poll_unlock(have); 6958 6959 return work; 6960 } 6961 6962 static int napi_thread_wait(struct napi_struct *napi) 6963 { 6964 set_current_state(TASK_INTERRUPTIBLE); 6965 6966 while (!kthread_should_stop()) { 6967 /* Testing SCHED_THREADED bit here to make sure the current 6968 * kthread owns this napi and could poll on this napi. 6969 * Testing SCHED bit is not enough because SCHED bit might be 6970 * set by some other busy poll thread or by napi_disable(). 
6971 */ 6972 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) { 6973 WARN_ON(!list_empty(&napi->poll_list)); 6974 __set_current_state(TASK_RUNNING); 6975 return 0; 6976 } 6977 6978 schedule(); 6979 set_current_state(TASK_INTERRUPTIBLE); 6980 } 6981 __set_current_state(TASK_RUNNING); 6982 6983 return -1; 6984 } 6985 6986 static void napi_threaded_poll_loop(struct napi_struct *napi) 6987 { 6988 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 6989 struct softnet_data *sd; 6990 unsigned long last_qs = jiffies; 6991 6992 for (;;) { 6993 bool repoll = false; 6994 void *have; 6995 6996 local_bh_disable(); 6997 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 6998 6999 sd = this_cpu_ptr(&softnet_data); 7000 sd->in_napi_threaded_poll = true; 7001 7002 have = netpoll_poll_lock(napi); 7003 __napi_poll(napi, &repoll); 7004 netpoll_poll_unlock(have); 7005 7006 sd->in_napi_threaded_poll = false; 7007 barrier(); 7008 7009 if (sd_has_rps_ipi_waiting(sd)) { 7010 local_irq_disable(); 7011 net_rps_action_and_irq_enable(sd); 7012 } 7013 skb_defer_free_flush(sd); 7014 bpf_net_ctx_clear(bpf_net_ctx); 7015 local_bh_enable(); 7016 7017 if (!repoll) 7018 break; 7019 7020 rcu_softirq_qs_periodic(last_qs); 7021 cond_resched(); 7022 } 7023 } 7024 7025 static int napi_threaded_poll(void *data) 7026 { 7027 struct napi_struct *napi = data; 7028 7029 while (!napi_thread_wait(napi)) 7030 napi_threaded_poll_loop(napi); 7031 7032 return 0; 7033 } 7034 7035 static __latent_entropy void net_rx_action(void) 7036 { 7037 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 7038 unsigned long time_limit = jiffies + 7039 usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs)); 7040 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 7041 int budget = READ_ONCE(net_hotdata.netdev_budget); 7042 LIST_HEAD(list); 7043 LIST_HEAD(repoll); 7044 7045 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 7046 start: 7047 sd->in_net_rx_action = true; 7048 local_irq_disable(); 7049 list_splice_init(&sd->poll_list, &list); 7050 local_irq_enable(); 7051 7052 for (;;) { 7053 struct napi_struct *n; 7054 7055 skb_defer_free_flush(sd); 7056 7057 if (list_empty(&list)) { 7058 if (list_empty(&repoll)) { 7059 sd->in_net_rx_action = false; 7060 barrier(); 7061 /* We need to check if ____napi_schedule() 7062 * had refilled poll_list while 7063 * sd->in_net_rx_action was true. 7064 */ 7065 if (!list_empty(&sd->poll_list)) 7066 goto start; 7067 if (!sd_has_rps_ipi_waiting(sd)) 7068 goto end; 7069 } 7070 break; 7071 } 7072 7073 n = list_first_entry(&list, struct napi_struct, poll_list); 7074 budget -= napi_poll(n, &repoll); 7075 7076 /* If softirq window is exhausted then punt. 7077 * Allow this to run for 2 jiffies since which will allow 7078 * an average latency of 1.5/HZ. 
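 * Both limits are sysctl-driven: net.core.netdev_budget caps the total
 * packet budget and net.core.netdev_budget_usecs caps the elapsed time
 * (by default the equivalent of roughly two jiffies); exhausting either
 * one bumps the time_squeeze counter below.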
7079 */ 7080 if (unlikely(budget <= 0 || 7081 time_after_eq(jiffies, time_limit))) { 7082 sd->time_squeeze++; 7083 break; 7084 } 7085 } 7086 7087 local_irq_disable(); 7088 7089 list_splice_tail_init(&sd->poll_list, &list); 7090 list_splice_tail(&repoll, &list); 7091 list_splice(&list, &sd->poll_list); 7092 if (!list_empty(&sd->poll_list)) 7093 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 7094 else 7095 sd->in_net_rx_action = false; 7096 7097 net_rps_action_and_irq_enable(sd); 7098 end: 7099 bpf_net_ctx_clear(bpf_net_ctx); 7100 } 7101 7102 struct netdev_adjacent { 7103 struct net_device *dev; 7104 netdevice_tracker dev_tracker; 7105 7106 /* upper master flag, there can only be one master device per list */ 7107 bool master; 7108 7109 /* lookup ignore flag */ 7110 bool ignore; 7111 7112 /* counter for the number of times this device was added to us */ 7113 u16 ref_nr; 7114 7115 /* private field for the users */ 7116 void *private; 7117 7118 struct list_head list; 7119 struct rcu_head rcu; 7120 }; 7121 7122 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 7123 struct list_head *adj_list) 7124 { 7125 struct netdev_adjacent *adj; 7126 7127 list_for_each_entry(adj, adj_list, list) { 7128 if (adj->dev == adj_dev) 7129 return adj; 7130 } 7131 return NULL; 7132 } 7133 7134 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 7135 struct netdev_nested_priv *priv) 7136 { 7137 struct net_device *dev = (struct net_device *)priv->data; 7138 7139 return upper_dev == dev; 7140 } 7141 7142 /** 7143 * netdev_has_upper_dev - Check if device is linked to an upper device 7144 * @dev: device 7145 * @upper_dev: upper device to check 7146 * 7147 * Find out if a device is linked to specified upper device and return true 7148 * in case it is. Note that this checks only immediate upper device, 7149 * not through a complete stack of devices. The caller must hold the RTNL lock. 7150 */ 7151 bool netdev_has_upper_dev(struct net_device *dev, 7152 struct net_device *upper_dev) 7153 { 7154 struct netdev_nested_priv priv = { 7155 .data = (void *)upper_dev, 7156 }; 7157 7158 ASSERT_RTNL(); 7159 7160 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7161 &priv); 7162 } 7163 EXPORT_SYMBOL(netdev_has_upper_dev); 7164 7165 /** 7166 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 7167 * @dev: device 7168 * @upper_dev: upper device to check 7169 * 7170 * Find out if a device is linked to specified upper device and return true 7171 * in case it is. Note that this checks the entire upper device chain. 7172 * The caller must hold rcu lock. 7173 */ 7174 7175 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 7176 struct net_device *upper_dev) 7177 { 7178 struct netdev_nested_priv priv = { 7179 .data = (void *)upper_dev, 7180 }; 7181 7182 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7183 &priv); 7184 } 7185 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 7186 7187 /** 7188 * netdev_has_any_upper_dev - Check if device is linked to some device 7189 * @dev: device 7190 * 7191 * Find out if a device is linked to an upper device and return true in case 7192 * it is. The caller must hold the RTNL lock. 
7193 */ 7194 bool netdev_has_any_upper_dev(struct net_device *dev) 7195 { 7196 ASSERT_RTNL(); 7197 7198 return !list_empty(&dev->adj_list.upper); 7199 } 7200 EXPORT_SYMBOL(netdev_has_any_upper_dev); 7201 7202 /** 7203 * netdev_master_upper_dev_get - Get master upper device 7204 * @dev: device 7205 * 7206 * Find a master upper device and return pointer to it or NULL in case 7207 * it's not there. The caller must hold the RTNL lock. 7208 */ 7209 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 7210 { 7211 struct netdev_adjacent *upper; 7212 7213 ASSERT_RTNL(); 7214 7215 if (list_empty(&dev->adj_list.upper)) 7216 return NULL; 7217 7218 upper = list_first_entry(&dev->adj_list.upper, 7219 struct netdev_adjacent, list); 7220 if (likely(upper->master)) 7221 return upper->dev; 7222 return NULL; 7223 } 7224 EXPORT_SYMBOL(netdev_master_upper_dev_get); 7225 7226 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 7227 { 7228 struct netdev_adjacent *upper; 7229 7230 ASSERT_RTNL(); 7231 7232 if (list_empty(&dev->adj_list.upper)) 7233 return NULL; 7234 7235 upper = list_first_entry(&dev->adj_list.upper, 7236 struct netdev_adjacent, list); 7237 if (likely(upper->master) && !upper->ignore) 7238 return upper->dev; 7239 return NULL; 7240 } 7241 7242 /** 7243 * netdev_has_any_lower_dev - Check if device is linked to some device 7244 * @dev: device 7245 * 7246 * Find out if a device is linked to a lower device and return true in case 7247 * it is. The caller must hold the RTNL lock. 7248 */ 7249 static bool netdev_has_any_lower_dev(struct net_device *dev) 7250 { 7251 ASSERT_RTNL(); 7252 7253 return !list_empty(&dev->adj_list.lower); 7254 } 7255 7256 void *netdev_adjacent_get_private(struct list_head *adj_list) 7257 { 7258 struct netdev_adjacent *adj; 7259 7260 adj = list_entry(adj_list, struct netdev_adjacent, list); 7261 7262 return adj->private; 7263 } 7264 EXPORT_SYMBOL(netdev_adjacent_get_private); 7265 7266 /** 7267 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 7268 * @dev: device 7269 * @iter: list_head ** of the current position 7270 * 7271 * Gets the next device from the dev's upper list, starting from iter 7272 * position. The caller must hold RCU read lock. 
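 *
 * Callers usually go through the netdev_for_each_upper_dev_rcu() helper
 * from netdevice.h rather than calling this directly, roughly:
 *
 *    struct net_device *upper;
 *    struct list_head *iter;
 *
 *    rcu_read_lock();
 *    netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *        pr_info("%s is an upper of %s\n", upper->name, dev->name);
 *    rcu_read_unlock();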
7273 */ 7274 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 7275 struct list_head **iter) 7276 { 7277 struct netdev_adjacent *upper; 7278 7279 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7280 7281 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7282 7283 if (&upper->list == &dev->adj_list.upper) 7284 return NULL; 7285 7286 *iter = &upper->list; 7287 7288 return upper->dev; 7289 } 7290 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 7291 7292 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 7293 struct list_head **iter, 7294 bool *ignore) 7295 { 7296 struct netdev_adjacent *upper; 7297 7298 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 7299 7300 if (&upper->list == &dev->adj_list.upper) 7301 return NULL; 7302 7303 *iter = &upper->list; 7304 *ignore = upper->ignore; 7305 7306 return upper->dev; 7307 } 7308 7309 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 7310 struct list_head **iter) 7311 { 7312 struct netdev_adjacent *upper; 7313 7314 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7315 7316 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7317 7318 if (&upper->list == &dev->adj_list.upper) 7319 return NULL; 7320 7321 *iter = &upper->list; 7322 7323 return upper->dev; 7324 } 7325 7326 static int __netdev_walk_all_upper_dev(struct net_device *dev, 7327 int (*fn)(struct net_device *dev, 7328 struct netdev_nested_priv *priv), 7329 struct netdev_nested_priv *priv) 7330 { 7331 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7332 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7333 int ret, cur = 0; 7334 bool ignore; 7335 7336 now = dev; 7337 iter = &dev->adj_list.upper; 7338 7339 while (1) { 7340 if (now != dev) { 7341 ret = fn(now, priv); 7342 if (ret) 7343 return ret; 7344 } 7345 7346 next = NULL; 7347 while (1) { 7348 udev = __netdev_next_upper_dev(now, &iter, &ignore); 7349 if (!udev) 7350 break; 7351 if (ignore) 7352 continue; 7353 7354 next = udev; 7355 niter = &udev->adj_list.upper; 7356 dev_stack[cur] = now; 7357 iter_stack[cur++] = iter; 7358 break; 7359 } 7360 7361 if (!next) { 7362 if (!cur) 7363 return 0; 7364 next = dev_stack[--cur]; 7365 niter = iter_stack[cur]; 7366 } 7367 7368 now = next; 7369 iter = niter; 7370 } 7371 7372 return 0; 7373 } 7374 7375 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 7376 int (*fn)(struct net_device *dev, 7377 struct netdev_nested_priv *priv), 7378 struct netdev_nested_priv *priv) 7379 { 7380 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7381 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7382 int ret, cur = 0; 7383 7384 now = dev; 7385 iter = &dev->adj_list.upper; 7386 7387 while (1) { 7388 if (now != dev) { 7389 ret = fn(now, priv); 7390 if (ret) 7391 return ret; 7392 } 7393 7394 next = NULL; 7395 while (1) { 7396 udev = netdev_next_upper_dev_rcu(now, &iter); 7397 if (!udev) 7398 break; 7399 7400 next = udev; 7401 niter = &udev->adj_list.upper; 7402 dev_stack[cur] = now; 7403 iter_stack[cur++] = iter; 7404 break; 7405 } 7406 7407 if (!next) { 7408 if (!cur) 7409 return 0; 7410 next = dev_stack[--cur]; 7411 niter = iter_stack[cur]; 7412 } 7413 7414 now = next; 7415 iter = niter; 7416 } 7417 7418 return 0; 7419 } 7420 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7421 7422 static bool __netdev_has_upper_dev(struct net_device *dev, 7423 struct net_device *upper_dev) 7424 { 7425 struct 
netdev_nested_priv priv = { 7426 .flags = 0, 7427 .data = (void *)upper_dev, 7428 }; 7429 7430 ASSERT_RTNL(); 7431 7432 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 7433 &priv); 7434 } 7435 7436 /** 7437 * netdev_lower_get_next_private - Get the next ->private from the 7438 * lower neighbour list 7439 * @dev: device 7440 * @iter: list_head ** of the current position 7441 * 7442 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7443 * list, starting from iter position. The caller must hold either hold the 7444 * RTNL lock or its own locking that guarantees that the neighbour lower 7445 * list will remain unchanged. 7446 */ 7447 void *netdev_lower_get_next_private(struct net_device *dev, 7448 struct list_head **iter) 7449 { 7450 struct netdev_adjacent *lower; 7451 7452 lower = list_entry(*iter, struct netdev_adjacent, list); 7453 7454 if (&lower->list == &dev->adj_list.lower) 7455 return NULL; 7456 7457 *iter = lower->list.next; 7458 7459 return lower->private; 7460 } 7461 EXPORT_SYMBOL(netdev_lower_get_next_private); 7462 7463 /** 7464 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7465 * lower neighbour list, RCU 7466 * variant 7467 * @dev: device 7468 * @iter: list_head ** of the current position 7469 * 7470 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7471 * list, starting from iter position. The caller must hold RCU read lock. 7472 */ 7473 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7474 struct list_head **iter) 7475 { 7476 struct netdev_adjacent *lower; 7477 7478 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 7479 7480 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7481 7482 if (&lower->list == &dev->adj_list.lower) 7483 return NULL; 7484 7485 *iter = &lower->list; 7486 7487 return lower->private; 7488 } 7489 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7490 7491 /** 7492 * netdev_lower_get_next - Get the next device from the lower neighbour 7493 * list 7494 * @dev: device 7495 * @iter: list_head ** of the current position 7496 * 7497 * Gets the next netdev_adjacent from the dev's lower neighbour 7498 * list, starting from iter position. The caller must hold RTNL lock or 7499 * its own locking that guarantees that the neighbour lower 7500 * list will remain unchanged. 
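 *
 * The usual way to consume this is the netdev_for_each_lower_dev()
 * wrapper from netdevice.h, for example under RTNL:
 *
 *    struct net_device *lower;
 *    struct list_head *iter;
 *
 *    netdev_for_each_lower_dev(dev, lower, iter)
 *        pr_info("%s is a lower of %s\n", lower->name, dev->name);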
7501 */ 7502 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7503 { 7504 struct netdev_adjacent *lower; 7505 7506 lower = list_entry(*iter, struct netdev_adjacent, list); 7507 7508 if (&lower->list == &dev->adj_list.lower) 7509 return NULL; 7510 7511 *iter = lower->list.next; 7512 7513 return lower->dev; 7514 } 7515 EXPORT_SYMBOL(netdev_lower_get_next); 7516 7517 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7518 struct list_head **iter) 7519 { 7520 struct netdev_adjacent *lower; 7521 7522 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7523 7524 if (&lower->list == &dev->adj_list.lower) 7525 return NULL; 7526 7527 *iter = &lower->list; 7528 7529 return lower->dev; 7530 } 7531 7532 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7533 struct list_head **iter, 7534 bool *ignore) 7535 { 7536 struct netdev_adjacent *lower; 7537 7538 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7539 7540 if (&lower->list == &dev->adj_list.lower) 7541 return NULL; 7542 7543 *iter = &lower->list; 7544 *ignore = lower->ignore; 7545 7546 return lower->dev; 7547 } 7548 7549 int netdev_walk_all_lower_dev(struct net_device *dev, 7550 int (*fn)(struct net_device *dev, 7551 struct netdev_nested_priv *priv), 7552 struct netdev_nested_priv *priv) 7553 { 7554 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7555 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7556 int ret, cur = 0; 7557 7558 now = dev; 7559 iter = &dev->adj_list.lower; 7560 7561 while (1) { 7562 if (now != dev) { 7563 ret = fn(now, priv); 7564 if (ret) 7565 return ret; 7566 } 7567 7568 next = NULL; 7569 while (1) { 7570 ldev = netdev_next_lower_dev(now, &iter); 7571 if (!ldev) 7572 break; 7573 7574 next = ldev; 7575 niter = &ldev->adj_list.lower; 7576 dev_stack[cur] = now; 7577 iter_stack[cur++] = iter; 7578 break; 7579 } 7580 7581 if (!next) { 7582 if (!cur) 7583 return 0; 7584 next = dev_stack[--cur]; 7585 niter = iter_stack[cur]; 7586 } 7587 7588 now = next; 7589 iter = niter; 7590 } 7591 7592 return 0; 7593 } 7594 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7595 7596 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7597 int (*fn)(struct net_device *dev, 7598 struct netdev_nested_priv *priv), 7599 struct netdev_nested_priv *priv) 7600 { 7601 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7602 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7603 int ret, cur = 0; 7604 bool ignore; 7605 7606 now = dev; 7607 iter = &dev->adj_list.lower; 7608 7609 while (1) { 7610 if (now != dev) { 7611 ret = fn(now, priv); 7612 if (ret) 7613 return ret; 7614 } 7615 7616 next = NULL; 7617 while (1) { 7618 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7619 if (!ldev) 7620 break; 7621 if (ignore) 7622 continue; 7623 7624 next = ldev; 7625 niter = &ldev->adj_list.lower; 7626 dev_stack[cur] = now; 7627 iter_stack[cur++] = iter; 7628 break; 7629 } 7630 7631 if (!next) { 7632 if (!cur) 7633 return 0; 7634 next = dev_stack[--cur]; 7635 niter = iter_stack[cur]; 7636 } 7637 7638 now = next; 7639 iter = niter; 7640 } 7641 7642 return 0; 7643 } 7644 7645 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7646 struct list_head **iter) 7647 { 7648 struct netdev_adjacent *lower; 7649 7650 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7651 if (&lower->list == &dev->adj_list.lower) 7652 return NULL; 7653 7654 *iter = &lower->list; 7655 7656 
return lower->dev; 7657 } 7658 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7659 7660 static u8 __netdev_upper_depth(struct net_device *dev) 7661 { 7662 struct net_device *udev; 7663 struct list_head *iter; 7664 u8 max_depth = 0; 7665 bool ignore; 7666 7667 for (iter = &dev->adj_list.upper, 7668 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7669 udev; 7670 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7671 if (ignore) 7672 continue; 7673 if (max_depth < udev->upper_level) 7674 max_depth = udev->upper_level; 7675 } 7676 7677 return max_depth; 7678 } 7679 7680 static u8 __netdev_lower_depth(struct net_device *dev) 7681 { 7682 struct net_device *ldev; 7683 struct list_head *iter; 7684 u8 max_depth = 0; 7685 bool ignore; 7686 7687 for (iter = &dev->adj_list.lower, 7688 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7689 ldev; 7690 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7691 if (ignore) 7692 continue; 7693 if (max_depth < ldev->lower_level) 7694 max_depth = ldev->lower_level; 7695 } 7696 7697 return max_depth; 7698 } 7699 7700 static int __netdev_update_upper_level(struct net_device *dev, 7701 struct netdev_nested_priv *__unused) 7702 { 7703 dev->upper_level = __netdev_upper_depth(dev) + 1; 7704 return 0; 7705 } 7706 7707 #ifdef CONFIG_LOCKDEP 7708 static LIST_HEAD(net_unlink_list); 7709 7710 static void net_unlink_todo(struct net_device *dev) 7711 { 7712 if (list_empty(&dev->unlink_list)) 7713 list_add_tail(&dev->unlink_list, &net_unlink_list); 7714 } 7715 #endif 7716 7717 static int __netdev_update_lower_level(struct net_device *dev, 7718 struct netdev_nested_priv *priv) 7719 { 7720 dev->lower_level = __netdev_lower_depth(dev) + 1; 7721 7722 #ifdef CONFIG_LOCKDEP 7723 if (!priv) 7724 return 0; 7725 7726 if (priv->flags & NESTED_SYNC_IMM) 7727 dev->nested_level = dev->lower_level - 1; 7728 if (priv->flags & NESTED_SYNC_TODO) 7729 net_unlink_todo(dev); 7730 #endif 7731 return 0; 7732 } 7733 7734 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7735 int (*fn)(struct net_device *dev, 7736 struct netdev_nested_priv *priv), 7737 struct netdev_nested_priv *priv) 7738 { 7739 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7740 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7741 int ret, cur = 0; 7742 7743 now = dev; 7744 iter = &dev->adj_list.lower; 7745 7746 while (1) { 7747 if (now != dev) { 7748 ret = fn(now, priv); 7749 if (ret) 7750 return ret; 7751 } 7752 7753 next = NULL; 7754 while (1) { 7755 ldev = netdev_next_lower_dev_rcu(now, &iter); 7756 if (!ldev) 7757 break; 7758 7759 next = ldev; 7760 niter = &ldev->adj_list.lower; 7761 dev_stack[cur] = now; 7762 iter_stack[cur++] = iter; 7763 break; 7764 } 7765 7766 if (!next) { 7767 if (!cur) 7768 return 0; 7769 next = dev_stack[--cur]; 7770 niter = iter_stack[cur]; 7771 } 7772 7773 now = next; 7774 iter = niter; 7775 } 7776 7777 return 0; 7778 } 7779 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7780 7781 /** 7782 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7783 * lower neighbour list, RCU 7784 * variant 7785 * @dev: device 7786 * 7787 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7788 * list. The caller must hold RCU read lock. 
7789 */ 7790 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7791 { 7792 struct netdev_adjacent *lower; 7793 7794 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7795 struct netdev_adjacent, list); 7796 if (lower) 7797 return lower->private; 7798 return NULL; 7799 } 7800 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7801 7802 /** 7803 * netdev_master_upper_dev_get_rcu - Get master upper device 7804 * @dev: device 7805 * 7806 * Find a master upper device and return pointer to it or NULL in case 7807 * it's not there. The caller must hold the RCU read lock. 7808 */ 7809 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7810 { 7811 struct netdev_adjacent *upper; 7812 7813 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7814 struct netdev_adjacent, list); 7815 if (upper && likely(upper->master)) 7816 return upper->dev; 7817 return NULL; 7818 } 7819 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7820 7821 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7822 struct net_device *adj_dev, 7823 struct list_head *dev_list) 7824 { 7825 char linkname[IFNAMSIZ+7]; 7826 7827 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7828 "upper_%s" : "lower_%s", adj_dev->name); 7829 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7830 linkname); 7831 } 7832 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7833 char *name, 7834 struct list_head *dev_list) 7835 { 7836 char linkname[IFNAMSIZ+7]; 7837 7838 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7839 "upper_%s" : "lower_%s", name); 7840 sysfs_remove_link(&(dev->dev.kobj), linkname); 7841 } 7842 7843 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7844 struct net_device *adj_dev, 7845 struct list_head *dev_list) 7846 { 7847 return (dev_list == &dev->adj_list.upper || 7848 dev_list == &dev->adj_list.lower) && 7849 net_eq(dev_net(dev), dev_net(adj_dev)); 7850 } 7851 7852 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7853 struct net_device *adj_dev, 7854 struct list_head *dev_list, 7855 void *private, bool master) 7856 { 7857 struct netdev_adjacent *adj; 7858 int ret; 7859 7860 adj = __netdev_find_adj(adj_dev, dev_list); 7861 7862 if (adj) { 7863 adj->ref_nr += 1; 7864 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7865 dev->name, adj_dev->name, adj->ref_nr); 7866 7867 return 0; 7868 } 7869 7870 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7871 if (!adj) 7872 return -ENOMEM; 7873 7874 adj->dev = adj_dev; 7875 adj->master = master; 7876 adj->ref_nr = 1; 7877 adj->private = private; 7878 adj->ignore = false; 7879 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL); 7880 7881 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7882 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7883 7884 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7885 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7886 if (ret) 7887 goto free_adj; 7888 } 7889 7890 /* Ensure that master link is always the first item in list. 
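 * netdev_master_upper_dev_get() relies on this ordering: it only looks
 * at list_first_entry() of adj_list.upper and checks its ->master flag.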
*/ 7891 if (master) { 7892 ret = sysfs_create_link(&(dev->dev.kobj), 7893 &(adj_dev->dev.kobj), "master"); 7894 if (ret) 7895 goto remove_symlinks; 7896 7897 list_add_rcu(&adj->list, dev_list); 7898 } else { 7899 list_add_tail_rcu(&adj->list, dev_list); 7900 } 7901 7902 return 0; 7903 7904 remove_symlinks: 7905 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7906 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7907 free_adj: 7908 netdev_put(adj_dev, &adj->dev_tracker); 7909 kfree(adj); 7910 7911 return ret; 7912 } 7913 7914 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7915 struct net_device *adj_dev, 7916 u16 ref_nr, 7917 struct list_head *dev_list) 7918 { 7919 struct netdev_adjacent *adj; 7920 7921 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7922 dev->name, adj_dev->name, ref_nr); 7923 7924 adj = __netdev_find_adj(adj_dev, dev_list); 7925 7926 if (!adj) { 7927 pr_err("Adjacency does not exist for device %s from %s\n", 7928 dev->name, adj_dev->name); 7929 WARN_ON(1); 7930 return; 7931 } 7932 7933 if (adj->ref_nr > ref_nr) { 7934 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7935 dev->name, adj_dev->name, ref_nr, 7936 adj->ref_nr - ref_nr); 7937 adj->ref_nr -= ref_nr; 7938 return; 7939 } 7940 7941 if (adj->master) 7942 sysfs_remove_link(&(dev->dev.kobj), "master"); 7943 7944 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7945 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7946 7947 list_del_rcu(&adj->list); 7948 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7949 adj_dev->name, dev->name, adj_dev->name); 7950 netdev_put(adj_dev, &adj->dev_tracker); 7951 kfree_rcu(adj, rcu); 7952 } 7953 7954 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7955 struct net_device *upper_dev, 7956 struct list_head *up_list, 7957 struct list_head *down_list, 7958 void *private, bool master) 7959 { 7960 int ret; 7961 7962 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7963 private, master); 7964 if (ret) 7965 return ret; 7966 7967 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7968 private, false); 7969 if (ret) { 7970 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7971 return ret; 7972 } 7973 7974 return 0; 7975 } 7976 7977 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7978 struct net_device *upper_dev, 7979 u16 ref_nr, 7980 struct list_head *up_list, 7981 struct list_head *down_list) 7982 { 7983 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7984 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7985 } 7986 7987 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7988 struct net_device *upper_dev, 7989 void *private, bool master) 7990 { 7991 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7992 &dev->adj_list.upper, 7993 &upper_dev->adj_list.lower, 7994 private, master); 7995 } 7996 7997 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7998 struct net_device *upper_dev) 7999 { 8000 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 8001 &dev->adj_list.upper, 8002 &upper_dev->adj_list.lower); 8003 } 8004 8005 static int __netdev_upper_dev_link(struct net_device *dev, 8006 struct net_device *upper_dev, bool master, 8007 void *upper_priv, void *upper_info, 8008 struct netdev_nested_priv *priv, 8009 struct netlink_ext_ack *extack) 8010 { 8011 struct netdev_notifier_changeupper_info changeupper_info = { 8012 .info = { 8013 .dev = dev, 
8014 .extack = extack, 8015 }, 8016 .upper_dev = upper_dev, 8017 .master = master, 8018 .linking = true, 8019 .upper_info = upper_info, 8020 }; 8021 struct net_device *master_dev; 8022 int ret = 0; 8023 8024 ASSERT_RTNL(); 8025 8026 if (dev == upper_dev) 8027 return -EBUSY; 8028 8029 /* To prevent loops, check if dev is not upper device to upper_dev. */ 8030 if (__netdev_has_upper_dev(upper_dev, dev)) 8031 return -EBUSY; 8032 8033 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 8034 return -EMLINK; 8035 8036 if (!master) { 8037 if (__netdev_has_upper_dev(dev, upper_dev)) 8038 return -EEXIST; 8039 } else { 8040 master_dev = __netdev_master_upper_dev_get(dev); 8041 if (master_dev) 8042 return master_dev == upper_dev ? -EEXIST : -EBUSY; 8043 } 8044 8045 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8046 &changeupper_info.info); 8047 ret = notifier_to_errno(ret); 8048 if (ret) 8049 return ret; 8050 8051 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 8052 master); 8053 if (ret) 8054 return ret; 8055 8056 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8057 &changeupper_info.info); 8058 ret = notifier_to_errno(ret); 8059 if (ret) 8060 goto rollback; 8061 8062 __netdev_update_upper_level(dev, NULL); 8063 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8064 8065 __netdev_update_lower_level(upper_dev, priv); 8066 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8067 priv); 8068 8069 return 0; 8070 8071 rollback: 8072 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8073 8074 return ret; 8075 } 8076 8077 /** 8078 * netdev_upper_dev_link - Add a link to the upper device 8079 * @dev: device 8080 * @upper_dev: new upper device 8081 * @extack: netlink extended ack 8082 * 8083 * Adds a link to device which is upper to this one. The caller must hold 8084 * the RTNL lock. On a failure a negative errno code is returned. 8085 * On success the reference counts are adjusted and the function 8086 * returns zero. 8087 */ 8088 int netdev_upper_dev_link(struct net_device *dev, 8089 struct net_device *upper_dev, 8090 struct netlink_ext_ack *extack) 8091 { 8092 struct netdev_nested_priv priv = { 8093 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8094 .data = NULL, 8095 }; 8096 8097 return __netdev_upper_dev_link(dev, upper_dev, false, 8098 NULL, NULL, &priv, extack); 8099 } 8100 EXPORT_SYMBOL(netdev_upper_dev_link); 8101 8102 /** 8103 * netdev_master_upper_dev_link - Add a master link to the upper device 8104 * @dev: device 8105 * @upper_dev: new upper device 8106 * @upper_priv: upper device private 8107 * @upper_info: upper info to be passed down via notifier 8108 * @extack: netlink extended ack 8109 * 8110 * Adds a link to device which is upper to this one. In this case, only 8111 * one master upper device can be linked, although other non-master devices 8112 * might be linked as well. The caller must hold the RTNL lock. 8113 * On a failure a negative errno code is returned. On success the reference 8114 * counts are adjusted and the function returns zero. 
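 *
 * A minimal sketch of the intended use, with "port_dev" enslaved to a
 * hypothetical aggregating "aggr_dev" (both names are placeholders),
 * called under RTNL:
 *
 *    err = netdev_master_upper_dev_link(port_dev, aggr_dev,
 *                                       NULL, NULL, extack);
 *    if (err)
 *        return err;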
8115 */ 8116 int netdev_master_upper_dev_link(struct net_device *dev, 8117 struct net_device *upper_dev, 8118 void *upper_priv, void *upper_info, 8119 struct netlink_ext_ack *extack) 8120 { 8121 struct netdev_nested_priv priv = { 8122 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8123 .data = NULL, 8124 }; 8125 8126 return __netdev_upper_dev_link(dev, upper_dev, true, 8127 upper_priv, upper_info, &priv, extack); 8128 } 8129 EXPORT_SYMBOL(netdev_master_upper_dev_link); 8130 8131 static void __netdev_upper_dev_unlink(struct net_device *dev, 8132 struct net_device *upper_dev, 8133 struct netdev_nested_priv *priv) 8134 { 8135 struct netdev_notifier_changeupper_info changeupper_info = { 8136 .info = { 8137 .dev = dev, 8138 }, 8139 .upper_dev = upper_dev, 8140 .linking = false, 8141 }; 8142 8143 ASSERT_RTNL(); 8144 8145 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 8146 8147 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8148 &changeupper_info.info); 8149 8150 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8151 8152 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8153 &changeupper_info.info); 8154 8155 __netdev_update_upper_level(dev, NULL); 8156 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8157 8158 __netdev_update_lower_level(upper_dev, priv); 8159 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8160 priv); 8161 } 8162 8163 /** 8164 * netdev_upper_dev_unlink - Removes a link to upper device 8165 * @dev: device 8166 * @upper_dev: new upper device 8167 * 8168 * Removes a link to device which is upper to this one. The caller must hold 8169 * the RTNL lock. 8170 */ 8171 void netdev_upper_dev_unlink(struct net_device *dev, 8172 struct net_device *upper_dev) 8173 { 8174 struct netdev_nested_priv priv = { 8175 .flags = NESTED_SYNC_TODO, 8176 .data = NULL, 8177 }; 8178 8179 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 8180 } 8181 EXPORT_SYMBOL(netdev_upper_dev_unlink); 8182 8183 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 8184 struct net_device *lower_dev, 8185 bool val) 8186 { 8187 struct netdev_adjacent *adj; 8188 8189 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 8190 if (adj) 8191 adj->ignore = val; 8192 8193 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 8194 if (adj) 8195 adj->ignore = val; 8196 } 8197 8198 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 8199 struct net_device *lower_dev) 8200 { 8201 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 8202 } 8203 8204 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 8205 struct net_device *lower_dev) 8206 { 8207 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 8208 } 8209 8210 int netdev_adjacent_change_prepare(struct net_device *old_dev, 8211 struct net_device *new_dev, 8212 struct net_device *dev, 8213 struct netlink_ext_ack *extack) 8214 { 8215 struct netdev_nested_priv priv = { 8216 .flags = 0, 8217 .data = NULL, 8218 }; 8219 int err; 8220 8221 if (!new_dev) 8222 return 0; 8223 8224 if (old_dev && new_dev != old_dev) 8225 netdev_adjacent_dev_disable(dev, old_dev); 8226 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 8227 extack); 8228 if (err) { 8229 if (old_dev && new_dev != old_dev) 8230 netdev_adjacent_dev_enable(dev, old_dev); 8231 return err; 8232 } 8233 8234 return 0; 8235 } 8236 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 8237 8238 void netdev_adjacent_change_commit(struct net_device *old_dev, 8239 struct 
net_device *new_dev, 8240 struct net_device *dev) 8241 { 8242 struct netdev_nested_priv priv = { 8243 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8244 .data = NULL, 8245 }; 8246 8247 if (!new_dev || !old_dev) 8248 return; 8249 8250 if (new_dev == old_dev) 8251 return; 8252 8253 netdev_adjacent_dev_enable(dev, old_dev); 8254 __netdev_upper_dev_unlink(old_dev, dev, &priv); 8255 } 8256 EXPORT_SYMBOL(netdev_adjacent_change_commit); 8257 8258 void netdev_adjacent_change_abort(struct net_device *old_dev, 8259 struct net_device *new_dev, 8260 struct net_device *dev) 8261 { 8262 struct netdev_nested_priv priv = { 8263 .flags = 0, 8264 .data = NULL, 8265 }; 8266 8267 if (!new_dev) 8268 return; 8269 8270 if (old_dev && new_dev != old_dev) 8271 netdev_adjacent_dev_enable(dev, old_dev); 8272 8273 __netdev_upper_dev_unlink(new_dev, dev, &priv); 8274 } 8275 EXPORT_SYMBOL(netdev_adjacent_change_abort); 8276 8277 /** 8278 * netdev_bonding_info_change - Dispatch event about slave change 8279 * @dev: device 8280 * @bonding_info: info to dispatch 8281 * 8282 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 8283 * The caller must hold the RTNL lock. 8284 */ 8285 void netdev_bonding_info_change(struct net_device *dev, 8286 struct netdev_bonding_info *bonding_info) 8287 { 8288 struct netdev_notifier_bonding_info info = { 8289 .info.dev = dev, 8290 }; 8291 8292 memcpy(&info.bonding_info, bonding_info, 8293 sizeof(struct netdev_bonding_info)); 8294 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 8295 &info.info); 8296 } 8297 EXPORT_SYMBOL(netdev_bonding_info_change); 8298 8299 static int netdev_offload_xstats_enable_l3(struct net_device *dev, 8300 struct netlink_ext_ack *extack) 8301 { 8302 struct netdev_notifier_offload_xstats_info info = { 8303 .info.dev = dev, 8304 .info.extack = extack, 8305 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 8306 }; 8307 int err; 8308 int rc; 8309 8310 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), 8311 GFP_KERNEL); 8312 if (!dev->offload_xstats_l3) 8313 return -ENOMEM; 8314 8315 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, 8316 NETDEV_OFFLOAD_XSTATS_DISABLE, 8317 &info.info); 8318 err = notifier_to_errno(rc); 8319 if (err) 8320 goto free_stats; 8321 8322 return 0; 8323 8324 free_stats: 8325 kfree(dev->offload_xstats_l3); 8326 dev->offload_xstats_l3 = NULL; 8327 return err; 8328 } 8329 8330 int netdev_offload_xstats_enable(struct net_device *dev, 8331 enum netdev_offload_xstats_type type, 8332 struct netlink_ext_ack *extack) 8333 { 8334 ASSERT_RTNL(); 8335 8336 if (netdev_offload_xstats_enabled(dev, type)) 8337 return -EALREADY; 8338 8339 switch (type) { 8340 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8341 return netdev_offload_xstats_enable_l3(dev, extack); 8342 } 8343 8344 WARN_ON(1); 8345 return -EINVAL; 8346 } 8347 EXPORT_SYMBOL(netdev_offload_xstats_enable); 8348 8349 static void netdev_offload_xstats_disable_l3(struct net_device *dev) 8350 { 8351 struct netdev_notifier_offload_xstats_info info = { 8352 .info.dev = dev, 8353 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 8354 }; 8355 8356 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, 8357 &info.info); 8358 kfree(dev->offload_xstats_l3); 8359 dev->offload_xstats_l3 = NULL; 8360 } 8361 8362 int netdev_offload_xstats_disable(struct net_device *dev, 8363 enum netdev_offload_xstats_type type) 8364 { 8365 ASSERT_RTNL(); 8366 8367 if (!netdev_offload_xstats_enabled(dev, type)) 8368 return -EALREADY; 8369 8370 switch (type) { 8371 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8372 
netdev_offload_xstats_disable_l3(dev); 8373 return 0; 8374 } 8375 8376 WARN_ON(1); 8377 return -EINVAL; 8378 } 8379 EXPORT_SYMBOL(netdev_offload_xstats_disable); 8380 8381 static void netdev_offload_xstats_disable_all(struct net_device *dev) 8382 { 8383 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); 8384 } 8385 8386 static struct rtnl_hw_stats64 * 8387 netdev_offload_xstats_get_ptr(const struct net_device *dev, 8388 enum netdev_offload_xstats_type type) 8389 { 8390 switch (type) { 8391 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8392 return dev->offload_xstats_l3; 8393 } 8394 8395 WARN_ON(1); 8396 return NULL; 8397 } 8398 8399 bool netdev_offload_xstats_enabled(const struct net_device *dev, 8400 enum netdev_offload_xstats_type type) 8401 { 8402 ASSERT_RTNL(); 8403 8404 return netdev_offload_xstats_get_ptr(dev, type); 8405 } 8406 EXPORT_SYMBOL(netdev_offload_xstats_enabled); 8407 8408 struct netdev_notifier_offload_xstats_ru { 8409 bool used; 8410 }; 8411 8412 struct netdev_notifier_offload_xstats_rd { 8413 struct rtnl_hw_stats64 stats; 8414 bool used; 8415 }; 8416 8417 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, 8418 const struct rtnl_hw_stats64 *src) 8419 { 8420 dest->rx_packets += src->rx_packets; 8421 dest->tx_packets += src->tx_packets; 8422 dest->rx_bytes += src->rx_bytes; 8423 dest->tx_bytes += src->tx_bytes; 8424 dest->rx_errors += src->rx_errors; 8425 dest->tx_errors += src->tx_errors; 8426 dest->rx_dropped += src->rx_dropped; 8427 dest->tx_dropped += src->tx_dropped; 8428 dest->multicast += src->multicast; 8429 } 8430 8431 static int netdev_offload_xstats_get_used(struct net_device *dev, 8432 enum netdev_offload_xstats_type type, 8433 bool *p_used, 8434 struct netlink_ext_ack *extack) 8435 { 8436 struct netdev_notifier_offload_xstats_ru report_used = {}; 8437 struct netdev_notifier_offload_xstats_info info = { 8438 .info.dev = dev, 8439 .info.extack = extack, 8440 .type = type, 8441 .report_used = &report_used, 8442 }; 8443 int rc; 8444 8445 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); 8446 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, 8447 &info.info); 8448 *p_used = report_used.used; 8449 return notifier_to_errno(rc); 8450 } 8451 8452 static int netdev_offload_xstats_get_stats(struct net_device *dev, 8453 enum netdev_offload_xstats_type type, 8454 struct rtnl_hw_stats64 *p_stats, 8455 bool *p_used, 8456 struct netlink_ext_ack *extack) 8457 { 8458 struct netdev_notifier_offload_xstats_rd report_delta = {}; 8459 struct netdev_notifier_offload_xstats_info info = { 8460 .info.dev = dev, 8461 .info.extack = extack, 8462 .type = type, 8463 .report_delta = &report_delta, 8464 }; 8465 struct rtnl_hw_stats64 *stats; 8466 int rc; 8467 8468 stats = netdev_offload_xstats_get_ptr(dev, type); 8469 if (WARN_ON(!stats)) 8470 return -EINVAL; 8471 8472 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 8473 &info.info); 8474 8475 /* Cache whatever we got, even if there was an error, otherwise the 8476 * successful stats retrievals would get lost. 
8477 */ 8478 netdev_hw_stats64_add(stats, &report_delta.stats); 8479 8480 if (p_stats) 8481 *p_stats = *stats; 8482 *p_used = report_delta.used; 8483 8484 return notifier_to_errno(rc); 8485 } 8486 8487 int netdev_offload_xstats_get(struct net_device *dev, 8488 enum netdev_offload_xstats_type type, 8489 struct rtnl_hw_stats64 *p_stats, bool *p_used, 8490 struct netlink_ext_ack *extack) 8491 { 8492 ASSERT_RTNL(); 8493 8494 if (p_stats) 8495 return netdev_offload_xstats_get_stats(dev, type, p_stats, 8496 p_used, extack); 8497 else 8498 return netdev_offload_xstats_get_used(dev, type, p_used, 8499 extack); 8500 } 8501 EXPORT_SYMBOL(netdev_offload_xstats_get); 8502 8503 void 8504 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, 8505 const struct rtnl_hw_stats64 *stats) 8506 { 8507 report_delta->used = true; 8508 netdev_hw_stats64_add(&report_delta->stats, stats); 8509 } 8510 EXPORT_SYMBOL(netdev_offload_xstats_report_delta); 8511 8512 void 8513 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) 8514 { 8515 report_used->used = true; 8516 } 8517 EXPORT_SYMBOL(netdev_offload_xstats_report_used); 8518 8519 void netdev_offload_xstats_push_delta(struct net_device *dev, 8520 enum netdev_offload_xstats_type type, 8521 const struct rtnl_hw_stats64 *p_stats) 8522 { 8523 struct rtnl_hw_stats64 *stats; 8524 8525 ASSERT_RTNL(); 8526 8527 stats = netdev_offload_xstats_get_ptr(dev, type); 8528 if (WARN_ON(!stats)) 8529 return; 8530 8531 netdev_hw_stats64_add(stats, p_stats); 8532 } 8533 EXPORT_SYMBOL(netdev_offload_xstats_push_delta); 8534 8535 /** 8536 * netdev_get_xmit_slave - Get the xmit slave of master device 8537 * @dev: device 8538 * @skb: The packet 8539 * @all_slaves: assume all the slaves are active 8540 * 8541 * The reference counters are not incremented so the caller must be 8542 * careful with locks. The caller must hold RCU lock. 8543 * %NULL is returned if no slave is found. 8544 */ 8545 8546 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8547 struct sk_buff *skb, 8548 bool all_slaves) 8549 { 8550 const struct net_device_ops *ops = dev->netdev_ops; 8551 8552 if (!ops->ndo_get_xmit_slave) 8553 return NULL; 8554 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8555 } 8556 EXPORT_SYMBOL(netdev_get_xmit_slave); 8557 8558 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8559 struct sock *sk) 8560 { 8561 const struct net_device_ops *ops = dev->netdev_ops; 8562 8563 if (!ops->ndo_sk_get_lower_dev) 8564 return NULL; 8565 return ops->ndo_sk_get_lower_dev(dev, sk); 8566 } 8567 8568 /** 8569 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8570 * @dev: device 8571 * @sk: the socket 8572 * 8573 * %NULL is returned if no lower device is found. 
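 *
 * Illustrative use (editor's addition): a TLS-offload style caller that needs
 * the bottom-most device actually carrying @sk under a bond/team stack might
 * do the following; taking a reference on the result and any locking are the
 * caller's responsibility, and "lowest" is an assumed local variable:
 *
 *	lowest = netdev_sk_get_lowest_dev(bond_dev, sk);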
8574 */ 8575 8576 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8577 struct sock *sk) 8578 { 8579 struct net_device *lower; 8580 8581 lower = netdev_sk_get_lower_dev(dev, sk); 8582 while (lower) { 8583 dev = lower; 8584 lower = netdev_sk_get_lower_dev(dev, sk); 8585 } 8586 8587 return dev; 8588 } 8589 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8590 8591 static void netdev_adjacent_add_links(struct net_device *dev) 8592 { 8593 struct netdev_adjacent *iter; 8594 8595 struct net *net = dev_net(dev); 8596 8597 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8598 if (!net_eq(net, dev_net(iter->dev))) 8599 continue; 8600 netdev_adjacent_sysfs_add(iter->dev, dev, 8601 &iter->dev->adj_list.lower); 8602 netdev_adjacent_sysfs_add(dev, iter->dev, 8603 &dev->adj_list.upper); 8604 } 8605 8606 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8607 if (!net_eq(net, dev_net(iter->dev))) 8608 continue; 8609 netdev_adjacent_sysfs_add(iter->dev, dev, 8610 &iter->dev->adj_list.upper); 8611 netdev_adjacent_sysfs_add(dev, iter->dev, 8612 &dev->adj_list.lower); 8613 } 8614 } 8615 8616 static void netdev_adjacent_del_links(struct net_device *dev) 8617 { 8618 struct netdev_adjacent *iter; 8619 8620 struct net *net = dev_net(dev); 8621 8622 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8623 if (!net_eq(net, dev_net(iter->dev))) 8624 continue; 8625 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8626 &iter->dev->adj_list.lower); 8627 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8628 &dev->adj_list.upper); 8629 } 8630 8631 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8632 if (!net_eq(net, dev_net(iter->dev))) 8633 continue; 8634 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8635 &iter->dev->adj_list.upper); 8636 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8637 &dev->adj_list.lower); 8638 } 8639 } 8640 8641 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8642 { 8643 struct netdev_adjacent *iter; 8644 8645 struct net *net = dev_net(dev); 8646 8647 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8648 if (!net_eq(net, dev_net(iter->dev))) 8649 continue; 8650 netdev_adjacent_sysfs_del(iter->dev, oldname, 8651 &iter->dev->adj_list.lower); 8652 netdev_adjacent_sysfs_add(iter->dev, dev, 8653 &iter->dev->adj_list.lower); 8654 } 8655 8656 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8657 if (!net_eq(net, dev_net(iter->dev))) 8658 continue; 8659 netdev_adjacent_sysfs_del(iter->dev, oldname, 8660 &iter->dev->adj_list.upper); 8661 netdev_adjacent_sysfs_add(iter->dev, dev, 8662 &iter->dev->adj_list.upper); 8663 } 8664 } 8665 8666 void *netdev_lower_dev_get_private(struct net_device *dev, 8667 struct net_device *lower_dev) 8668 { 8669 struct netdev_adjacent *lower; 8670 8671 if (!lower_dev) 8672 return NULL; 8673 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8674 if (!lower) 8675 return NULL; 8676 8677 return lower->private; 8678 } 8679 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8680 8681 8682 /** 8683 * netdev_lower_state_changed - Dispatch event about lower device state change 8684 * @lower_dev: device 8685 * @lower_state_info: state to dispatch 8686 * 8687 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8688 * The caller must hold the RTNL lock. 
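 *
 * Illustrative sketch (editor's addition): a LAG/bonding-style driver that
 * tracks per-slave link state might notify the stack roughly as below; the
 * info type and field names are assumptions borrowed from LAG usage:
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up    = slave_link_up,
 *		.tx_enabled = slave_can_tx,
 *	};
 *
 *	ASSERT_RTNL();
 *	netdev_lower_state_changed(slave_dev, &info);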
8689 */ 8690 void netdev_lower_state_changed(struct net_device *lower_dev, 8691 void *lower_state_info) 8692 { 8693 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8694 .info.dev = lower_dev, 8695 }; 8696 8697 ASSERT_RTNL(); 8698 changelowerstate_info.lower_state_info = lower_state_info; 8699 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8700 &changelowerstate_info.info); 8701 } 8702 EXPORT_SYMBOL(netdev_lower_state_changed); 8703 8704 static void dev_change_rx_flags(struct net_device *dev, int flags) 8705 { 8706 const struct net_device_ops *ops = dev->netdev_ops; 8707 8708 if (ops->ndo_change_rx_flags) 8709 ops->ndo_change_rx_flags(dev, flags); 8710 } 8711 8712 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8713 { 8714 unsigned int old_flags = dev->flags; 8715 unsigned int promiscuity, flags; 8716 kuid_t uid; 8717 kgid_t gid; 8718 8719 ASSERT_RTNL(); 8720 8721 promiscuity = dev->promiscuity + inc; 8722 if (promiscuity == 0) { 8723 /* 8724 * Avoid overflow. 8725 * If inc causes overflow, untouch promisc and return error. 8726 */ 8727 if (unlikely(inc > 0)) { 8728 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 8729 return -EOVERFLOW; 8730 } 8731 flags = old_flags & ~IFF_PROMISC; 8732 } else { 8733 flags = old_flags | IFF_PROMISC; 8734 } 8735 WRITE_ONCE(dev->promiscuity, promiscuity); 8736 if (flags != old_flags) { 8737 WRITE_ONCE(dev->flags, flags); 8738 netdev_info(dev, "%s promiscuous mode\n", 8739 dev->flags & IFF_PROMISC ? "entered" : "left"); 8740 if (audit_enabled) { 8741 current_uid_gid(&uid, &gid); 8742 audit_log(audit_context(), GFP_ATOMIC, 8743 AUDIT_ANOM_PROMISCUOUS, 8744 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8745 dev->name, (dev->flags & IFF_PROMISC), 8746 (old_flags & IFF_PROMISC), 8747 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8748 from_kuid(&init_user_ns, uid), 8749 from_kgid(&init_user_ns, gid), 8750 audit_get_sessionid(current)); 8751 } 8752 8753 dev_change_rx_flags(dev, IFF_PROMISC); 8754 } 8755 if (notify) 8756 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); 8757 return 0; 8758 } 8759 8760 /** 8761 * dev_set_promiscuity - update promiscuity count on a device 8762 * @dev: device 8763 * @inc: modifier 8764 * 8765 * Add or remove promiscuity from a device. While the count in the device 8766 * remains above zero the interface remains promiscuous. Once it hits zero 8767 * the device reverts back to normal filtering operation. A negative inc 8768 * value is used to drop promiscuity on the device. 8769 * Return 0 if successful or a negative errno code on error. 8770 */ 8771 int dev_set_promiscuity(struct net_device *dev, int inc) 8772 { 8773 unsigned int old_flags = dev->flags; 8774 int err; 8775 8776 err = __dev_set_promiscuity(dev, inc, true); 8777 if (err < 0) 8778 return err; 8779 if (dev->flags != old_flags) 8780 dev_set_rx_mode(dev); 8781 return err; 8782 } 8783 EXPORT_SYMBOL(dev_set_promiscuity); 8784 8785 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8786 { 8787 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8788 unsigned int allmulti, flags; 8789 8790 ASSERT_RTNL(); 8791 8792 allmulti = dev->allmulti + inc; 8793 if (allmulti == 0) { 8794 /* 8795 * Avoid overflow. 8796 * If inc causes overflow, untouch allmulti and return error. 8797 */ 8798 if (unlikely(inc > 0)) { 8799 netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); 8800 return -EOVERFLOW; 8801 } 8802 flags = old_flags & ~IFF_ALLMULTI; 8803 } else { 8804 flags = old_flags | IFF_ALLMULTI; 8805 } 8806 WRITE_ONCE(dev->allmulti, allmulti); 8807 if (flags != old_flags) { 8808 WRITE_ONCE(dev->flags, flags); 8809 netdev_info(dev, "%s allmulticast mode\n", 8810 dev->flags & IFF_ALLMULTI ? "entered" : "left"); 8811 dev_change_rx_flags(dev, IFF_ALLMULTI); 8812 dev_set_rx_mode(dev); 8813 if (notify) 8814 __dev_notify_flags(dev, old_flags, 8815 dev->gflags ^ old_gflags, 0, NULL); 8816 } 8817 return 0; 8818 } 8819 8820 /** 8821 * dev_set_allmulti - update allmulti count on a device 8822 * @dev: device 8823 * @inc: modifier 8824 * 8825 * Add or remove reception of all multicast frames to a device. While the 8826 * count in the device remains above zero the interface remains listening 8827 * to all interfaces. Once it hits zero the device reverts back to normal 8828 * filtering operation. A negative @inc value is used to drop the counter 8829 * when releasing a resource needing all multicasts. 8830 * Return 0 if successful or a negative errno code on error. 8831 */ 8832 8833 int dev_set_allmulti(struct net_device *dev, int inc) 8834 { 8835 return __dev_set_allmulti(dev, inc, true); 8836 } 8837 EXPORT_SYMBOL(dev_set_allmulti); 8838 8839 /* 8840 * Upload unicast and multicast address lists to device and 8841 * configure RX filtering. When the device doesn't support unicast 8842 * filtering it is put in promiscuous mode while unicast addresses 8843 * are present. 8844 */ 8845 void __dev_set_rx_mode(struct net_device *dev) 8846 { 8847 const struct net_device_ops *ops = dev->netdev_ops; 8848 8849 /* dev_open will call this function so the list will stay sane. */ 8850 if (!(dev->flags&IFF_UP)) 8851 return; 8852 8853 if (!netif_device_present(dev)) 8854 return; 8855 8856 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8857 /* Unicast addresses changes may only happen under the rtnl, 8858 * therefore calling __dev_set_promiscuity here is safe. 8859 */ 8860 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8861 __dev_set_promiscuity(dev, 1, false); 8862 dev->uc_promisc = true; 8863 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8864 __dev_set_promiscuity(dev, -1, false); 8865 dev->uc_promisc = false; 8866 } 8867 } 8868 8869 if (ops->ndo_set_rx_mode) 8870 ops->ndo_set_rx_mode(dev); 8871 } 8872 8873 void dev_set_rx_mode(struct net_device *dev) 8874 { 8875 netif_addr_lock_bh(dev); 8876 __dev_set_rx_mode(dev); 8877 netif_addr_unlock_bh(dev); 8878 } 8879 8880 /** 8881 * dev_get_flags - get flags reported to userspace 8882 * @dev: device 8883 * 8884 * Get the combination of flag bits exported through APIs to userspace. 
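 *
 * Illustrative check (editor's addition; variable names are assumptions):
 *
 *	unsigned int flags = dev_get_flags(dev);
 *	bool up_and_running = (flags & (IFF_UP | IFF_RUNNING)) ==
 *			      (IFF_UP | IFF_RUNNING);
 *
 * Note that IFF_RUNNING, IFF_LOWER_UP and IFF_DORMANT in the result are
 * derived from the operational state rather than read from dev->flags, as
 * the body of the function below shows.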
8885 */ 8886 unsigned int dev_get_flags(const struct net_device *dev) 8887 { 8888 unsigned int flags; 8889 8890 flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC | 8891 IFF_ALLMULTI | 8892 IFF_RUNNING | 8893 IFF_LOWER_UP | 8894 IFF_DORMANT)) | 8895 (READ_ONCE(dev->gflags) & (IFF_PROMISC | 8896 IFF_ALLMULTI)); 8897 8898 if (netif_running(dev)) { 8899 if (netif_oper_up(dev)) 8900 flags |= IFF_RUNNING; 8901 if (netif_carrier_ok(dev)) 8902 flags |= IFF_LOWER_UP; 8903 if (netif_dormant(dev)) 8904 flags |= IFF_DORMANT; 8905 } 8906 8907 return flags; 8908 } 8909 EXPORT_SYMBOL(dev_get_flags); 8910 8911 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8912 struct netlink_ext_ack *extack) 8913 { 8914 unsigned int old_flags = dev->flags; 8915 int ret; 8916 8917 ASSERT_RTNL(); 8918 8919 /* 8920 * Set the flags on our device. 8921 */ 8922 8923 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8924 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8925 IFF_AUTOMEDIA)) | 8926 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8927 IFF_ALLMULTI)); 8928 8929 /* 8930 * Load in the correct multicast list now the flags have changed. 8931 */ 8932 8933 if ((old_flags ^ flags) & IFF_MULTICAST) 8934 dev_change_rx_flags(dev, IFF_MULTICAST); 8935 8936 dev_set_rx_mode(dev); 8937 8938 /* 8939 * Have we downed the interface. We handle IFF_UP ourselves 8940 * according to user attempts to set it, rather than blindly 8941 * setting it. 8942 */ 8943 8944 ret = 0; 8945 if ((old_flags ^ flags) & IFF_UP) { 8946 if (old_flags & IFF_UP) 8947 __dev_close(dev); 8948 else 8949 ret = __dev_open(dev, extack); 8950 } 8951 8952 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8953 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8954 unsigned int old_flags = dev->flags; 8955 8956 dev->gflags ^= IFF_PROMISC; 8957 8958 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8959 if (dev->flags != old_flags) 8960 dev_set_rx_mode(dev); 8961 } 8962 8963 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8964 * is important. Some (broken) drivers set IFF_PROMISC, when 8965 * IFF_ALLMULTI is requested not asking us and not reporting. 8966 */ 8967 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8968 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8969 8970 dev->gflags ^= IFF_ALLMULTI; 8971 __dev_set_allmulti(dev, inc, false); 8972 } 8973 8974 return ret; 8975 } 8976 8977 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8978 unsigned int gchanges, u32 portid, 8979 const struct nlmsghdr *nlh) 8980 { 8981 unsigned int changes = dev->flags ^ old_flags; 8982 8983 if (gchanges) 8984 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); 8985 8986 if (changes & IFF_UP) { 8987 if (dev->flags & IFF_UP) 8988 call_netdevice_notifiers(NETDEV_UP, dev); 8989 else 8990 call_netdevice_notifiers(NETDEV_DOWN, dev); 8991 } 8992 8993 if (dev->flags & IFF_UP && 8994 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8995 struct netdev_notifier_change_info change_info = { 8996 .info = { 8997 .dev = dev, 8998 }, 8999 .flags_changed = changes, 9000 }; 9001 9002 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 9003 } 9004 } 9005 9006 /** 9007 * dev_change_flags - change device settings 9008 * @dev: device 9009 * @flags: device state flags 9010 * @extack: netlink extended ack 9011 * 9012 * Change settings on device based state flags. The flags are 9013 * in the userspace exported format. 
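 *
 * Illustrative sketch (editor's addition): an in-kernel caller bringing an
 * interface administratively up while preserving its other flags might do,
 * under rtnl_lock() (error handling and the extack are the caller's own):
 *
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, extack);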
9014 */ 9015 int dev_change_flags(struct net_device *dev, unsigned int flags, 9016 struct netlink_ext_ack *extack) 9017 { 9018 int ret; 9019 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 9020 9021 ret = __dev_change_flags(dev, flags, extack); 9022 if (ret < 0) 9023 return ret; 9024 9025 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 9026 __dev_notify_flags(dev, old_flags, changes, 0, NULL); 9027 return ret; 9028 } 9029 EXPORT_SYMBOL(dev_change_flags); 9030 9031 int __dev_set_mtu(struct net_device *dev, int new_mtu) 9032 { 9033 const struct net_device_ops *ops = dev->netdev_ops; 9034 9035 if (ops->ndo_change_mtu) 9036 return ops->ndo_change_mtu(dev, new_mtu); 9037 9038 /* Pairs with all the lockless reads of dev->mtu in the stack */ 9039 WRITE_ONCE(dev->mtu, new_mtu); 9040 return 0; 9041 } 9042 EXPORT_SYMBOL(__dev_set_mtu); 9043 9044 int dev_validate_mtu(struct net_device *dev, int new_mtu, 9045 struct netlink_ext_ack *extack) 9046 { 9047 /* MTU must be positive, and in range */ 9048 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 9049 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 9050 return -EINVAL; 9051 } 9052 9053 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 9054 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 9055 return -EINVAL; 9056 } 9057 return 0; 9058 } 9059 9060 /** 9061 * dev_set_mtu_ext - Change maximum transfer unit 9062 * @dev: device 9063 * @new_mtu: new transfer unit 9064 * @extack: netlink extended ack 9065 * 9066 * Change the maximum transfer size of the network device. 9067 */ 9068 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 9069 struct netlink_ext_ack *extack) 9070 { 9071 int err, orig_mtu; 9072 9073 if (new_mtu == dev->mtu) 9074 return 0; 9075 9076 err = dev_validate_mtu(dev, new_mtu, extack); 9077 if (err) 9078 return err; 9079 9080 if (!netif_device_present(dev)) 9081 return -ENODEV; 9082 9083 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 9084 err = notifier_to_errno(err); 9085 if (err) 9086 return err; 9087 9088 orig_mtu = dev->mtu; 9089 err = __dev_set_mtu(dev, new_mtu); 9090 9091 if (!err) { 9092 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 9093 orig_mtu); 9094 err = notifier_to_errno(err); 9095 if (err) { 9096 /* setting mtu back and notifying everyone again, 9097 * so that they have a chance to revert changes. 
9098 */ 9099 __dev_set_mtu(dev, orig_mtu); 9100 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 9101 new_mtu); 9102 } 9103 } 9104 return err; 9105 } 9106 9107 int dev_set_mtu(struct net_device *dev, int new_mtu) 9108 { 9109 struct netlink_ext_ack extack; 9110 int err; 9111 9112 memset(&extack, 0, sizeof(extack)); 9113 err = dev_set_mtu_ext(dev, new_mtu, &extack); 9114 if (err && extack._msg) 9115 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 9116 return err; 9117 } 9118 EXPORT_SYMBOL(dev_set_mtu); 9119 9120 /** 9121 * dev_change_tx_queue_len - Change TX queue length of a netdevice 9122 * @dev: device 9123 * @new_len: new tx queue length 9124 */ 9125 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 9126 { 9127 unsigned int orig_len = dev->tx_queue_len; 9128 int res; 9129 9130 if (new_len != (unsigned int)new_len) 9131 return -ERANGE; 9132 9133 if (new_len != orig_len) { 9134 WRITE_ONCE(dev->tx_queue_len, new_len); 9135 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 9136 res = notifier_to_errno(res); 9137 if (res) 9138 goto err_rollback; 9139 res = dev_qdisc_change_tx_queue_len(dev); 9140 if (res) 9141 goto err_rollback; 9142 } 9143 9144 return 0; 9145 9146 err_rollback: 9147 netdev_err(dev, "refused to change device tx_queue_len\n"); 9148 WRITE_ONCE(dev->tx_queue_len, orig_len); 9149 return res; 9150 } 9151 9152 /** 9153 * dev_set_group - Change group this device belongs to 9154 * @dev: device 9155 * @new_group: group this device should belong to 9156 */ 9157 void dev_set_group(struct net_device *dev, int new_group) 9158 { 9159 dev->group = new_group; 9160 } 9161 9162 /** 9163 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 9164 * @dev: device 9165 * @addr: new address 9166 * @extack: netlink extended ack 9167 */ 9168 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 9169 struct netlink_ext_ack *extack) 9170 { 9171 struct netdev_notifier_pre_changeaddr_info info = { 9172 .info.dev = dev, 9173 .info.extack = extack, 9174 .dev_addr = addr, 9175 }; 9176 int rc; 9177 9178 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 9179 return notifier_to_errno(rc); 9180 } 9181 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 9182 9183 /** 9184 * dev_set_mac_address - Change Media Access Control Address 9185 * @dev: device 9186 * @sa: new address 9187 * @extack: netlink extended ack 9188 * 9189 * Change the hardware (MAC) address of the device 9190 */ 9191 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 9192 struct netlink_ext_ack *extack) 9193 { 9194 const struct net_device_ops *ops = dev->netdev_ops; 9195 int err; 9196 9197 if (!ops->ndo_set_mac_address) 9198 return -EOPNOTSUPP; 9199 if (sa->sa_family != dev->type) 9200 return -EINVAL; 9201 if (!netif_device_present(dev)) 9202 return -ENODEV; 9203 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 9204 if (err) 9205 return err; 9206 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) { 9207 err = ops->ndo_set_mac_address(dev, sa); 9208 if (err) 9209 return err; 9210 } 9211 dev->addr_assign_type = NET_ADDR_SET; 9212 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 9213 add_device_randomness(dev->dev_addr, dev->addr_len); 9214 return 0; 9215 } 9216 EXPORT_SYMBOL(dev_set_mac_address); 9217 9218 DECLARE_RWSEM(dev_addr_sem); 9219 9220 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 9221 struct netlink_ext_ack *extack) 9222 { 9223 int ret; 9224 9225 down_write(&dev_addr_sem); 9226 ret = 
dev_set_mac_address(dev, sa, extack); 9227 up_write(&dev_addr_sem); 9228 return ret; 9229 } 9230 EXPORT_SYMBOL(dev_set_mac_address_user); 9231 9232 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 9233 { 9234 size_t size = sizeof(sa->sa_data_min); 9235 struct net_device *dev; 9236 int ret = 0; 9237 9238 down_read(&dev_addr_sem); 9239 rcu_read_lock(); 9240 9241 dev = dev_get_by_name_rcu(net, dev_name); 9242 if (!dev) { 9243 ret = -ENODEV; 9244 goto unlock; 9245 } 9246 if (!dev->addr_len) 9247 memset(sa->sa_data, 0, size); 9248 else 9249 memcpy(sa->sa_data, dev->dev_addr, 9250 min_t(size_t, size, dev->addr_len)); 9251 sa->sa_family = dev->type; 9252 9253 unlock: 9254 rcu_read_unlock(); 9255 up_read(&dev_addr_sem); 9256 return ret; 9257 } 9258 EXPORT_SYMBOL(dev_get_mac_address); 9259 9260 /** 9261 * dev_change_carrier - Change device carrier 9262 * @dev: device 9263 * @new_carrier: new value 9264 * 9265 * Change device carrier 9266 */ 9267 int dev_change_carrier(struct net_device *dev, bool new_carrier) 9268 { 9269 const struct net_device_ops *ops = dev->netdev_ops; 9270 9271 if (!ops->ndo_change_carrier) 9272 return -EOPNOTSUPP; 9273 if (!netif_device_present(dev)) 9274 return -ENODEV; 9275 return ops->ndo_change_carrier(dev, new_carrier); 9276 } 9277 9278 /** 9279 * dev_get_phys_port_id - Get device physical port ID 9280 * @dev: device 9281 * @ppid: port ID 9282 * 9283 * Get device physical port ID 9284 */ 9285 int dev_get_phys_port_id(struct net_device *dev, 9286 struct netdev_phys_item_id *ppid) 9287 { 9288 const struct net_device_ops *ops = dev->netdev_ops; 9289 9290 if (!ops->ndo_get_phys_port_id) 9291 return -EOPNOTSUPP; 9292 return ops->ndo_get_phys_port_id(dev, ppid); 9293 } 9294 9295 /** 9296 * dev_get_phys_port_name - Get device physical port name 9297 * @dev: device 9298 * @name: port name 9299 * @len: limit of bytes to copy to name 9300 * 9301 * Get device physical port name 9302 */ 9303 int dev_get_phys_port_name(struct net_device *dev, 9304 char *name, size_t len) 9305 { 9306 const struct net_device_ops *ops = dev->netdev_ops; 9307 int err; 9308 9309 if (ops->ndo_get_phys_port_name) { 9310 err = ops->ndo_get_phys_port_name(dev, name, len); 9311 if (err != -EOPNOTSUPP) 9312 return err; 9313 } 9314 return devlink_compat_phys_port_name_get(dev, name, len); 9315 } 9316 9317 /** 9318 * dev_get_port_parent_id - Get the device's port parent identifier 9319 * @dev: network device 9320 * @ppid: pointer to a storage for the port's parent identifier 9321 * @recurse: allow/disallow recursion to lower devices 9322 * 9323 * Get the devices's port parent identifier 9324 */ 9325 int dev_get_port_parent_id(struct net_device *dev, 9326 struct netdev_phys_item_id *ppid, 9327 bool recurse) 9328 { 9329 const struct net_device_ops *ops = dev->netdev_ops; 9330 struct netdev_phys_item_id first = { }; 9331 struct net_device *lower_dev; 9332 struct list_head *iter; 9333 int err; 9334 9335 if (ops->ndo_get_port_parent_id) { 9336 err = ops->ndo_get_port_parent_id(dev, ppid); 9337 if (err != -EOPNOTSUPP) 9338 return err; 9339 } 9340 9341 err = devlink_compat_switch_id_get(dev, ppid); 9342 if (!recurse || err != -EOPNOTSUPP) 9343 return err; 9344 9345 netdev_for_each_lower_dev(dev, lower_dev, iter) { 9346 err = dev_get_port_parent_id(lower_dev, ppid, true); 9347 if (err) 9348 break; 9349 if (!first.id_len) 9350 first = *ppid; 9351 else if (memcmp(&first, ppid, sizeof(*ppid))) 9352 return -EOPNOTSUPP; 9353 } 9354 9355 return err; 9356 } 9357 EXPORT_SYMBOL(dev_get_port_parent_id); 
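
/* Illustrative sketch (editor's addition, not upstream code): a caller that
 * wants the switch ID of the ASIC underneath a stacked device (for example a
 * VLAN or LAG on top of switch ports) can allow recursion into lower devices:
 *
 *	struct netdev_phys_item_id ppid = {};
 *	int err;
 *
 *	err = dev_get_port_parent_id(dev, &ppid, true);
 *	if (err)
 *		return err;
 *
 * -EOPNOTSUPP is returned when no parent ID is available or when the lower
 * devices disagree about it; netdev_port_same_parent_id() below is built on
 * exactly this call.
 */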
9358 9359 /** 9360 * netdev_port_same_parent_id - Indicate if two network devices have 9361 * the same port parent identifier 9362 * @a: first network device 9363 * @b: second network device 9364 */ 9365 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 9366 { 9367 struct netdev_phys_item_id a_id = { }; 9368 struct netdev_phys_item_id b_id = { }; 9369 9370 if (dev_get_port_parent_id(a, &a_id, true) || 9371 dev_get_port_parent_id(b, &b_id, true)) 9372 return false; 9373 9374 return netdev_phys_item_id_same(&a_id, &b_id); 9375 } 9376 EXPORT_SYMBOL(netdev_port_same_parent_id); 9377 9378 /** 9379 * dev_change_proto_down - set carrier according to proto_down. 9380 * 9381 * @dev: device 9382 * @proto_down: new value 9383 */ 9384 int dev_change_proto_down(struct net_device *dev, bool proto_down) 9385 { 9386 if (!dev->change_proto_down) 9387 return -EOPNOTSUPP; 9388 if (!netif_device_present(dev)) 9389 return -ENODEV; 9390 if (proto_down) 9391 netif_carrier_off(dev); 9392 else 9393 netif_carrier_on(dev); 9394 WRITE_ONCE(dev->proto_down, proto_down); 9395 return 0; 9396 } 9397 9398 /** 9399 * dev_change_proto_down_reason - proto down reason 9400 * 9401 * @dev: device 9402 * @mask: proto down mask 9403 * @value: proto down value 9404 */ 9405 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 9406 u32 value) 9407 { 9408 u32 proto_down_reason; 9409 int b; 9410 9411 if (!mask) { 9412 proto_down_reason = value; 9413 } else { 9414 proto_down_reason = dev->proto_down_reason; 9415 for_each_set_bit(b, &mask, 32) { 9416 if (value & (1 << b)) 9417 proto_down_reason |= BIT(b); 9418 else 9419 proto_down_reason &= ~BIT(b); 9420 } 9421 } 9422 WRITE_ONCE(dev->proto_down_reason, proto_down_reason); 9423 } 9424 9425 struct bpf_xdp_link { 9426 struct bpf_link link; 9427 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9428 int flags; 9429 }; 9430 9431 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9432 { 9433 if (flags & XDP_FLAGS_HW_MODE) 9434 return XDP_MODE_HW; 9435 if (flags & XDP_FLAGS_DRV_MODE) 9436 return XDP_MODE_DRV; 9437 if (flags & XDP_FLAGS_SKB_MODE) 9438 return XDP_MODE_SKB; 9439 return dev->netdev_ops->ndo_bpf ? 
XDP_MODE_DRV : XDP_MODE_SKB; 9440 } 9441 9442 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9443 { 9444 switch (mode) { 9445 case XDP_MODE_SKB: 9446 return generic_xdp_install; 9447 case XDP_MODE_DRV: 9448 case XDP_MODE_HW: 9449 return dev->netdev_ops->ndo_bpf; 9450 default: 9451 return NULL; 9452 } 9453 } 9454 9455 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9456 enum bpf_xdp_mode mode) 9457 { 9458 return dev->xdp_state[mode].link; 9459 } 9460 9461 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9462 enum bpf_xdp_mode mode) 9463 { 9464 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9465 9466 if (link) 9467 return link->link.prog; 9468 return dev->xdp_state[mode].prog; 9469 } 9470 9471 u8 dev_xdp_prog_count(struct net_device *dev) 9472 { 9473 u8 count = 0; 9474 int i; 9475 9476 for (i = 0; i < __MAX_XDP_MODE; i++) 9477 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9478 count++; 9479 return count; 9480 } 9481 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9482 9483 int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) 9484 { 9485 if (!dev->netdev_ops->ndo_bpf) 9486 return -EOPNOTSUPP; 9487 9488 if (dev_get_min_mp_channel_count(dev)) { 9489 NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider"); 9490 return -EBUSY; 9491 } 9492 9493 return dev->netdev_ops->ndo_bpf(dev, bpf); 9494 } 9495 EXPORT_SYMBOL_GPL(dev_xdp_propagate); 9496 9497 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9498 { 9499 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9500 9501 return prog ? prog->aux->id : 0; 9502 } 9503 9504 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9505 struct bpf_xdp_link *link) 9506 { 9507 dev->xdp_state[mode].link = link; 9508 dev->xdp_state[mode].prog = NULL; 9509 } 9510 9511 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9512 struct bpf_prog *prog) 9513 { 9514 dev->xdp_state[mode].link = NULL; 9515 dev->xdp_state[mode].prog = prog; 9516 } 9517 9518 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9519 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9520 u32 flags, struct bpf_prog *prog) 9521 { 9522 struct netdev_bpf xdp; 9523 int err; 9524 9525 if (dev_get_min_mp_channel_count(dev)) { 9526 NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider"); 9527 return -EBUSY; 9528 } 9529 9530 memset(&xdp, 0, sizeof(xdp)); 9531 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9532 xdp.extack = extack; 9533 xdp.flags = flags; 9534 xdp.prog = prog; 9535 9536 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9537 * "moved" into driver), so they don't increment it on their own, but 9538 * they do decrement refcnt when program is detached or replaced. 9539 * Given net_device also owns link/prog, we need to bump refcnt here 9540 * to prevent drivers from underflowing it. 
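	 *
	 * Illustrative accounting (editor's addition): the bpf_prog_inc()
	 * below takes the reference the driver will drop when the program is
	 * later detached or replaced; the reference recorded in
	 * dev->xdp_state (or held by an attached bpf_link) is separate and is
	 * released by dev_xdp_attach() / dev_xdp_uninstall(), so neither side
	 * underflows.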
9541 */ 9542 if (prog) 9543 bpf_prog_inc(prog); 9544 err = bpf_op(dev, &xdp); 9545 if (err) { 9546 if (prog) 9547 bpf_prog_put(prog); 9548 return err; 9549 } 9550 9551 if (mode != XDP_MODE_HW) 9552 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9553 9554 return 0; 9555 } 9556 9557 static void dev_xdp_uninstall(struct net_device *dev) 9558 { 9559 struct bpf_xdp_link *link; 9560 struct bpf_prog *prog; 9561 enum bpf_xdp_mode mode; 9562 bpf_op_t bpf_op; 9563 9564 ASSERT_RTNL(); 9565 9566 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9567 prog = dev_xdp_prog(dev, mode); 9568 if (!prog) 9569 continue; 9570 9571 bpf_op = dev_xdp_bpf_op(dev, mode); 9572 if (!bpf_op) 9573 continue; 9574 9575 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9576 9577 /* auto-detach link from net device */ 9578 link = dev_xdp_link(dev, mode); 9579 if (link) 9580 link->dev = NULL; 9581 else 9582 bpf_prog_put(prog); 9583 9584 dev_xdp_set_link(dev, mode, NULL); 9585 } 9586 } 9587 9588 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9589 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9590 struct bpf_prog *old_prog, u32 flags) 9591 { 9592 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9593 struct bpf_prog *cur_prog; 9594 struct net_device *upper; 9595 struct list_head *iter; 9596 enum bpf_xdp_mode mode; 9597 bpf_op_t bpf_op; 9598 int err; 9599 9600 ASSERT_RTNL(); 9601 9602 /* either link or prog attachment, never both */ 9603 if (link && (new_prog || old_prog)) 9604 return -EINVAL; 9605 /* link supports only XDP mode flags */ 9606 if (link && (flags & ~XDP_FLAGS_MODES)) { 9607 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9608 return -EINVAL; 9609 } 9610 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9611 if (num_modes > 1) { 9612 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9613 return -EINVAL; 9614 } 9615 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9616 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9617 NL_SET_ERR_MSG(extack, 9618 "More than one program loaded, unset mode is ambiguous"); 9619 return -EINVAL; 9620 } 9621 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9622 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9623 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); 9624 return -EINVAL; 9625 } 9626 9627 mode = dev_xdp_mode(dev, flags); 9628 /* can't replace attached link */ 9629 if (dev_xdp_link(dev, mode)) { 9630 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9631 return -EBUSY; 9632 } 9633 9634 /* don't allow if an upper device already has a program */ 9635 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9636 if (dev_xdp_prog_count(upper) > 0) { 9637 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9638 return -EEXIST; 9639 } 9640 } 9641 9642 cur_prog = dev_xdp_prog(dev, mode); 9643 /* can't replace attached prog with link */ 9644 if (link && cur_prog) { 9645 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9646 return -EBUSY; 9647 } 9648 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9649 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9650 return -EEXIST; 9651 } 9652 9653 /* put effective new program into new_prog */ 9654 if (link) 9655 new_prog = link->link.prog; 9656 9657 if (new_prog) { 9658 bool offload = mode == XDP_MODE_HW; 9659 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9660 ? 
XDP_MODE_DRV : XDP_MODE_SKB; 9661 9662 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9663 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9664 return -EBUSY; 9665 } 9666 if (!offload && dev_xdp_prog(dev, other_mode)) { 9667 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9668 return -EEXIST; 9669 } 9670 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { 9671 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); 9672 return -EINVAL; 9673 } 9674 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { 9675 NL_SET_ERR_MSG(extack, "Program bound to different device"); 9676 return -EINVAL; 9677 } 9678 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9679 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9680 return -EINVAL; 9681 } 9682 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9683 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9684 return -EINVAL; 9685 } 9686 } 9687 9688 /* don't call drivers if the effective program didn't change */ 9689 if (new_prog != cur_prog) { 9690 bpf_op = dev_xdp_bpf_op(dev, mode); 9691 if (!bpf_op) { 9692 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9693 return -EOPNOTSUPP; 9694 } 9695 9696 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9697 if (err) 9698 return err; 9699 } 9700 9701 if (link) 9702 dev_xdp_set_link(dev, mode, link); 9703 else 9704 dev_xdp_set_prog(dev, mode, new_prog); 9705 if (cur_prog) 9706 bpf_prog_put(cur_prog); 9707 9708 return 0; 9709 } 9710 9711 static int dev_xdp_attach_link(struct net_device *dev, 9712 struct netlink_ext_ack *extack, 9713 struct bpf_xdp_link *link) 9714 { 9715 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9716 } 9717 9718 static int dev_xdp_detach_link(struct net_device *dev, 9719 struct netlink_ext_ack *extack, 9720 struct bpf_xdp_link *link) 9721 { 9722 enum bpf_xdp_mode mode; 9723 bpf_op_t bpf_op; 9724 9725 ASSERT_RTNL(); 9726 9727 mode = dev_xdp_mode(dev, link->flags); 9728 if (dev_xdp_link(dev, mode) != link) 9729 return -EINVAL; 9730 9731 bpf_op = dev_xdp_bpf_op(dev, mode); 9732 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9733 dev_xdp_set_link(dev, mode, NULL); 9734 return 0; 9735 } 9736 9737 static void bpf_xdp_link_release(struct bpf_link *link) 9738 { 9739 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9740 9741 rtnl_lock(); 9742 9743 /* if racing with net_device's tear down, xdp_link->dev might be 9744 * already NULL, in which case link was already auto-detached 9745 */ 9746 if (xdp_link->dev) { 9747 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9748 xdp_link->dev = NULL; 9749 } 9750 9751 rtnl_unlock(); 9752 } 9753 9754 static int bpf_xdp_link_detach(struct bpf_link *link) 9755 { 9756 bpf_xdp_link_release(link); 9757 return 0; 9758 } 9759 9760 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9761 { 9762 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9763 9764 kfree(xdp_link); 9765 } 9766 9767 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9768 struct seq_file *seq) 9769 { 9770 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9771 u32 ifindex = 0; 9772 9773 rtnl_lock(); 9774 if (xdp_link->dev) 9775 ifindex = xdp_link->dev->ifindex; 9776 rtnl_unlock(); 9777 9778 seq_printf(seq, 
"ifindex:\t%u\n", ifindex); 9779 } 9780 9781 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9782 struct bpf_link_info *info) 9783 { 9784 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9785 u32 ifindex = 0; 9786 9787 rtnl_lock(); 9788 if (xdp_link->dev) 9789 ifindex = xdp_link->dev->ifindex; 9790 rtnl_unlock(); 9791 9792 info->xdp.ifindex = ifindex; 9793 return 0; 9794 } 9795 9796 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9797 struct bpf_prog *old_prog) 9798 { 9799 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9800 enum bpf_xdp_mode mode; 9801 bpf_op_t bpf_op; 9802 int err = 0; 9803 9804 rtnl_lock(); 9805 9806 /* link might have been auto-released already, so fail */ 9807 if (!xdp_link->dev) { 9808 err = -ENOLINK; 9809 goto out_unlock; 9810 } 9811 9812 if (old_prog && link->prog != old_prog) { 9813 err = -EPERM; 9814 goto out_unlock; 9815 } 9816 old_prog = link->prog; 9817 if (old_prog->type != new_prog->type || 9818 old_prog->expected_attach_type != new_prog->expected_attach_type) { 9819 err = -EINVAL; 9820 goto out_unlock; 9821 } 9822 9823 if (old_prog == new_prog) { 9824 /* no-op, don't disturb drivers */ 9825 bpf_prog_put(new_prog); 9826 goto out_unlock; 9827 } 9828 9829 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9830 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9831 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9832 xdp_link->flags, new_prog); 9833 if (err) 9834 goto out_unlock; 9835 9836 old_prog = xchg(&link->prog, new_prog); 9837 bpf_prog_put(old_prog); 9838 9839 out_unlock: 9840 rtnl_unlock(); 9841 return err; 9842 } 9843 9844 static const struct bpf_link_ops bpf_xdp_link_lops = { 9845 .release = bpf_xdp_link_release, 9846 .dealloc = bpf_xdp_link_dealloc, 9847 .detach = bpf_xdp_link_detach, 9848 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9849 .fill_link_info = bpf_xdp_link_fill_link_info, 9850 .update_prog = bpf_xdp_link_update, 9851 }; 9852 9853 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9854 { 9855 struct net *net = current->nsproxy->net_ns; 9856 struct bpf_link_primer link_primer; 9857 struct netlink_ext_ack extack = {}; 9858 struct bpf_xdp_link *link; 9859 struct net_device *dev; 9860 int err, fd; 9861 9862 rtnl_lock(); 9863 dev = dev_get_by_index(net, attr->link_create.target_ifindex); 9864 if (!dev) { 9865 rtnl_unlock(); 9866 return -EINVAL; 9867 } 9868 9869 link = kzalloc(sizeof(*link), GFP_USER); 9870 if (!link) { 9871 err = -ENOMEM; 9872 goto unlock; 9873 } 9874 9875 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9876 link->dev = dev; 9877 link->flags = attr->link_create.flags; 9878 9879 err = bpf_link_prime(&link->link, &link_primer); 9880 if (err) { 9881 kfree(link); 9882 goto unlock; 9883 } 9884 9885 err = dev_xdp_attach_link(dev, &extack, link); 9886 rtnl_unlock(); 9887 9888 if (err) { 9889 link->dev = NULL; 9890 bpf_link_cleanup(&link_primer); 9891 trace_bpf_xdp_link_attach_failed(extack._msg); 9892 goto out_put_dev; 9893 } 9894 9895 fd = bpf_link_settle(&link_primer); 9896 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9897 dev_put(dev); 9898 return fd; 9899 9900 unlock: 9901 rtnl_unlock(); 9902 9903 out_put_dev: 9904 dev_put(dev); 9905 return err; 9906 } 9907 9908 /** 9909 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9910 * @dev: device 9911 * @extack: netlink extended ack 9912 * @fd: new program fd or negative value 
to clear 9913 * @expected_fd: old program fd that userspace expects to replace or clear 9914 * @flags: xdp-related flags 9915 * 9916 * Set or clear a bpf program for a device 9917 */ 9918 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9919 int fd, int expected_fd, u32 flags) 9920 { 9921 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9922 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9923 int err; 9924 9925 ASSERT_RTNL(); 9926 9927 if (fd >= 0) { 9928 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9929 mode != XDP_MODE_SKB); 9930 if (IS_ERR(new_prog)) 9931 return PTR_ERR(new_prog); 9932 } 9933 9934 if (expected_fd >= 0) { 9935 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9936 mode != XDP_MODE_SKB); 9937 if (IS_ERR(old_prog)) { 9938 err = PTR_ERR(old_prog); 9939 old_prog = NULL; 9940 goto err_out; 9941 } 9942 } 9943 9944 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9945 9946 err_out: 9947 if (err && new_prog) 9948 bpf_prog_put(new_prog); 9949 if (old_prog) 9950 bpf_prog_put(old_prog); 9951 return err; 9952 } 9953 9954 u32 dev_get_min_mp_channel_count(const struct net_device *dev) 9955 { 9956 int i; 9957 9958 ASSERT_RTNL(); 9959 9960 for (i = dev->real_num_rx_queues - 1; i >= 0; i--) 9961 if (dev->_rx[i].mp_params.mp_priv) 9962 /* The channel count is the idx plus 1. */ 9963 return i + 1; 9964 9965 return 0; 9966 } 9967 9968 /** 9969 * dev_index_reserve() - allocate an ifindex in a namespace 9970 * @net: the applicable net namespace 9971 * @ifindex: requested ifindex, pass %0 to get one allocated 9972 * 9973 * Allocate a ifindex for a new device. Caller must either use the ifindex 9974 * to store the device (via list_netdevice()) or call dev_index_release() 9975 * to give the index up. 9976 * 9977 * Return: a suitable unique value for a new device interface number or -errno. 
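 *
 * Illustrative call pattern (editor's addition; mirrors the rule above,
 * local names are assumptions):
 *
 *	ifindex = dev_index_reserve(net, 0);
 *	if (ifindex < 0)
 *		return ifindex;
 *	dev->ifindex = ifindex;
 *
 * and, if setup later fails before list_netdevice() stores the device:
 *
 *	dev_index_release(net, dev->ifindex);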
9978 */ 9979 static int dev_index_reserve(struct net *net, u32 ifindex) 9980 { 9981 int err; 9982 9983 if (ifindex > INT_MAX) { 9984 DEBUG_NET_WARN_ON_ONCE(1); 9985 return -EINVAL; 9986 } 9987 9988 if (!ifindex) 9989 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL, 9990 xa_limit_31b, &net->ifindex, GFP_KERNEL); 9991 else 9992 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL); 9993 if (err < 0) 9994 return err; 9995 9996 return ifindex; 9997 } 9998 9999 static void dev_index_release(struct net *net, int ifindex) 10000 { 10001 /* Expect only unused indexes, unlist_netdevice() removes the used */ 10002 WARN_ON(xa_erase(&net->dev_by_index, ifindex)); 10003 } 10004 10005 /* Delayed registration/unregisteration */ 10006 LIST_HEAD(net_todo_list); 10007 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 10008 atomic_t dev_unreg_count = ATOMIC_INIT(0); 10009 10010 static void net_set_todo(struct net_device *dev) 10011 { 10012 list_add_tail(&dev->todo_list, &net_todo_list); 10013 } 10014 10015 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 10016 struct net_device *upper, netdev_features_t features) 10017 { 10018 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 10019 netdev_features_t feature; 10020 int feature_bit; 10021 10022 for_each_netdev_feature(upper_disables, feature_bit) { 10023 feature = __NETIF_F_BIT(feature_bit); 10024 if (!(upper->wanted_features & feature) 10025 && (features & feature)) { 10026 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 10027 &feature, upper->name); 10028 features &= ~feature; 10029 } 10030 } 10031 10032 return features; 10033 } 10034 10035 static void netdev_sync_lower_features(struct net_device *upper, 10036 struct net_device *lower, netdev_features_t features) 10037 { 10038 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 10039 netdev_features_t feature; 10040 int feature_bit; 10041 10042 for_each_netdev_feature(upper_disables, feature_bit) { 10043 feature = __NETIF_F_BIT(feature_bit); 10044 if (!(features & feature) && (lower->features & feature)) { 10045 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 10046 &feature, lower->name); 10047 lower->wanted_features &= ~feature; 10048 __netdev_update_features(lower); 10049 10050 if (unlikely(lower->features & feature)) 10051 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 10052 &feature, lower->name); 10053 else 10054 netdev_features_change(lower); 10055 } 10056 } 10057 } 10058 10059 static bool netdev_has_ip_or_hw_csum(netdev_features_t features) 10060 { 10061 netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 10062 bool ip_csum = (features & ip_csum_mask) == ip_csum_mask; 10063 bool hw_csum = features & NETIF_F_HW_CSUM; 10064 10065 return ip_csum || hw_csum; 10066 } 10067 10068 static netdev_features_t netdev_fix_features(struct net_device *dev, 10069 netdev_features_t features) 10070 { 10071 /* Fix illegal checksum combinations */ 10072 if ((features & NETIF_F_HW_CSUM) && 10073 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 10074 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 10075 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 10076 } 10077 10078 /* TSO requires that SG is present as well. 
*/ 10079 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 10080 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 10081 features &= ~NETIF_F_ALL_TSO; 10082 } 10083 10084 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 10085 !(features & NETIF_F_IP_CSUM)) { 10086 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 10087 features &= ~NETIF_F_TSO; 10088 features &= ~NETIF_F_TSO_ECN; 10089 } 10090 10091 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 10092 !(features & NETIF_F_IPV6_CSUM)) { 10093 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 10094 features &= ~NETIF_F_TSO6; 10095 } 10096 10097 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 10098 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 10099 features &= ~NETIF_F_TSO_MANGLEID; 10100 10101 /* TSO ECN requires that TSO is present as well. */ 10102 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 10103 features &= ~NETIF_F_TSO_ECN; 10104 10105 /* Software GSO depends on SG. */ 10106 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 10107 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 10108 features &= ~NETIF_F_GSO; 10109 } 10110 10111 /* GSO partial features require GSO partial be set */ 10112 if ((features & dev->gso_partial_features) && 10113 !(features & NETIF_F_GSO_PARTIAL)) { 10114 netdev_dbg(dev, 10115 "Dropping partially supported GSO features since no GSO partial.\n"); 10116 features &= ~dev->gso_partial_features; 10117 } 10118 10119 if (!(features & NETIF_F_RXCSUM)) { 10120 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 10121 * successfully merged by hardware must also have the 10122 * checksum verified by hardware. If the user does not 10123 * want to enable RXCSUM, logically, we should disable GRO_HW. 
10124 */ 10125 if (features & NETIF_F_GRO_HW) { 10126 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 10127 features &= ~NETIF_F_GRO_HW; 10128 } 10129 } 10130 10131 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 10132 if (features & NETIF_F_RXFCS) { 10133 if (features & NETIF_F_LRO) { 10134 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 10135 features &= ~NETIF_F_LRO; 10136 } 10137 10138 if (features & NETIF_F_GRO_HW) { 10139 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 10140 features &= ~NETIF_F_GRO_HW; 10141 } 10142 } 10143 10144 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 10145 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 10146 features &= ~NETIF_F_LRO; 10147 } 10148 10149 if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) { 10150 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 10151 features &= ~NETIF_F_HW_TLS_TX; 10152 } 10153 10154 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 10155 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 10156 features &= ~NETIF_F_HW_TLS_RX; 10157 } 10158 10159 if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) { 10160 netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n"); 10161 features &= ~NETIF_F_GSO_UDP_L4; 10162 } 10163 10164 return features; 10165 } 10166 10167 int __netdev_update_features(struct net_device *dev) 10168 { 10169 struct net_device *upper, *lower; 10170 netdev_features_t features; 10171 struct list_head *iter; 10172 int err = -1; 10173 10174 ASSERT_RTNL(); 10175 10176 features = netdev_get_wanted_features(dev); 10177 10178 if (dev->netdev_ops->ndo_fix_features) 10179 features = dev->netdev_ops->ndo_fix_features(dev, features); 10180 10181 /* driver might be less strict about feature dependencies */ 10182 features = netdev_fix_features(dev, features); 10183 10184 /* some features can't be enabled if they're off on an upper device */ 10185 netdev_for_each_upper_dev_rcu(dev, upper, iter) 10186 features = netdev_sync_upper_features(dev, upper, features); 10187 10188 if (dev->features == features) 10189 goto sync_lower; 10190 10191 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 10192 &dev->features, &features); 10193 10194 if (dev->netdev_ops->ndo_set_features) 10195 err = dev->netdev_ops->ndo_set_features(dev, features); 10196 else 10197 err = 0; 10198 10199 if (unlikely(err < 0)) { 10200 netdev_err(dev, 10201 "set_features() failed (%d); wanted %pNF, left %pNF\n", 10202 err, &features, &dev->features); 10203 /* return non-0 since some features might have changed and 10204 * it's better to fire a spurious notification than miss it 10205 */ 10206 return -1; 10207 } 10208 10209 sync_lower: 10210 /* some features must be disabled on lower devices when disabled 10211 * on an upper device (think: bonding master or bridge) 10212 */ 10213 netdev_for_each_lower_dev(dev, lower, iter) 10214 netdev_sync_lower_features(dev, lower, features); 10215 10216 if (!err) { 10217 netdev_features_t diff = features ^ dev->features; 10218 10219 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 10220 /* udp_tunnel_{get,drop}_rx_info both need 10221 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 10222 * device, or they won't do anything. 10223 * Thus we need to update dev->features 10224 * *before* calling udp_tunnel_get_rx_info, 10225 * but *after* calling udp_tunnel_drop_rx_info. 
10226 */ 10227 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 10228 dev->features = features; 10229 udp_tunnel_get_rx_info(dev); 10230 } else { 10231 udp_tunnel_drop_rx_info(dev); 10232 } 10233 } 10234 10235 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 10236 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 10237 dev->features = features; 10238 err |= vlan_get_rx_ctag_filter_info(dev); 10239 } else { 10240 vlan_drop_rx_ctag_filter_info(dev); 10241 } 10242 } 10243 10244 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 10245 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 10246 dev->features = features; 10247 err |= vlan_get_rx_stag_filter_info(dev); 10248 } else { 10249 vlan_drop_rx_stag_filter_info(dev); 10250 } 10251 } 10252 10253 dev->features = features; 10254 } 10255 10256 return err < 0 ? 0 : 1; 10257 } 10258 10259 /** 10260 * netdev_update_features - recalculate device features 10261 * @dev: the device to check 10262 * 10263 * Recalculate dev->features set and send notifications if it 10264 * has changed. Should be called after driver or hardware dependent 10265 * conditions might have changed that influence the features. 10266 */ 10267 void netdev_update_features(struct net_device *dev) 10268 { 10269 if (__netdev_update_features(dev)) 10270 netdev_features_change(dev); 10271 } 10272 EXPORT_SYMBOL(netdev_update_features); 10273 10274 /** 10275 * netdev_change_features - recalculate device features 10276 * @dev: the device to check 10277 * 10278 * Recalculate dev->features set and send notifications even 10279 * if they have not changed. Should be called instead of 10280 * netdev_update_features() if also dev->vlan_features might 10281 * have changed to allow the changes to be propagated to stacked 10282 * VLAN devices. 10283 */ 10284 void netdev_change_features(struct net_device *dev) 10285 { 10286 __netdev_update_features(dev); 10287 netdev_features_change(dev); 10288 } 10289 EXPORT_SYMBOL(netdev_change_features); 10290 10291 /** 10292 * netif_stacked_transfer_operstate - transfer operstate 10293 * @rootdev: the root or lower level device to transfer state from 10294 * @dev: the device to transfer operstate to 10295 * 10296 * Transfer operational state from root to device. This is normally 10297 * called when a stacking relationship exists between the root 10298 * device and the device(a leaf device). 
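 *
 * Illustrative sketch (not taken from this file; the foo_* names are
 * hypothetical): a stacking driver would typically call this from its
 * netdevice notifier when the lower device changes state, e.g.
 *
 *	static void foo_lower_changed(struct net_device *lower,
 *				      struct net_device *upper)
 *	{
 *		netif_stacked_transfer_operstate(lower, upper);
 *	}
 *
 * The 802.1q VLAN code uses this helper in a comparable way to keep VLAN
 * devices in sync with their real device.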
10299 */ 10300 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 10301 struct net_device *dev) 10302 { 10303 if (rootdev->operstate == IF_OPER_DORMANT) 10304 netif_dormant_on(dev); 10305 else 10306 netif_dormant_off(dev); 10307 10308 if (rootdev->operstate == IF_OPER_TESTING) 10309 netif_testing_on(dev); 10310 else 10311 netif_testing_off(dev); 10312 10313 if (netif_carrier_ok(rootdev)) 10314 netif_carrier_on(dev); 10315 else 10316 netif_carrier_off(dev); 10317 } 10318 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 10319 10320 static int netif_alloc_rx_queues(struct net_device *dev) 10321 { 10322 unsigned int i, count = dev->num_rx_queues; 10323 struct netdev_rx_queue *rx; 10324 size_t sz = count * sizeof(*rx); 10325 int err = 0; 10326 10327 BUG_ON(count < 1); 10328 10329 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10330 if (!rx) 10331 return -ENOMEM; 10332 10333 dev->_rx = rx; 10334 10335 for (i = 0; i < count; i++) { 10336 rx[i].dev = dev; 10337 10338 /* XDP RX-queue setup */ 10339 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 10340 if (err < 0) 10341 goto err_rxq_info; 10342 } 10343 return 0; 10344 10345 err_rxq_info: 10346 /* Rollback successful reg's and free other resources */ 10347 while (i--) 10348 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 10349 kvfree(dev->_rx); 10350 dev->_rx = NULL; 10351 return err; 10352 } 10353 10354 static void netif_free_rx_queues(struct net_device *dev) 10355 { 10356 unsigned int i, count = dev->num_rx_queues; 10357 10358 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 10359 if (!dev->_rx) 10360 return; 10361 10362 for (i = 0; i < count; i++) 10363 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 10364 10365 kvfree(dev->_rx); 10366 } 10367 10368 static void netdev_init_one_queue(struct net_device *dev, 10369 struct netdev_queue *queue, void *_unused) 10370 { 10371 /* Initialize queue lock */ 10372 spin_lock_init(&queue->_xmit_lock); 10373 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 10374 queue->xmit_lock_owner = -1; 10375 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 10376 queue->dev = dev; 10377 #ifdef CONFIG_BQL 10378 dql_init(&queue->dql, HZ); 10379 #endif 10380 } 10381 10382 static void netif_free_tx_queues(struct net_device *dev) 10383 { 10384 kvfree(dev->_tx); 10385 } 10386 10387 static int netif_alloc_netdev_queues(struct net_device *dev) 10388 { 10389 unsigned int count = dev->num_tx_queues; 10390 struct netdev_queue *tx; 10391 size_t sz = count * sizeof(*tx); 10392 10393 if (count < 1 || count > 0xffff) 10394 return -EINVAL; 10395 10396 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10397 if (!tx) 10398 return -ENOMEM; 10399 10400 dev->_tx = tx; 10401 10402 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 10403 spin_lock_init(&dev->tx_global_lock); 10404 10405 return 0; 10406 } 10407 10408 void netif_tx_stop_all_queues(struct net_device *dev) 10409 { 10410 unsigned int i; 10411 10412 for (i = 0; i < dev->num_tx_queues; i++) { 10413 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 10414 10415 netif_tx_stop_queue(txq); 10416 } 10417 } 10418 EXPORT_SYMBOL(netif_tx_stop_all_queues); 10419 10420 static int netdev_do_alloc_pcpu_stats(struct net_device *dev) 10421 { 10422 void __percpu *v; 10423 10424 /* Drivers implementing ndo_get_peer_dev must support tstat 10425 * accounting, so that skb_do_redirect() can bump the dev's 10426 * RX stats upon network namespace switch. 
10427 */ 10428 if (dev->netdev_ops->ndo_get_peer_dev && 10429 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS) 10430 return -EOPNOTSUPP; 10431 10432 switch (dev->pcpu_stat_type) { 10433 case NETDEV_PCPU_STAT_NONE: 10434 return 0; 10435 case NETDEV_PCPU_STAT_LSTATS: 10436 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); 10437 break; 10438 case NETDEV_PCPU_STAT_TSTATS: 10439 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 10440 break; 10441 case NETDEV_PCPU_STAT_DSTATS: 10442 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); 10443 break; 10444 default: 10445 return -EINVAL; 10446 } 10447 10448 return v ? 0 : -ENOMEM; 10449 } 10450 10451 static void netdev_do_free_pcpu_stats(struct net_device *dev) 10452 { 10453 switch (dev->pcpu_stat_type) { 10454 case NETDEV_PCPU_STAT_NONE: 10455 return; 10456 case NETDEV_PCPU_STAT_LSTATS: 10457 free_percpu(dev->lstats); 10458 break; 10459 case NETDEV_PCPU_STAT_TSTATS: 10460 free_percpu(dev->tstats); 10461 break; 10462 case NETDEV_PCPU_STAT_DSTATS: 10463 free_percpu(dev->dstats); 10464 break; 10465 } 10466 } 10467 10468 static void netdev_free_phy_link_topology(struct net_device *dev) 10469 { 10470 struct phy_link_topology *topo = dev->link_topo; 10471 10472 if (IS_ENABLED(CONFIG_PHYLIB) && topo) { 10473 xa_destroy(&topo->phys); 10474 kfree(topo); 10475 dev->link_topo = NULL; 10476 } 10477 } 10478 10479 /** 10480 * register_netdevice() - register a network device 10481 * @dev: device to register 10482 * 10483 * Take a prepared network device structure and make it externally accessible. 10484 * A %NETDEV_REGISTER message is sent to the netdev notifier chain. 10485 * Callers must hold the rtnl lock - you may want register_netdev() 10486 * instead of this. 10487 */ 10488 int register_netdevice(struct net_device *dev) 10489 { 10490 int ret; 10491 struct net *net = dev_net(dev); 10492 10493 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 10494 NETDEV_FEATURE_COUNT); 10495 BUG_ON(dev_boot_phase); 10496 ASSERT_RTNL(); 10497 10498 might_sleep(); 10499 10500 /* When net_device's are persistent, this will be fatal. 
*/ 10501 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 10502 BUG_ON(!net); 10503 10504 ret = ethtool_check_ops(dev->ethtool_ops); 10505 if (ret) 10506 return ret; 10507 10508 /* rss ctx ID 0 is reserved for the default context, start from 1 */ 10509 xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1); 10510 mutex_init(&dev->ethtool->rss_lock); 10511 10512 spin_lock_init(&dev->addr_list_lock); 10513 netdev_set_addr_lockdep_class(dev); 10514 10515 ret = dev_get_valid_name(net, dev, dev->name); 10516 if (ret < 0) 10517 goto out; 10518 10519 ret = -ENOMEM; 10520 dev->name_node = netdev_name_node_head_alloc(dev); 10521 if (!dev->name_node) 10522 goto out; 10523 10524 /* Init, if this function is available */ 10525 if (dev->netdev_ops->ndo_init) { 10526 ret = dev->netdev_ops->ndo_init(dev); 10527 if (ret) { 10528 if (ret > 0) 10529 ret = -EIO; 10530 goto err_free_name; 10531 } 10532 } 10533 10534 if (((dev->hw_features | dev->features) & 10535 NETIF_F_HW_VLAN_CTAG_FILTER) && 10536 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10537 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10538 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10539 ret = -EINVAL; 10540 goto err_uninit; 10541 } 10542 10543 ret = netdev_do_alloc_pcpu_stats(dev); 10544 if (ret) 10545 goto err_uninit; 10546 10547 ret = dev_index_reserve(net, dev->ifindex); 10548 if (ret < 0) 10549 goto err_free_pcpu; 10550 dev->ifindex = ret; 10551 10552 /* Transfer changeable features to wanted_features and enable 10553 * software offloads (GSO and GRO). 10554 */ 10555 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10556 dev->features |= NETIF_F_SOFT_FEATURES; 10557 10558 if (dev->udp_tunnel_nic_info) { 10559 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10560 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10561 } 10562 10563 dev->wanted_features = dev->features & dev->hw_features; 10564 10565 if (!(dev->flags & IFF_LOOPBACK)) 10566 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10567 10568 /* If IPv4 TCP segmentation offload is supported we should also 10569 * allow the device to enable segmenting the frame with the option 10570 * of ignoring a static IP ID value. This doesn't enable the 10571 * feature itself but allows the user to enable it later. 10572 */ 10573 if (dev->hw_features & NETIF_F_TSO) 10574 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10575 if (dev->vlan_features & NETIF_F_TSO) 10576 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10577 if (dev->mpls_features & NETIF_F_TSO) 10578 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10579 if (dev->hw_enc_features & NETIF_F_TSO) 10580 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10581 10582 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10583 */ 10584 dev->vlan_features |= NETIF_F_HIGHDMA; 10585 10586 /* Make NETIF_F_SG inheritable to tunnel devices. 10587 */ 10588 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10589 10590 /* Make NETIF_F_SG inheritable to MPLS. 10591 */ 10592 dev->mpls_features |= NETIF_F_SG; 10593 10594 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 10595 ret = notifier_to_errno(ret); 10596 if (ret) 10597 goto err_ifindex_release; 10598 10599 ret = netdev_register_kobject(dev); 10600 10601 WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED); 10602 10603 if (ret) 10604 goto err_uninit_notify; 10605 10606 __netdev_update_features(dev); 10607 10608 /* 10609 * Default initial state at registry is that the 10610 * device is present. 
10611 */ 10612 10613 set_bit(__LINK_STATE_PRESENT, &dev->state); 10614 10615 linkwatch_init_dev(dev); 10616 10617 dev_init_scheduler(dev); 10618 10619 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL); 10620 list_netdevice(dev); 10621 10622 add_device_randomness(dev->dev_addr, dev->addr_len); 10623 10624 /* If the device has permanent device address, driver should 10625 * set dev_addr and also addr_assign_type should be set to 10626 * NET_ADDR_PERM (default value). 10627 */ 10628 if (dev->addr_assign_type == NET_ADDR_PERM) 10629 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 10630 10631 /* Notify protocols, that a new device appeared. */ 10632 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 10633 ret = notifier_to_errno(ret); 10634 if (ret) { 10635 /* Expect explicit free_netdev() on failure */ 10636 dev->needs_free_netdev = false; 10637 unregister_netdevice_queue(dev, NULL); 10638 goto out; 10639 } 10640 /* 10641 * Prevent userspace races by waiting until the network 10642 * device is fully setup before sending notifications. 10643 */ 10644 if (!dev->rtnl_link_ops || 10645 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10646 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 10647 10648 out: 10649 return ret; 10650 10651 err_uninit_notify: 10652 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 10653 err_ifindex_release: 10654 dev_index_release(net, dev->ifindex); 10655 err_free_pcpu: 10656 netdev_do_free_pcpu_stats(dev); 10657 err_uninit: 10658 if (dev->netdev_ops->ndo_uninit) 10659 dev->netdev_ops->ndo_uninit(dev); 10660 if (dev->priv_destructor) 10661 dev->priv_destructor(dev); 10662 err_free_name: 10663 netdev_name_node_free(dev->name_node); 10664 goto out; 10665 } 10666 EXPORT_SYMBOL(register_netdevice); 10667 10668 /* Initialize the core of a dummy net device. 10669 * This is useful if you are calling this function after alloc_netdev(), 10670 * since it does not memset the net_device fields. 10671 */ 10672 static void init_dummy_netdev_core(struct net_device *dev) 10673 { 10674 /* make sure we BUG if trying to hit standard 10675 * register/unregister code path 10676 */ 10677 dev->reg_state = NETREG_DUMMY; 10678 10679 /* NAPI wants this */ 10680 INIT_LIST_HEAD(&dev->napi_list); 10681 10682 /* a dummy interface is started by default */ 10683 set_bit(__LINK_STATE_PRESENT, &dev->state); 10684 set_bit(__LINK_STATE_START, &dev->state); 10685 10686 /* napi_busy_loop stats accounting wants this */ 10687 dev_net_set(dev, &init_net); 10688 10689 /* Note : We dont allocate pcpu_refcnt for dummy devices, 10690 * because users of this 'device' dont need to change 10691 * its refcount. 10692 */ 10693 } 10694 10695 /** 10696 * init_dummy_netdev - init a dummy network device for NAPI 10697 * @dev: device to init 10698 * 10699 * This takes a network device structure and initializes the minimum 10700 * amount of fields so it can be used to schedule NAPI polls without 10701 * registering a full blown interface. This is to be used by drivers 10702 * that need to tie several hardware interfaces to a single NAPI 10703 * poll scheduler due to HW limitations. 10704 */ 10705 void init_dummy_netdev(struct net_device *dev) 10706 { 10707 /* Clear everything. 
Note we don't initialize spinlocks 10708 * as they aren't supposed to be taken by any of the 10709 * NAPI code and this dummy netdev is supposed to be 10710 * only ever used for NAPI polls 10711 */ 10712 memset(dev, 0, sizeof(struct net_device)); 10713 init_dummy_netdev_core(dev); 10714 } 10715 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10716 10717 /** 10718 * register_netdev - register a network device 10719 * @dev: device to register 10720 * 10721 * Take a completed network device structure and add it to the kernel 10722 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10723 * chain. 0 is returned on success. A negative errno code is returned 10724 * on a failure to set up the device, or if the name is a duplicate. 10725 * 10726 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10727 * and expands the device name if you passed a format string to 10728 * alloc_netdev. 10729 */ 10730 int register_netdev(struct net_device *dev) 10731 { 10732 int err; 10733 10734 if (rtnl_lock_killable()) 10735 return -EINTR; 10736 err = register_netdevice(dev); 10737 rtnl_unlock(); 10738 return err; 10739 } 10740 EXPORT_SYMBOL(register_netdev); 10741 10742 int netdev_refcnt_read(const struct net_device *dev) 10743 { 10744 #ifdef CONFIG_PCPU_DEV_REFCNT 10745 int i, refcnt = 0; 10746 10747 for_each_possible_cpu(i) 10748 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10749 return refcnt; 10750 #else 10751 return refcount_read(&dev->dev_refcnt); 10752 #endif 10753 } 10754 EXPORT_SYMBOL(netdev_refcnt_read); 10755 10756 int netdev_unregister_timeout_secs __read_mostly = 10; 10757 10758 #define WAIT_REFS_MIN_MSECS 1 10759 #define WAIT_REFS_MAX_MSECS 250 10760 /** 10761 * netdev_wait_allrefs_any - wait until all references are gone. 10762 * @list: list of net_devices to wait on 10763 * 10764 * This is called when unregistering network devices. 10765 * 10766 * Any protocol or device that holds a reference should register 10767 * for netdevice notification, and cleanup and put back the 10768 * reference if they receive an UNREGISTER event. 10769 * We can get stuck here if buggy protocols don't correctly 10770 * call dev_put. 10771 */ 10772 static struct net_device *netdev_wait_allrefs_any(struct list_head *list) 10773 { 10774 unsigned long rebroadcast_time, warning_time; 10775 struct net_device *dev; 10776 int wait = 0; 10777 10778 rebroadcast_time = warning_time = jiffies; 10779 10780 list_for_each_entry(dev, list, todo_list) 10781 if (netdev_refcnt_read(dev) == 1) 10782 return dev; 10783 10784 while (true) { 10785 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10786 rtnl_lock(); 10787 10788 /* Rebroadcast unregister notification */ 10789 list_for_each_entry(dev, list, todo_list) 10790 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10791 10792 __rtnl_unlock(); 10793 rcu_barrier(); 10794 rtnl_lock(); 10795 10796 list_for_each_entry(dev, list, todo_list) 10797 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10798 &dev->state)) { 10799 /* We must not have linkwatch events 10800 * pending on unregister. If this 10801 * happens, we simply run the queue 10802 * unscheduled, resulting in a noop 10803 * for this device. 
10804 */ 10805 linkwatch_run_queue(); 10806 break; 10807 } 10808 10809 __rtnl_unlock(); 10810 10811 rebroadcast_time = jiffies; 10812 } 10813 10814 rcu_barrier(); 10815 10816 if (!wait) { 10817 wait = WAIT_REFS_MIN_MSECS; 10818 } else { 10819 msleep(wait); 10820 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10821 } 10822 10823 list_for_each_entry(dev, list, todo_list) 10824 if (netdev_refcnt_read(dev) == 1) 10825 return dev; 10826 10827 if (time_after(jiffies, warning_time + 10828 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { 10829 list_for_each_entry(dev, list, todo_list) { 10830 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10831 dev->name, netdev_refcnt_read(dev)); 10832 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 10833 } 10834 10835 warning_time = jiffies; 10836 } 10837 } 10838 } 10839 10840 /* The sequence is: 10841 * 10842 * rtnl_lock(); 10843 * ... 10844 * register_netdevice(x1); 10845 * register_netdevice(x2); 10846 * ... 10847 * unregister_netdevice(y1); 10848 * unregister_netdevice(y2); 10849 * ... 10850 * rtnl_unlock(); 10851 * free_netdev(y1); 10852 * free_netdev(y2); 10853 * 10854 * We are invoked by rtnl_unlock(). 10855 * This allows us to deal with problems: 10856 * 1) We can delete sysfs objects which invoke hotplug 10857 * without deadlocking with linkwatch via keventd. 10858 * 2) Since we run with the RTNL semaphore not held, we can sleep 10859 * safely in order to wait for the netdev refcnt to drop to zero. 10860 * 10861 * We must not return until all unregister events added during 10862 * the interval the lock was held have been completed. 10863 */ 10864 void netdev_run_todo(void) 10865 { 10866 struct net_device *dev, *tmp; 10867 struct list_head list; 10868 int cnt; 10869 #ifdef CONFIG_LOCKDEP 10870 struct list_head unlink_list; 10871 10872 list_replace_init(&net_unlink_list, &unlink_list); 10873 10874 while (!list_empty(&unlink_list)) { 10875 struct net_device *dev = list_first_entry(&unlink_list, 10876 struct net_device, 10877 unlink_list); 10878 list_del_init(&dev->unlink_list); 10879 dev->nested_level = dev->lower_level - 1; 10880 } 10881 #endif 10882 10883 /* Snapshot list, allow later requests */ 10884 list_replace_init(&net_todo_list, &list); 10885 10886 __rtnl_unlock(); 10887 10888 /* Wait for rcu callbacks to finish before next phase */ 10889 if (!list_empty(&list)) 10890 rcu_barrier(); 10891 10892 list_for_each_entry_safe(dev, tmp, &list, todo_list) { 10893 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10894 netdev_WARN(dev, "run_todo but not unregistering\n"); 10895 list_del(&dev->todo_list); 10896 continue; 10897 } 10898 10899 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED); 10900 linkwatch_sync_dev(dev); 10901 } 10902 10903 cnt = 0; 10904 while (!list_empty(&list)) { 10905 dev = netdev_wait_allrefs_any(&list); 10906 list_del(&dev->todo_list); 10907 10908 /* paranoia */ 10909 BUG_ON(netdev_refcnt_read(dev) != 1); 10910 BUG_ON(!list_empty(&dev->ptype_all)); 10911 BUG_ON(!list_empty(&dev->ptype_specific)); 10912 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10913 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10914 10915 netdev_do_free_pcpu_stats(dev); 10916 if (dev->priv_destructor) 10917 dev->priv_destructor(dev); 10918 if (dev->needs_free_netdev) 10919 free_netdev(dev); 10920 10921 cnt++; 10922 10923 /* Free network device */ 10924 kobject_put(&dev->dev.kobj); 10925 } 10926 if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count)) 10927 wake_up(&netdev_unregistering_wq); 10928 } 10929 10930 /* Collate per-cpu 
network dstats statistics
10931  *
10932  * Read per-cpu network statistics from dev->dstats and populate the related
10933  * fields in @s.
10934  */
10935 static void dev_fetch_dstats(struct rtnl_link_stats64 *s,
10936 			     const struct pcpu_dstats __percpu *dstats)
10937 {
10938 	int cpu;
10939 
10940 	for_each_possible_cpu(cpu) {
10941 		u64 rx_packets, rx_bytes, rx_drops;
10942 		u64 tx_packets, tx_bytes, tx_drops;
10943 		const struct pcpu_dstats *stats;
10944 		unsigned int start;
10945 
10946 		stats = per_cpu_ptr(dstats, cpu);
10947 		do {
10948 			start = u64_stats_fetch_begin(&stats->syncp);
10949 			rx_packets = u64_stats_read(&stats->rx_packets);
10950 			rx_bytes = u64_stats_read(&stats->rx_bytes);
10951 			rx_drops = u64_stats_read(&stats->rx_drops);
10952 			tx_packets = u64_stats_read(&stats->tx_packets);
10953 			tx_bytes = u64_stats_read(&stats->tx_bytes);
10954 			tx_drops = u64_stats_read(&stats->tx_drops);
10955 		} while (u64_stats_fetch_retry(&stats->syncp, start));
10956 
10957 		s->rx_packets += rx_packets;
10958 		s->rx_bytes += rx_bytes;
10959 		s->rx_dropped += rx_drops;
10960 		s->tx_packets += tx_packets;
10961 		s->tx_bytes += tx_bytes;
10962 		s->tx_dropped += tx_drops;
10963 	}
10964 }
10965 
10966 /* ndo_get_stats64 implementation for dstats-based accounting.
10967  *
10968  * Populate @s from dev->stats and dev->dstats. This is used internally by the
10969  * core for NETDEV_PCPU_STAT_DSTATS-type stats collection.
10970  */
10971 static void dev_get_dstats64(const struct net_device *dev,
10972 			     struct rtnl_link_stats64 *s)
10973 {
10974 	netdev_stats_to_stats64(s, &dev->stats);
10975 	dev_fetch_dstats(s, dev->dstats);
10976 }
10977 
10978 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10979  * all the same fields in the same order as net_device_stats, with only
10980  * the type differing, but rtnl_link_stats64 may have additional fields
10981  * at the end for newer counters.
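 *
 * Illustrative sketch (hypothetical foo_* driver, not part of this file):
 * a driver that still accumulates counters in the legacy dev->stats
 * structure can convert them in its ndo_get_stats64 callback and then
 * layer device-specific counters on top:
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *s)
 *	{
 *		netdev_stats_to_stats64(s, &dev->stats);
 *		s->rx_missed_errors += foo_read_hw_missed(dev);
 *	}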
10982 */ 10983 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10984 const struct net_device_stats *netdev_stats) 10985 { 10986 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); 10987 const atomic_long_t *src = (atomic_long_t *)netdev_stats; 10988 u64 *dst = (u64 *)stats64; 10989 10990 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10991 for (i = 0; i < n; i++) 10992 dst[i] = (unsigned long)atomic_long_read(&src[i]); 10993 /* zero out counters that only exist in rtnl_link_stats64 */ 10994 memset((char *)stats64 + n * sizeof(u64), 0, 10995 sizeof(*stats64) - n * sizeof(u64)); 10996 } 10997 EXPORT_SYMBOL(netdev_stats_to_stats64); 10998 10999 static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc( 11000 struct net_device *dev) 11001 { 11002 struct net_device_core_stats __percpu *p; 11003 11004 p = alloc_percpu_gfp(struct net_device_core_stats, 11005 GFP_ATOMIC | __GFP_NOWARN); 11006 11007 if (p && cmpxchg(&dev->core_stats, NULL, p)) 11008 free_percpu(p); 11009 11010 /* This READ_ONCE() pairs with the cmpxchg() above */ 11011 return READ_ONCE(dev->core_stats); 11012 } 11013 11014 noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset) 11015 { 11016 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 11017 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 11018 unsigned long __percpu *field; 11019 11020 if (unlikely(!p)) { 11021 p = netdev_core_stats_alloc(dev); 11022 if (!p) 11023 return; 11024 } 11025 11026 field = (unsigned long __percpu *)((void __percpu *)p + offset); 11027 this_cpu_inc(*field); 11028 } 11029 EXPORT_SYMBOL_GPL(netdev_core_stats_inc); 11030 11031 /** 11032 * dev_get_stats - get network device statistics 11033 * @dev: device to get statistics from 11034 * @storage: place to store stats 11035 * 11036 * Get network statistics from device. Return @storage. 11037 * The device driver may provide its own method by setting 11038 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 11039 * otherwise the internal statistics structure is used. 
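 *
 * Illustrative use (sketch only): callers normally read a snapshot into a
 * local structure, e.g.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu rx / %llu tx packets\n",
 *		dev->name, stats.rx_packets, stats.tx_packets);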
11040 */ 11041 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 11042 struct rtnl_link_stats64 *storage) 11043 { 11044 const struct net_device_ops *ops = dev->netdev_ops; 11045 const struct net_device_core_stats __percpu *p; 11046 11047 if (ops->ndo_get_stats64) { 11048 memset(storage, 0, sizeof(*storage)); 11049 ops->ndo_get_stats64(dev, storage); 11050 } else if (ops->ndo_get_stats) { 11051 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 11052 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) { 11053 dev_get_tstats64(dev, storage); 11054 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) { 11055 dev_get_dstats64(dev, storage); 11056 } else { 11057 netdev_stats_to_stats64(storage, &dev->stats); 11058 } 11059 11060 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 11061 p = READ_ONCE(dev->core_stats); 11062 if (p) { 11063 const struct net_device_core_stats *core_stats; 11064 int i; 11065 11066 for_each_possible_cpu(i) { 11067 core_stats = per_cpu_ptr(p, i); 11068 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); 11069 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); 11070 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); 11071 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); 11072 } 11073 } 11074 return storage; 11075 } 11076 EXPORT_SYMBOL(dev_get_stats); 11077 11078 /** 11079 * dev_fetch_sw_netstats - get per-cpu network device statistics 11080 * @s: place to store stats 11081 * @netstats: per-cpu network stats to read from 11082 * 11083 * Read per-cpu network statistics and populate the related fields in @s. 11084 */ 11085 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 11086 const struct pcpu_sw_netstats __percpu *netstats) 11087 { 11088 int cpu; 11089 11090 for_each_possible_cpu(cpu) { 11091 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 11092 const struct pcpu_sw_netstats *stats; 11093 unsigned int start; 11094 11095 stats = per_cpu_ptr(netstats, cpu); 11096 do { 11097 start = u64_stats_fetch_begin(&stats->syncp); 11098 rx_packets = u64_stats_read(&stats->rx_packets); 11099 rx_bytes = u64_stats_read(&stats->rx_bytes); 11100 tx_packets = u64_stats_read(&stats->tx_packets); 11101 tx_bytes = u64_stats_read(&stats->tx_bytes); 11102 } while (u64_stats_fetch_retry(&stats->syncp, start)); 11103 11104 s->rx_packets += rx_packets; 11105 s->rx_bytes += rx_bytes; 11106 s->tx_packets += tx_packets; 11107 s->tx_bytes += tx_bytes; 11108 } 11109 } 11110 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 11111 11112 /** 11113 * dev_get_tstats64 - ndo_get_stats64 implementation 11114 * @dev: device to get statistics from 11115 * @s: place to store stats 11116 * 11117 * Populate @s from dev->stats and dev->tstats. Can be used as 11118 * ndo_get_stats64() callback. 
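 *
 * Illustrative sketch (hypothetical foo_netdev_ops): a driver using per-cpu
 * tstats accounting can point its ops at this helper directly:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_get_stats64	= dev_get_tstats64,
 *	};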
11119 */ 11120 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 11121 { 11122 netdev_stats_to_stats64(s, &dev->stats); 11123 dev_fetch_sw_netstats(s, dev->tstats); 11124 } 11125 EXPORT_SYMBOL_GPL(dev_get_tstats64); 11126 11127 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 11128 { 11129 struct netdev_queue *queue = dev_ingress_queue(dev); 11130 11131 #ifdef CONFIG_NET_CLS_ACT 11132 if (queue) 11133 return queue; 11134 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 11135 if (!queue) 11136 return NULL; 11137 netdev_init_one_queue(dev, queue, NULL); 11138 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 11139 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); 11140 rcu_assign_pointer(dev->ingress_queue, queue); 11141 #endif 11142 return queue; 11143 } 11144 11145 static const struct ethtool_ops default_ethtool_ops; 11146 11147 void netdev_set_default_ethtool_ops(struct net_device *dev, 11148 const struct ethtool_ops *ops) 11149 { 11150 if (dev->ethtool_ops == &default_ethtool_ops) 11151 dev->ethtool_ops = ops; 11152 } 11153 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 11154 11155 /** 11156 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default 11157 * @dev: netdev to enable the IRQ coalescing on 11158 * 11159 * Sets a conservative default for SW IRQ coalescing. Users can use 11160 * sysfs attributes to override the default values. 11161 */ 11162 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) 11163 { 11164 WARN_ON(dev->reg_state == NETREG_REGISTERED); 11165 11166 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 11167 netdev_set_gro_flush_timeout(dev, 20000); 11168 netdev_set_defer_hard_irqs(dev, 1); 11169 } 11170 } 11171 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); 11172 11173 /** 11174 * alloc_netdev_mqs - allocate network device 11175 * @sizeof_priv: size of private data to allocate space for 11176 * @name: device name format string 11177 * @name_assign_type: origin of device name 11178 * @setup: callback to initialize device 11179 * @txqs: the number of TX subqueues to allocate 11180 * @rxqs: the number of RX subqueues to allocate 11181 * 11182 * Allocates a struct net_device with private data area for driver use 11183 * and performs basic initialization. Also allocates subqueue structs 11184 * for each queue on the device. 
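 *
 * Illustrative sketch (hypothetical foo driver): most callers go through
 * wrappers such as alloc_netdev() or alloc_etherdev_mqs(), which expand to
 * this function, e.g.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *
 * If registration fails, the caller is expected to release the device with
 * free_netdev().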
11185 */ 11186 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 11187 unsigned char name_assign_type, 11188 void (*setup)(struct net_device *), 11189 unsigned int txqs, unsigned int rxqs) 11190 { 11191 struct net_device *dev; 11192 size_t napi_config_sz; 11193 unsigned int maxqs; 11194 11195 BUG_ON(strlen(name) >= sizeof(dev->name)); 11196 11197 if (txqs < 1) { 11198 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 11199 return NULL; 11200 } 11201 11202 if (rxqs < 1) { 11203 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 11204 return NULL; 11205 } 11206 11207 maxqs = max(txqs, rxqs); 11208 11209 dev = kvzalloc(struct_size(dev, priv, sizeof_priv), 11210 GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 11211 if (!dev) 11212 return NULL; 11213 11214 dev->priv_len = sizeof_priv; 11215 11216 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); 11217 #ifdef CONFIG_PCPU_DEV_REFCNT 11218 dev->pcpu_refcnt = alloc_percpu(int); 11219 if (!dev->pcpu_refcnt) 11220 goto free_dev; 11221 __dev_hold(dev); 11222 #else 11223 refcount_set(&dev->dev_refcnt, 1); 11224 #endif 11225 11226 if (dev_addr_init(dev)) 11227 goto free_pcpu; 11228 11229 dev_mc_init(dev); 11230 dev_uc_init(dev); 11231 11232 dev_net_set(dev, &init_net); 11233 11234 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; 11235 dev->xdp_zc_max_segs = 1; 11236 dev->gso_max_segs = GSO_MAX_SEGS; 11237 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; 11238 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; 11239 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; 11240 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; 11241 dev->tso_max_segs = TSO_MAX_SEGS; 11242 dev->upper_level = 1; 11243 dev->lower_level = 1; 11244 #ifdef CONFIG_LOCKDEP 11245 dev->nested_level = 0; 11246 INIT_LIST_HEAD(&dev->unlink_list); 11247 #endif 11248 11249 INIT_LIST_HEAD(&dev->napi_list); 11250 INIT_LIST_HEAD(&dev->unreg_list); 11251 INIT_LIST_HEAD(&dev->close_list); 11252 INIT_LIST_HEAD(&dev->link_watch_list); 11253 INIT_LIST_HEAD(&dev->adj_list.upper); 11254 INIT_LIST_HEAD(&dev->adj_list.lower); 11255 INIT_LIST_HEAD(&dev->ptype_all); 11256 INIT_LIST_HEAD(&dev->ptype_specific); 11257 INIT_LIST_HEAD(&dev->net_notifier_list); 11258 #ifdef CONFIG_NET_SCHED 11259 hash_init(dev->qdisc_hash); 11260 #endif 11261 11262 mutex_init(&dev->lock); 11263 11264 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 11265 setup(dev); 11266 11267 if (!dev->tx_queue_len) { 11268 dev->priv_flags |= IFF_NO_QUEUE; 11269 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 11270 } 11271 11272 dev->num_tx_queues = txqs; 11273 dev->real_num_tx_queues = txqs; 11274 if (netif_alloc_netdev_queues(dev)) 11275 goto free_all; 11276 11277 dev->num_rx_queues = rxqs; 11278 dev->real_num_rx_queues = rxqs; 11279 if (netif_alloc_rx_queues(dev)) 11280 goto free_all; 11281 dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT); 11282 if (!dev->ethtool) 11283 goto free_all; 11284 11285 napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config)); 11286 dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT); 11287 if (!dev->napi_config) 11288 goto free_all; 11289 11290 strscpy(dev->name, name); 11291 dev->name_assign_type = name_assign_type; 11292 dev->group = INIT_NETDEV_GROUP; 11293 if (!dev->ethtool_ops) 11294 dev->ethtool_ops = &default_ethtool_ops; 11295 11296 nf_hook_netdev_init(dev); 11297 11298 return dev; 11299 11300 free_all: 11301 free_netdev(dev); 11302 return NULL; 11303 11304 free_pcpu: 11305 #ifdef CONFIG_PCPU_DEV_REFCNT 11306 
free_percpu(dev->pcpu_refcnt); 11307 free_dev: 11308 #endif 11309 kvfree(dev); 11310 return NULL; 11311 } 11312 EXPORT_SYMBOL(alloc_netdev_mqs); 11313 11314 /** 11315 * free_netdev - free network device 11316 * @dev: device 11317 * 11318 * This function does the last stage of destroying an allocated device 11319 * interface. The reference to the device object is released. If this 11320 * is the last reference then it will be freed.Must be called in process 11321 * context. 11322 */ 11323 void free_netdev(struct net_device *dev) 11324 { 11325 struct napi_struct *p, *n; 11326 11327 might_sleep(); 11328 11329 /* When called immediately after register_netdevice() failed the unwind 11330 * handling may still be dismantling the device. Handle that case by 11331 * deferring the free. 11332 */ 11333 if (dev->reg_state == NETREG_UNREGISTERING) { 11334 ASSERT_RTNL(); 11335 dev->needs_free_netdev = true; 11336 return; 11337 } 11338 11339 mutex_destroy(&dev->lock); 11340 11341 kfree(dev->ethtool); 11342 netif_free_tx_queues(dev); 11343 netif_free_rx_queues(dev); 11344 11345 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 11346 11347 /* Flush device addresses */ 11348 dev_addr_flush(dev); 11349 11350 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 11351 netif_napi_del(p); 11352 11353 kvfree(dev->napi_config); 11354 11355 ref_tracker_dir_exit(&dev->refcnt_tracker); 11356 #ifdef CONFIG_PCPU_DEV_REFCNT 11357 free_percpu(dev->pcpu_refcnt); 11358 dev->pcpu_refcnt = NULL; 11359 #endif 11360 free_percpu(dev->core_stats); 11361 dev->core_stats = NULL; 11362 free_percpu(dev->xdp_bulkq); 11363 dev->xdp_bulkq = NULL; 11364 11365 netdev_free_phy_link_topology(dev); 11366 11367 /* Compatibility with error handling in drivers */ 11368 if (dev->reg_state == NETREG_UNINITIALIZED || 11369 dev->reg_state == NETREG_DUMMY) { 11370 kvfree(dev); 11371 return; 11372 } 11373 11374 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 11375 WRITE_ONCE(dev->reg_state, NETREG_RELEASED); 11376 11377 /* will free via device release */ 11378 put_device(&dev->dev); 11379 } 11380 EXPORT_SYMBOL(free_netdev); 11381 11382 /** 11383 * alloc_netdev_dummy - Allocate and initialize a dummy net device. 11384 * @sizeof_priv: size of private data to allocate space for 11385 * 11386 * Return: the allocated net_device on success, NULL otherwise 11387 */ 11388 struct net_device *alloc_netdev_dummy(int sizeof_priv) 11389 { 11390 return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN, 11391 init_dummy_netdev_core); 11392 } 11393 EXPORT_SYMBOL_GPL(alloc_netdev_dummy); 11394 11395 /** 11396 * synchronize_net - Synchronize with packet receive processing 11397 * 11398 * Wait for packets currently being received to be done. 11399 * Does not block later packets from starting. 
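 *
 * Typical (illustrative) usage pattern, with placeholder names: unpublish
 * an RCU-protected pointer that the receive path may be following, wait for
 * in-flight receive processing, then release the old object:
 *
 *	old = rtnl_dereference(dev->some_rcu_ptr);
 *	RCU_INIT_POINTER(dev->some_rcu_ptr, NULL);
 *	synchronize_net();
 *	kfree(old);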
11400 */ 11401 void synchronize_net(void) 11402 { 11403 might_sleep(); 11404 if (rtnl_is_locked()) 11405 synchronize_rcu_expedited(); 11406 else 11407 synchronize_rcu(); 11408 } 11409 EXPORT_SYMBOL(synchronize_net); 11410 11411 static void netdev_rss_contexts_free(struct net_device *dev) 11412 { 11413 struct ethtool_rxfh_context *ctx; 11414 unsigned long context; 11415 11416 mutex_lock(&dev->ethtool->rss_lock); 11417 xa_for_each(&dev->ethtool->rss_ctx, context, ctx) { 11418 struct ethtool_rxfh_param rxfh; 11419 11420 rxfh.indir = ethtool_rxfh_context_indir(ctx); 11421 rxfh.key = ethtool_rxfh_context_key(ctx); 11422 rxfh.hfunc = ctx->hfunc; 11423 rxfh.input_xfrm = ctx->input_xfrm; 11424 rxfh.rss_context = context; 11425 rxfh.rss_delete = true; 11426 11427 xa_erase(&dev->ethtool->rss_ctx, context); 11428 if (dev->ethtool_ops->create_rxfh_context) 11429 dev->ethtool_ops->remove_rxfh_context(dev, ctx, 11430 context, NULL); 11431 else 11432 dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL); 11433 kfree(ctx); 11434 } 11435 xa_destroy(&dev->ethtool->rss_ctx); 11436 mutex_unlock(&dev->ethtool->rss_lock); 11437 } 11438 11439 /** 11440 * unregister_netdevice_queue - remove device from the kernel 11441 * @dev: device 11442 * @head: list 11443 * 11444 * This function shuts down a device interface and removes it 11445 * from the kernel tables. 11446 * If head not NULL, device is queued to be unregistered later. 11447 * 11448 * Callers must hold the rtnl semaphore. You may want 11449 * unregister_netdev() instead of this. 11450 */ 11451 11452 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 11453 { 11454 ASSERT_RTNL(); 11455 11456 if (head) { 11457 list_move_tail(&dev->unreg_list, head); 11458 } else { 11459 LIST_HEAD(single); 11460 11461 list_add(&dev->unreg_list, &single); 11462 unregister_netdevice_many(&single); 11463 } 11464 } 11465 EXPORT_SYMBOL(unregister_netdevice_queue); 11466 11467 void unregister_netdevice_many_notify(struct list_head *head, 11468 u32 portid, const struct nlmsghdr *nlh) 11469 { 11470 struct net_device *dev, *tmp; 11471 LIST_HEAD(close_head); 11472 int cnt = 0; 11473 11474 BUG_ON(dev_boot_phase); 11475 ASSERT_RTNL(); 11476 11477 if (list_empty(head)) 11478 return; 11479 11480 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 11481 /* Some devices call without registering 11482 * for initialization unwind. Remove those 11483 * devices and proceed with the remaining. 11484 */ 11485 if (dev->reg_state == NETREG_UNINITIALIZED) { 11486 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 11487 dev->name, dev); 11488 11489 WARN_ON(1); 11490 list_del(&dev->unreg_list); 11491 continue; 11492 } 11493 dev->dismantle = true; 11494 BUG_ON(dev->reg_state != NETREG_REGISTERED); 11495 } 11496 11497 /* If device is running, close it first. */ 11498 list_for_each_entry(dev, head, unreg_list) 11499 list_add_tail(&dev->close_list, &close_head); 11500 dev_close_many(&close_head, true); 11501 11502 list_for_each_entry(dev, head, unreg_list) { 11503 /* And unlink it from device chain. */ 11504 unlist_netdevice(dev); 11505 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING); 11506 } 11507 flush_all_backlogs(); 11508 11509 synchronize_net(); 11510 11511 list_for_each_entry(dev, head, unreg_list) { 11512 struct sk_buff *skb = NULL; 11513 11514 /* Shutdown queueing discipline. 
*/ 11515 dev_shutdown(dev); 11516 dev_tcx_uninstall(dev); 11517 dev_xdp_uninstall(dev); 11518 bpf_dev_bound_netdev_unregister(dev); 11519 dev_dmabuf_uninstall(dev); 11520 11521 netdev_offload_xstats_disable_all(dev); 11522 11523 /* Notify protocols, that we are about to destroy 11524 * this device. They should clean all the things. 11525 */ 11526 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11527 11528 if (!dev->rtnl_link_ops || 11529 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 11530 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 11531 GFP_KERNEL, NULL, 0, 11532 portid, nlh); 11533 11534 /* 11535 * Flush the unicast and multicast chains 11536 */ 11537 dev_uc_flush(dev); 11538 dev_mc_flush(dev); 11539 11540 netdev_name_node_alt_flush(dev); 11541 netdev_name_node_free(dev->name_node); 11542 11543 netdev_rss_contexts_free(dev); 11544 11545 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 11546 11547 if (dev->netdev_ops->ndo_uninit) 11548 dev->netdev_ops->ndo_uninit(dev); 11549 11550 mutex_destroy(&dev->ethtool->rss_lock); 11551 11552 net_shaper_flush_netdev(dev); 11553 11554 if (skb) 11555 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); 11556 11557 /* Notifier chain MUST detach us all upper devices. */ 11558 WARN_ON(netdev_has_any_upper_dev(dev)); 11559 WARN_ON(netdev_has_any_lower_dev(dev)); 11560 11561 /* Remove entries from kobject tree */ 11562 netdev_unregister_kobject(dev); 11563 #ifdef CONFIG_XPS 11564 /* Remove XPS queueing entries */ 11565 netif_reset_xps_queues_gt(dev, 0); 11566 #endif 11567 } 11568 11569 synchronize_net(); 11570 11571 list_for_each_entry(dev, head, unreg_list) { 11572 netdev_put(dev, &dev->dev_registered_tracker); 11573 net_set_todo(dev); 11574 cnt++; 11575 } 11576 atomic_add(cnt, &dev_unreg_count); 11577 11578 list_del(head); 11579 } 11580 11581 /** 11582 * unregister_netdevice_many - unregister many devices 11583 * @head: list of devices 11584 * 11585 * Note: As most callers use a stack allocated list_head, 11586 * we force a list_del() to make sure stack won't be corrupted later. 11587 */ 11588 void unregister_netdevice_many(struct list_head *head) 11589 { 11590 unregister_netdevice_many_notify(head, 0, NULL); 11591 } 11592 EXPORT_SYMBOL(unregister_netdevice_many); 11593 11594 /** 11595 * unregister_netdev - remove device from the kernel 11596 * @dev: device 11597 * 11598 * This function shuts down a device interface and removes it 11599 * from the kernel tables. 11600 * 11601 * This is just a wrapper for unregister_netdevice that takes 11602 * the rtnl semaphore. In general you want to use this and not 11603 * unregister_netdevice. 11604 */ 11605 void unregister_netdev(struct net_device *dev) 11606 { 11607 rtnl_lock(); 11608 unregister_netdevice(dev); 11609 rtnl_unlock(); 11610 } 11611 EXPORT_SYMBOL(unregister_netdev); 11612 11613 /** 11614 * __dev_change_net_namespace - move device to different nethost namespace 11615 * @dev: device 11616 * @net: network namespace 11617 * @pat: If not NULL name pattern to try if the current device name 11618 * is already taken in the destination network namespace. 11619 * @new_ifindex: If not zero, specifies device index in the target 11620 * namespace. 11621 * 11622 * This function shuts down a device interface and moves it 11623 * to a new network namespace. On success 0 is returned, on 11624 * a failure a netagive errno code is returned. 11625 * 11626 * Callers must hold the rtnl semaphore. 
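 *
 * Illustrative call (sketch; target_net is a placeholder): move a device
 * into another namespace, falling back to a "dev%d" style name if the
 * current name is already taken there and letting the core pick the
 * ifindex:
 *
 *	rtnl_lock();
 *	err = __dev_change_net_namespace(dev, target_net, "dev%d", 0);
 *	rtnl_unlock();
 *
 * Most callers use the dev_change_net_namespace() wrapper, which passes 0
 * for @new_ifindex.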
11627 */ 11628 11629 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 11630 const char *pat, int new_ifindex) 11631 { 11632 struct netdev_name_node *name_node; 11633 struct net *net_old = dev_net(dev); 11634 char new_name[IFNAMSIZ] = {}; 11635 int err, new_nsid; 11636 11637 ASSERT_RTNL(); 11638 11639 /* Don't allow namespace local devices to be moved. */ 11640 err = -EINVAL; 11641 if (dev->netns_local) 11642 goto out; 11643 11644 /* Ensure the device has been registered */ 11645 if (dev->reg_state != NETREG_REGISTERED) 11646 goto out; 11647 11648 /* Get out if there is nothing todo */ 11649 err = 0; 11650 if (net_eq(net_old, net)) 11651 goto out; 11652 11653 /* Pick the destination device name, and ensure 11654 * we can use it in the destination network namespace. 11655 */ 11656 err = -EEXIST; 11657 if (netdev_name_in_use(net, dev->name)) { 11658 /* We get here if we can't use the current device name */ 11659 if (!pat) 11660 goto out; 11661 err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST); 11662 if (err < 0) 11663 goto out; 11664 } 11665 /* Check that none of the altnames conflicts. */ 11666 err = -EEXIST; 11667 netdev_for_each_altname(dev, name_node) 11668 if (netdev_name_in_use(net, name_node->name)) 11669 goto out; 11670 11671 /* Check that new_ifindex isn't used yet. */ 11672 if (new_ifindex) { 11673 err = dev_index_reserve(net, new_ifindex); 11674 if (err < 0) 11675 goto out; 11676 } else { 11677 /* If there is an ifindex conflict assign a new one */ 11678 err = dev_index_reserve(net, dev->ifindex); 11679 if (err == -EBUSY) 11680 err = dev_index_reserve(net, 0); 11681 if (err < 0) 11682 goto out; 11683 new_ifindex = err; 11684 } 11685 11686 /* 11687 * And now a mini version of register_netdevice unregister_netdevice. 11688 */ 11689 11690 /* If device is running close it first. */ 11691 dev_close(dev); 11692 11693 /* And unlink it from device chain */ 11694 unlist_netdevice(dev); 11695 11696 synchronize_net(); 11697 11698 /* Shutdown queueing discipline. */ 11699 dev_shutdown(dev); 11700 11701 /* Notify protocols, that we are about to destroy 11702 * this device. They should clean all the things. 11703 * 11704 * Note that dev->reg_state stays at NETREG_REGISTERED. 11705 * This is wanted because this way 8021q and macvlan know 11706 * the device is just moving and can keep their slaves up. 
11707 */ 11708 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11709 rcu_barrier(); 11710 11711 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11712 11713 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11714 new_ifindex); 11715 11716 /* 11717 * Flush the unicast and multicast chains 11718 */ 11719 dev_uc_flush(dev); 11720 dev_mc_flush(dev); 11721 11722 /* Send a netdev-removed uevent to the old namespace */ 11723 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11724 netdev_adjacent_del_links(dev); 11725 11726 /* Move per-net netdevice notifiers that are following the netdevice */ 11727 move_netdevice_notifiers_dev_net(dev, net); 11728 11729 /* Actually switch the network namespace */ 11730 dev_net_set(dev, net); 11731 dev->ifindex = new_ifindex; 11732 11733 if (new_name[0]) { 11734 /* Rename the netdev to prepared name */ 11735 write_seqlock_bh(&netdev_rename_lock); 11736 strscpy(dev->name, new_name, IFNAMSIZ); 11737 write_sequnlock_bh(&netdev_rename_lock); 11738 } 11739 11740 /* Fixup kobjects */ 11741 dev_set_uevent_suppress(&dev->dev, 1); 11742 err = device_rename(&dev->dev, dev->name); 11743 dev_set_uevent_suppress(&dev->dev, 0); 11744 WARN_ON(err); 11745 11746 /* Send a netdev-add uevent to the new namespace */ 11747 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11748 netdev_adjacent_add_links(dev); 11749 11750 /* Adapt owner in case owning user namespace of target network 11751 * namespace is different from the original one. 11752 */ 11753 err = netdev_change_owner(dev, net_old, net); 11754 WARN_ON(err); 11755 11756 /* Add the device back in the hashes */ 11757 list_netdevice(dev); 11758 11759 /* Notify protocols, that a new device appeared. */ 11760 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11761 11762 /* 11763 * Prevent userspace races by waiting until the network 11764 * device is fully setup before sending notifications. 11765 */ 11766 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 11767 11768 synchronize_net(); 11769 err = 0; 11770 out: 11771 return err; 11772 } 11773 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 11774 11775 static int dev_cpu_dead(unsigned int oldcpu) 11776 { 11777 struct sk_buff **list_skb; 11778 struct sk_buff *skb; 11779 unsigned int cpu; 11780 struct softnet_data *sd, *oldsd, *remsd = NULL; 11781 11782 local_irq_disable(); 11783 cpu = smp_processor_id(); 11784 sd = &per_cpu(softnet_data, cpu); 11785 oldsd = &per_cpu(softnet_data, oldcpu); 11786 11787 /* Find end of our completion_queue. */ 11788 list_skb = &sd->completion_queue; 11789 while (*list_skb) 11790 list_skb = &(*list_skb)->next; 11791 /* Append completion queue from offline CPU. */ 11792 *list_skb = oldsd->completion_queue; 11793 oldsd->completion_queue = NULL; 11794 11795 /* Append output queue from offline CPU. */ 11796 if (oldsd->output_queue) { 11797 *sd->output_queue_tailp = oldsd->output_queue; 11798 sd->output_queue_tailp = oldsd->output_queue_tailp; 11799 oldsd->output_queue = NULL; 11800 oldsd->output_queue_tailp = &oldsd->output_queue; 11801 } 11802 /* Append NAPI poll list from offline CPU, with one exception : 11803 * process_backlog() must be called by cpu owning percpu backlog. 11804 * We properly handle process_queue & input_pkt_queue later. 
11805 */ 11806 while (!list_empty(&oldsd->poll_list)) { 11807 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11808 struct napi_struct, 11809 poll_list); 11810 11811 list_del_init(&napi->poll_list); 11812 if (napi->poll == process_backlog) 11813 napi->state &= NAPIF_STATE_THREADED; 11814 else 11815 ____napi_schedule(sd, napi); 11816 } 11817 11818 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11819 local_irq_enable(); 11820 11821 if (!use_backlog_threads()) { 11822 #ifdef CONFIG_RPS 11823 remsd = oldsd->rps_ipi_list; 11824 oldsd->rps_ipi_list = NULL; 11825 #endif 11826 /* send out pending IPI's on offline CPU */ 11827 net_rps_send_ipi(remsd); 11828 } 11829 11830 /* Process offline CPU's input_pkt_queue */ 11831 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11832 netif_rx(skb); 11833 rps_input_queue_head_incr(oldsd); 11834 } 11835 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11836 netif_rx(skb); 11837 rps_input_queue_head_incr(oldsd); 11838 } 11839 11840 return 0; 11841 } 11842 11843 /** 11844 * netdev_increment_features - increment feature set by one 11845 * @all: current feature set 11846 * @one: new feature set 11847 * @mask: mask feature set 11848 * 11849 * Computes a new feature set after adding a device with feature set 11850 * @one to the master device with current feature set @all. Will not 11851 * enable anything that is off in @mask. Returns the new feature set. 11852 */ 11853 netdev_features_t netdev_increment_features(netdev_features_t all, 11854 netdev_features_t one, netdev_features_t mask) 11855 { 11856 if (mask & NETIF_F_HW_CSUM) 11857 mask |= NETIF_F_CSUM_MASK; 11858 mask |= NETIF_F_VLAN_CHALLENGED; 11859 11860 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11861 all &= one | ~NETIF_F_ALL_FOR_ALL; 11862 11863 /* If one device supports hw checksumming, set for all. */ 11864 if (all & NETIF_F_HW_CSUM) 11865 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11866 11867 return all; 11868 } 11869 EXPORT_SYMBOL(netdev_increment_features); 11870 11871 static struct hlist_head * __net_init netdev_create_hash(void) 11872 { 11873 int i; 11874 struct hlist_head *hash; 11875 11876 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11877 if (hash != NULL) 11878 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11879 INIT_HLIST_HEAD(&hash[i]); 11880 11881 return hash; 11882 } 11883 11884 /* Initialize per network namespace state */ 11885 static int __net_init netdev_init(struct net *net) 11886 { 11887 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11888 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11889 11890 INIT_LIST_HEAD(&net->dev_base_head); 11891 11892 net->dev_name_head = netdev_create_hash(); 11893 if (net->dev_name_head == NULL) 11894 goto err_name; 11895 11896 net->dev_index_head = netdev_create_hash(); 11897 if (net->dev_index_head == NULL) 11898 goto err_idx; 11899 11900 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1); 11901 11902 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11903 11904 return 0; 11905 11906 err_idx: 11907 kfree(net->dev_name_head); 11908 err_name: 11909 return -ENOMEM; 11910 } 11911 11912 /** 11913 * netdev_drivername - network driver for the device 11914 * @dev: network device 11915 * 11916 * Determine network driver for device. 
11917 */ 11918 const char *netdev_drivername(const struct net_device *dev) 11919 { 11920 const struct device_driver *driver; 11921 const struct device *parent; 11922 const char *empty = ""; 11923 11924 parent = dev->dev.parent; 11925 if (!parent) 11926 return empty; 11927 11928 driver = parent->driver; 11929 if (driver && driver->name) 11930 return driver->name; 11931 return empty; 11932 } 11933 11934 static void __netdev_printk(const char *level, const struct net_device *dev, 11935 struct va_format *vaf) 11936 { 11937 if (dev && dev->dev.parent) { 11938 dev_printk_emit(level[1] - '0', 11939 dev->dev.parent, 11940 "%s %s %s%s: %pV", 11941 dev_driver_string(dev->dev.parent), 11942 dev_name(dev->dev.parent), 11943 netdev_name(dev), netdev_reg_state(dev), 11944 vaf); 11945 } else if (dev) { 11946 printk("%s%s%s: %pV", 11947 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11948 } else { 11949 printk("%s(NULL net_device): %pV", level, vaf); 11950 } 11951 } 11952 11953 void netdev_printk(const char *level, const struct net_device *dev, 11954 const char *format, ...) 11955 { 11956 struct va_format vaf; 11957 va_list args; 11958 11959 va_start(args, format); 11960 11961 vaf.fmt = format; 11962 vaf.va = &args; 11963 11964 __netdev_printk(level, dev, &vaf); 11965 11966 va_end(args); 11967 } 11968 EXPORT_SYMBOL(netdev_printk); 11969 11970 #define define_netdev_printk_level(func, level) \ 11971 void func(const struct net_device *dev, const char *fmt, ...) \ 11972 { \ 11973 struct va_format vaf; \ 11974 va_list args; \ 11975 \ 11976 va_start(args, fmt); \ 11977 \ 11978 vaf.fmt = fmt; \ 11979 vaf.va = &args; \ 11980 \ 11981 __netdev_printk(level, dev, &vaf); \ 11982 \ 11983 va_end(args); \ 11984 } \ 11985 EXPORT_SYMBOL(func); 11986 11987 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11988 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11989 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11990 define_netdev_printk_level(netdev_err, KERN_ERR); 11991 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11992 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11993 define_netdev_printk_level(netdev_info, KERN_INFO); 11994 11995 static void __net_exit netdev_exit(struct net *net) 11996 { 11997 kfree(net->dev_name_head); 11998 kfree(net->dev_index_head); 11999 xa_destroy(&net->dev_by_index); 12000 if (net != &init_net) 12001 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 12002 } 12003 12004 static struct pernet_operations __net_initdata netdev_net_ops = { 12005 .init = netdev_init, 12006 .exit = netdev_exit, 12007 }; 12008 12009 static void __net_exit default_device_exit_net(struct net *net) 12010 { 12011 struct netdev_name_node *name_node, *tmp; 12012 struct net_device *dev, *aux; 12013 /* 12014 * Push all migratable network devices back to the 12015 * initial network namespace 12016 */ 12017 ASSERT_RTNL(); 12018 for_each_netdev_safe(net, dev, aux) { 12019 int err; 12020 char fb_name[IFNAMSIZ]; 12021 12022 /* Ignore unmoveable devices (i.e. 
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	xa_destroy(&net->dev_by_index);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit_net(struct net *net)
{
	struct netdev_name_node *name_node, *tmp;
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->netns_local)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (netdev_name_in_use(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");

		netdev_for_each_altname_safe(dev, name_node, tmp)
			if (netdev_name_in_use(&init_net, name_node->name))
				__netdev_name_node_alt_destroy(name_node);

		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit, all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		default_device_exit_net(net);
		cond_resched();
	}

	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit_batch = default_device_exit_batch,
};
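
/*
 * The checks in net_dev_struct_check() below are compile-time only: each
 * CACHELINE_ASSERT_GROUP_MEMBER() verifies that the named field still
 * lies inside the corresponding __cacheline_group_begin()/
 * __cacheline_group_end() region of struct net_device, and each
 * CACHELINE_ASSERT_GROUP_SIZE() bounds the byte size of that region.
 * This keeps the TX, TXRX and RX hot fields packed together, so a
 * struct net_device reorganisation cannot silently spread them over
 * additional cache lines.
 */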
static void __init net_dev_struct_check(void)
{
	/* TX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
#ifdef CONFIG_XPS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);

	/* TXRX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);

	/* RX read-mostly hotpath */
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
#ifdef CONFIG_NETPOLL
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
#endif
#ifdef CONFIG_NET_XGRESS
	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
#endif
	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92);
}
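
/*
 * Rough budget behind the size limits above, assuming 64-byte cache
 * lines and a group that starts on a cache-line boundary: the 160-byte
 * TX group fits in three cache lines, the 46-byte TXRX group in one,
 * and the 92-byte RX group in two.
 */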
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
#define SYSTEM_PERCPU_PAGE_POOL_SIZE	((1 << 20) / PAGE_SIZE)

static int net_page_pool_create(int cpuid)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	struct page_pool_params page_pool_params = {
		.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
		.flags = PP_FLAG_SYSTEM_POOL,
		.nid = cpu_to_mem(cpuid),
	};
	struct page_pool *pp_ptr;

	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
	if (IS_ERR(pp_ptr))
		return -ENOMEM;

	per_cpu(system_page_pool, cpuid) = pp_ptr;
#endif
	return 0;
}

static int backlog_napi_should_run(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
}

static void run_backlog_napi(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);

	napi_threaded_poll_loop(&sd->backlog);
}

static void backlog_napi_setup(unsigned int cpu)
{
	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
	struct napi_struct *napi = &sd->backlog;

	napi->thread = this_cpu_read(backlog_napi);
	set_bit(NAPI_STATE_THREADED, &napi->state);
}

static struct smp_hotplug_thread backlog_threads = {
	.store			= &backlog_napi,
	.thread_should_run	= backlog_napi_should_run,
	.thread_fn		= run_backlog_napi,
	.thread_comm		= "backlog_napi/%u",
	.setup			= backlog_napi_setup,
};
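
/*
 * When threaded backlog processing is enabled (use_backlog_threads()),
 * net_dev_init() below registers backlog_threads with the smpboot
 * infrastructure, which creates one "backlog_napi/%u" kthread per CPU.
 * backlog_napi_setup() points the per-CPU backlog NAPI at that thread,
 * and the thread runs napi_threaded_poll_loop() whenever
 * NAPI_STATE_SCHED_THREADED is set on the backlog NAPI, instead of the
 * backlog being drained from NET_RX_SOFTIRQ.
 */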
/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	net_dev_struct_check();

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
		spin_lock_init(&sd->defer_lock);

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		INIT_LIST_HEAD(&sd->backlog.poll_list);

		if (net_page_pool_create(i))
			goto out;
	}
	if (use_backlog_threads())
		smpboot_register_percpu_thread(&backlog_threads);

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device that appears
	 * and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;

	/* avoid static key IPIs to isolated CPUs */
	if (housekeeping_enabled(HK_TYPE_MISC))
		net_enable_timestamp();
out:
	if (rc < 0) {
		for_each_possible_cpu(i) {
			struct page_pool *pp_ptr;

			pp_ptr = per_cpu(system_page_pool, i);
			if (!pp_ptr)
				continue;

			page_pool_destroy(pp_ptr);
			per_cpu(system_page_pool, i) = NULL;
		}
	}

	return rc;
}

subsys_initcall(net_dev_init);