// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/sched/mm.h>
#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/netdev_queues.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/tcx.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/rps.h>
#include <linux/phy_link_topology.h>

#include "dev.h"
#include "devmem.h"
#include "net-sysfs.h"

static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static inline void dev_base_seq_inc(struct net *net)
{
	unsigned int val = net->dev_base_seq + 1;

	WRITE_ONCE(net->dev_base_seq, val ?: 1);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

#ifndef CONFIG_PREEMPT_RT

static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);

static int __init setup_backlog_napi_threads(char *arg)
{
	static_branch_enable(&use_backlog_threads_key);
	return 0;
}
early_param("thread_backlog_napi", setup_backlog_napi_threads);

static bool use_backlog_threads(void)
{
	return static_branch_unlikely(&use_backlog_threads_key);
}

#else

static bool use_backlog_threads(void)
{
	return true;
}

#endif

static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_save(*flags);
}

static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else
		local_irq_disable();
}

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_restore(*flags);
}

static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail_rcu(&name_node->list, &dev->name_node->list);

	return 0;
}

static void netdev_name_node_alt_free(struct rcu_head *head)
{
	struct netdev_name_node *name_node =
		container_of(head, struct netdev_name_node, rcu);

	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	netdev_name_node_del(name_node);
	list_del(&name_node->list);
	call_rcu(&name_node->rcu, netdev_name_node_alt_free);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);
	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
		list_del(&name_node->list);
		netdev_name_node_alt_free(&name_node->rcu);
	}
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_add(net, name_node);

	/* We reserved the ifindex, this can't fail */
	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	xa_erase(&net->dev_by_index, dev->ifindex);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_del(name_node);

	/* Unlink dev from the device chain */
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
};
EXPORT_PER_CPU_SYMBOL(softnet_data);

/* Page_pool has a lockless array/stack to alloc/recycle pages.
 * PP consumers must pay attention to run APIs in the appropriate context
 * (e.g. NAPI context).
 */
DEFINE_PER_CPU(struct page_pool *, system_page_pool);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL)) {
		if (!pt->af_packet_net && !pt->dev)
			return NULL;

		return pt->dev ? &pt->dev->ptype_all :
				 &pt->af_packet_net->ptype_all;
	}

	if (pt->dev)
		return &pt->dev->ptype_specific;

	return pt->af_packet_net ? &pt->af_packet_net->ptype_specific :
				   &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	if (WARN_ON_ONCE(!head))
		return;

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	if (!head)
		return;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
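
/*
 * Illustrative sketch (not part of the original file): how an out-of-tree
 * module might use dev_add_pack()/dev_remove_pack(). The handler name
 * my_ipv4_rcv(), the struct name my_pt and the init/exit hooks are
 * assumptions made up for this example, not code that exists elsewhere.
 */
#if 0
static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Each registered handler gets its own reference; do not modify a
	 * possibly shared skb (see the --ANK comment above), just consume it.
	 */
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_pt __read_mostly = {
	.type	= htons(ETH_P_IP),	/* goes into ptype_base via ptype_head() */
	.func	= my_ipv4_rcv,
};

static int __init my_pt_init(void)
{
	dev_add_pack(&my_pt);
	return 0;
}

static void __exit my_pt_exit(void)
{
	/* dev_remove_pack() also waits for a grace period via synchronize_net() */
	dev_remove_pack(&my_pt);
}
#endif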

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

/* must be called under rcu_read_lock(), as we dont take a reference */
static struct napi_struct *
netdev_napi_by_id(struct net *net, unsigned int napi_id)
{
	struct napi_struct *napi;

	napi = napi_by_id(napi_id);
	if (!napi)
		return NULL;

	if (WARN_ON_ONCE(!napi->dev))
		return NULL;
	if (!net_eq(net, dev_net(napi->dev)))
		return NULL;

	return napi;
}

/**
 * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
 * @net: the applicable net namespace
 * @napi_id: ID of a NAPI of a target device
 *
 * Find a NAPI instance with @napi_id. Lock its device.
 * The device must be in %NETREG_REGISTERED state for lookup to succeed.
 * netdev_unlock() must be called to release it.
 *
 * Return: pointer to NAPI, its device with lock held, NULL if not found.
 */
struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id)
{
	struct napi_struct *napi;
	struct net_device *dev;

	rcu_read_lock();
	napi = netdev_napi_by_id(net, napi_id);
	if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) {
		rcu_read_unlock();
		return NULL;
	}

	dev = napi->dev;
	dev_hold(dev);
	rcu_read_unlock();

	dev = __netdev_put_lock(dev);
	if (!dev)
		return NULL;

	rcu_read_lock();
	napi = netdev_napi_by_id(net, napi_id);
	if (napi && napi->dev != dev)
		napi = NULL;
	rcu_read_unlock();

	if (!napi)
		netdev_unlock(dev);
	return napi;
}

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	netdev_get_by_name() - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use netdev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, name);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RTNL semaphore.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * netdev_get_by_index() - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 * @tracker: tracking object for the acquired reference
 * @gfp: allocation flags for the tracker
 *
 * Search for an interface by index. Returns NULL if the device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_index);
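
/*
 * Illustrative sketch (not part of the original file): the reference-tracked
 * lookup pattern that netdev_get_by_index() above is meant for. The function
 * name my_use_ifindex() and the GFP_KERNEL context are assumptions made up
 * for this example.
 */
#if 0
static int my_use_ifindex(struct net *net, int ifindex)
{
	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_index(net, ifindex, &tracker, GFP_KERNEL);
	if (!dev)
		return -ENODEV;

	netdev_info(dev, "mtu is %u\n", READ_ONCE(dev->mtu));

	/* Pairs with the tracker taken above; a plain dev_put() would leave
	 * the tracker allocated.
	 */
	netdev_put(dev, &tracker);
	return 0;
}
#endif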

/**
 * dev_get_by_napi_id - find a device by napi_id
 * @napi_id: ID of the NAPI struct
 *
 * Search for an interface by NAPI ID. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not had
 * its reference counter increased so the caller must be careful
 * about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!napi_id_valid(napi_id))
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}

/* Release the held reference on the net_device, and if the net_device
 * is still registered try to lock the instance lock. If device is being
 * unregistered NULL will be returned (but the reference has been released,
 * either way!)
 *
 * This helper is intended for locking net_device after it has been looked up
 * using a lockless lookup helper. Lock prevents the instance from going away.
 */
struct net_device *__netdev_put_lock(struct net_device *dev)
{
	netdev_lock(dev);
	if (dev->reg_state > NETREG_REGISTERED) {
		netdev_unlock(dev);
		dev_put(dev);
		return NULL;
	}
	dev_put(dev);
	return dev;
}

/**
 * netdev_get_by_index_lock() - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. If a valid device
 * with @ifindex is found it will be returned with netdev->lock held.
 * netdev_unlock() must be called to release it.
 *
 * Return: pointer to a device with lock held, NULL if not found.
 */
struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	if (!dev)
		return NULL;

	return __netdev_put_lock(dev);
}

struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index)
{
	if (dev)
		netdev_unlock(dev);

	do {
		rcu_read_lock();
		dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT);
		if (!dev) {
			rcu_read_unlock();
			return NULL;
		}
		dev_hold(dev);
		rcu_read_unlock();

		dev = __netdev_put_lock(dev);
		if (dev)
			return dev;

		(*index)++;
	} while (true);
}

static DEFINE_SEQLOCK(netdev_rename_lock);

void netdev_copy_name(struct net_device *dev, char *name)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&netdev_rename_lock);
		strscpy(name, dev->name, IFNAMSIZ);
	} while (read_seqretry(&netdev_rename_lock, seq));
}

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	netdev_copy_name(dev, name);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

static bool dev_addr_cmp(struct net_device *dev, unsigned short type,
			 const char *ha)
{
	return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev_addr_cmp(dev, type, ha))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

/**
 * dev_getbyhwaddr() - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Similar to dev_getbyhwaddr_rcu(), but the owner needs to hold
 * rtnl_lock.
 *
 * Context: rtnl_lock() must be held.
 * Return: pointer to the net_device, or NULL if not found
 */
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
				   const char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev_addr_cmp(dev, type, ha))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
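
/*
 * Illustrative sketch (not part of the original file): an RCU-protected
 * lookup with dev_getbyhwaddr_rcu(). The helper name my_mac_is_known() is
 * made up for this example; the returned pointer is only valid inside the
 * rcu_read_lock() section unless a reference is taken.
 */
#if 0
static bool my_mac_is_known(struct net *net, const char *mac)
{
	struct net_device *dev;
	bool known;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
	known = dev != NULL;
	rcu_read_unlock();

	return known;
}
#endif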

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@res: result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *res)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;
	char buf[IFNAMSIZ];

	/* Verify the string as this thing may have come from the user.
	 * There must be one "%d" and no other "%" characters.
	 */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	/* Use one page as a bit array of possible slots */
	inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
	if (!inuse)
		return -ENOMEM;

	for_each_netdev(net, d) {
		struct netdev_name_node *name_node;

		netdev_for_each_altname(d, name_node) {
			if (!sscanf(name_node->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, name_node->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}
		if (!sscanf(d->name, name, &i))
			continue;
		if (i < 0 || i >= max_netdevices)
			continue;

		/* avoid cases where sscanf is not exact inverse of printf */
		snprintf(buf, IFNAMSIZ, name, i);
		if (!strncmp(buf, d->name, IFNAMSIZ))
			__set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, max_netdevices);
	bitmap_free(inuse);
	if (i == max_netdevices)
		return -ENFILE;

	/* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
	strscpy(buf, name, IFNAMSIZ);
	snprintf(res, IFNAMSIZ, buf, i);
	return i;
}

/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name,
			       int dup_errno)
{
	if (!dev_valid_name(want_name))
		return -EINVAL;

	if (strchr(want_name, '%'))
		return __dev_alloc_name(net, want_name, out_name);

	if (netdev_name_in_use(net, want_name))
		return -dup_errno;
	if (out_name != want_name)
		strscpy(out_name, want_name, IFNAMSIZ);
	return 0;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
}
EXPORT_SYMBOL(dev_alloc_name);
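
/*
 * Illustrative sketch (not part of the original file): a driver asking for
 * the first free "myif%d" slot before registration. The helper name
 * my_pick_name(), the format string and the assumption that RTNL is already
 * held are all made up for this example.
 */
#if 0
static int my_pick_name(struct net_device *dev)
{
	int unit;

	ASSERT_RTNL();

	/* Fills dev->name with e.g. "myif0" and returns the unit, or -errno. */
	unit = dev_alloc_name(dev, "myif%d");
	if (unit < 0)
		return unit;

	netdev_dbg(dev, "assigned unit %d\n", unit);
	return 0;
}
#endif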
ret : 0; 1387 } 1388 netif_change_name(struct net_device * dev,const char * newname)1389 int netif_change_name(struct net_device *dev, const char *newname) 1390 { 1391 struct net *net = dev_net(dev); 1392 unsigned char old_assign_type; 1393 char oldname[IFNAMSIZ]; 1394 int err = 0; 1395 int ret; 1396 1397 ASSERT_RTNL_NET(net); 1398 1399 if (!strncmp(newname, dev->name, IFNAMSIZ)) 1400 return 0; 1401 1402 memcpy(oldname, dev->name, IFNAMSIZ); 1403 1404 write_seqlock_bh(&netdev_rename_lock); 1405 err = dev_get_valid_name(net, dev, newname); 1406 write_sequnlock_bh(&netdev_rename_lock); 1407 1408 if (err < 0) 1409 return err; 1410 1411 if (oldname[0] && !strchr(oldname, '%')) 1412 netdev_info(dev, "renamed from %s%s\n", oldname, 1413 dev->flags & IFF_UP ? " (while UP)" : ""); 1414 1415 old_assign_type = dev->name_assign_type; 1416 WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED); 1417 1418 rollback: 1419 ret = device_rename(&dev->dev, dev->name); 1420 if (ret) { 1421 write_seqlock_bh(&netdev_rename_lock); 1422 memcpy(dev->name, oldname, IFNAMSIZ); 1423 write_sequnlock_bh(&netdev_rename_lock); 1424 WRITE_ONCE(dev->name_assign_type, old_assign_type); 1425 return ret; 1426 } 1427 1428 netdev_adjacent_rename_links(dev, oldname); 1429 1430 netdev_name_node_del(dev->name_node); 1431 1432 synchronize_net(); 1433 1434 netdev_name_node_add(net, dev->name_node); 1435 1436 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1437 ret = notifier_to_errno(ret); 1438 1439 if (ret) { 1440 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1441 if (err >= 0) { 1442 err = ret; 1443 write_seqlock_bh(&netdev_rename_lock); 1444 memcpy(dev->name, oldname, IFNAMSIZ); 1445 write_sequnlock_bh(&netdev_rename_lock); 1446 memcpy(oldname, newname, IFNAMSIZ); 1447 WRITE_ONCE(dev->name_assign_type, old_assign_type); 1448 old_assign_type = NET_NAME_RENAMED; 1449 goto rollback; 1450 } else { 1451 netdev_err(dev, "name change rollback failed: %d\n", 1452 ret); 1453 } 1454 } 1455 1456 return err; 1457 } 1458 netif_set_alias(struct net_device * dev,const char * alias,size_t len)1459 int netif_set_alias(struct net_device *dev, const char *alias, size_t len) 1460 { 1461 struct dev_ifalias *new_alias = NULL; 1462 1463 if (len >= IFALIASZ) 1464 return -EINVAL; 1465 1466 if (len) { 1467 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1468 if (!new_alias) 1469 return -ENOMEM; 1470 1471 memcpy(new_alias->ifalias, alias, len); 1472 new_alias->ifalias[len] = 0; 1473 } 1474 1475 mutex_lock(&ifalias_mutex); 1476 new_alias = rcu_replace_pointer(dev->ifalias, new_alias, 1477 mutex_is_locked(&ifalias_mutex)); 1478 mutex_unlock(&ifalias_mutex); 1479 1480 if (new_alias) 1481 kfree_rcu(new_alias, rcuhead); 1482 1483 return len; 1484 } 1485 1486 /** 1487 * dev_get_alias - get ifalias of a device 1488 * @dev: device 1489 * @name: buffer to store name of ifalias 1490 * @len: size of buffer 1491 * 1492 * get ifalias for a device. Caller must make sure dev cannot go 1493 * away, e.g. rcu read lock or own a reference count to device. 
1494 */ dev_get_alias(const struct net_device * dev,char * name,size_t len)1495 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1496 { 1497 const struct dev_ifalias *alias; 1498 int ret = 0; 1499 1500 rcu_read_lock(); 1501 alias = rcu_dereference(dev->ifalias); 1502 if (alias) 1503 ret = snprintf(name, len, "%s", alias->ifalias); 1504 rcu_read_unlock(); 1505 1506 return ret; 1507 } 1508 1509 /** 1510 * netdev_features_change - device changes features 1511 * @dev: device to cause notification 1512 * 1513 * Called to indicate a device has changed features. 1514 */ netdev_features_change(struct net_device * dev)1515 void netdev_features_change(struct net_device *dev) 1516 { 1517 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1518 } 1519 EXPORT_SYMBOL(netdev_features_change); 1520 1521 /** 1522 * netdev_state_change - device changes state 1523 * @dev: device to cause notification 1524 * 1525 * Called to indicate a device has changed state. This function calls 1526 * the notifier chains for netdev_chain and sends a NEWLINK message 1527 * to the routing socket. 1528 */ netdev_state_change(struct net_device * dev)1529 void netdev_state_change(struct net_device *dev) 1530 { 1531 if (dev->flags & IFF_UP) { 1532 struct netdev_notifier_change_info change_info = { 1533 .info.dev = dev, 1534 }; 1535 1536 call_netdevice_notifiers_info(NETDEV_CHANGE, 1537 &change_info.info); 1538 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL); 1539 } 1540 } 1541 EXPORT_SYMBOL(netdev_state_change); 1542 1543 /** 1544 * __netdev_notify_peers - notify network peers about existence of @dev, 1545 * to be called when rtnl lock is already held. 1546 * @dev: network device 1547 * 1548 * Generate traffic such that interested network peers are aware of 1549 * @dev, such as by generating a gratuitous ARP. This may be used when 1550 * a device wants to inform the rest of the network about some sort of 1551 * reconfiguration such as a failover event or virtual machine 1552 * migration. 1553 */ __netdev_notify_peers(struct net_device * dev)1554 void __netdev_notify_peers(struct net_device *dev) 1555 { 1556 ASSERT_RTNL(); 1557 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1558 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1559 } 1560 EXPORT_SYMBOL(__netdev_notify_peers); 1561 1562 /** 1563 * netdev_notify_peers - notify network peers about existence of @dev 1564 * @dev: network device 1565 * 1566 * Generate traffic such that interested network peers are aware of 1567 * @dev, such as by generating a gratuitous ARP. This may be used when 1568 * a device wants to inform the rest of the network about some sort of 1569 * reconfiguration such as a failover event or virtual machine 1570 * migration. 1571 */ netdev_notify_peers(struct net_device * dev)1572 void netdev_notify_peers(struct net_device *dev) 1573 { 1574 rtnl_lock(); 1575 __netdev_notify_peers(dev); 1576 rtnl_unlock(); 1577 } 1578 EXPORT_SYMBOL(netdev_notify_peers); 1579 1580 static int napi_threaded_poll(void *data); 1581 napi_kthread_create(struct napi_struct * n)1582 static int napi_kthread_create(struct napi_struct *n) 1583 { 1584 int err = 0; 1585 1586 /* Create and wake up the kthread once to put it in 1587 * TASK_INTERRUPTIBLE mode to avoid the blocked task 1588 * warning and work with loadavg. 
1589 */ 1590 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", 1591 n->dev->name, n->napi_id); 1592 if (IS_ERR(n->thread)) { 1593 err = PTR_ERR(n->thread); 1594 pr_err("kthread_run failed with err %d\n", err); 1595 n->thread = NULL; 1596 } 1597 1598 return err; 1599 } 1600 __dev_open(struct net_device * dev,struct netlink_ext_ack * extack)1601 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1602 { 1603 const struct net_device_ops *ops = dev->netdev_ops; 1604 int ret; 1605 1606 ASSERT_RTNL(); 1607 dev_addr_check(dev); 1608 1609 if (!netif_device_present(dev)) { 1610 /* may be detached because parent is runtime-suspended */ 1611 if (dev->dev.parent) 1612 pm_runtime_resume(dev->dev.parent); 1613 if (!netif_device_present(dev)) 1614 return -ENODEV; 1615 } 1616 1617 /* Block netpoll from trying to do any rx path servicing. 1618 * If we don't do this there is a chance ndo_poll_controller 1619 * or ndo_poll may be running while we open the device 1620 */ 1621 netpoll_poll_disable(dev); 1622 1623 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1624 ret = notifier_to_errno(ret); 1625 if (ret) 1626 return ret; 1627 1628 set_bit(__LINK_STATE_START, &dev->state); 1629 1630 netdev_ops_assert_locked(dev); 1631 1632 if (ops->ndo_validate_addr) 1633 ret = ops->ndo_validate_addr(dev); 1634 1635 if (!ret && ops->ndo_open) 1636 ret = ops->ndo_open(dev); 1637 1638 netpoll_poll_enable(dev); 1639 1640 if (ret) 1641 clear_bit(__LINK_STATE_START, &dev->state); 1642 else { 1643 netif_set_up(dev, true); 1644 dev_set_rx_mode(dev); 1645 dev_activate(dev); 1646 add_device_randomness(dev->dev_addr, dev->addr_len); 1647 } 1648 1649 return ret; 1650 } 1651 netif_open(struct net_device * dev,struct netlink_ext_ack * extack)1652 int netif_open(struct net_device *dev, struct netlink_ext_ack *extack) 1653 { 1654 int ret; 1655 1656 if (dev->flags & IFF_UP) 1657 return 0; 1658 1659 ret = __dev_open(dev, extack); 1660 if (ret < 0) 1661 return ret; 1662 1663 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); 1664 call_netdevice_notifiers(NETDEV_UP, dev); 1665 1666 return ret; 1667 } 1668 __dev_close_many(struct list_head * head)1669 static void __dev_close_many(struct list_head *head) 1670 { 1671 struct net_device *dev; 1672 1673 ASSERT_RTNL(); 1674 might_sleep(); 1675 1676 list_for_each_entry(dev, head, close_list) { 1677 /* Temporarily disable netpoll until the interface is down */ 1678 netpoll_poll_disable(dev); 1679 1680 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1681 1682 clear_bit(__LINK_STATE_START, &dev->state); 1683 1684 /* Synchronize to scheduled poll. We cannot touch poll list, it 1685 * can be even on different cpu. So just clear netif_running(). 1686 * 1687 * dev->stop() will invoke napi_disable() on all of it's 1688 * napi_struct instances on this device. 1689 */ 1690 smp_mb__after_atomic(); /* Commit netif_running(). */ 1691 } 1692 1693 dev_deactivate_many(head); 1694 1695 list_for_each_entry(dev, head, close_list) { 1696 const struct net_device_ops *ops = dev->netdev_ops; 1697 1698 /* 1699 * Call the device specific close. This cannot fail. 1700 * Only if device is UP 1701 * 1702 * We allow it to be called even after a DETACH hot-plug 1703 * event. 
1704 */ 1705 1706 netdev_ops_assert_locked(dev); 1707 1708 if (ops->ndo_stop) 1709 ops->ndo_stop(dev); 1710 1711 netif_set_up(dev, false); 1712 netpoll_poll_enable(dev); 1713 } 1714 } 1715 __dev_close(struct net_device * dev)1716 static void __dev_close(struct net_device *dev) 1717 { 1718 LIST_HEAD(single); 1719 1720 list_add(&dev->close_list, &single); 1721 __dev_close_many(&single); 1722 list_del(&single); 1723 } 1724 dev_close_many(struct list_head * head,bool unlink)1725 void dev_close_many(struct list_head *head, bool unlink) 1726 { 1727 struct net_device *dev, *tmp; 1728 1729 /* Remove the devices that don't need to be closed */ 1730 list_for_each_entry_safe(dev, tmp, head, close_list) 1731 if (!(dev->flags & IFF_UP)) 1732 list_del_init(&dev->close_list); 1733 1734 __dev_close_many(head); 1735 1736 list_for_each_entry_safe(dev, tmp, head, close_list) { 1737 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); 1738 call_netdevice_notifiers(NETDEV_DOWN, dev); 1739 if (unlink) 1740 list_del_init(&dev->close_list); 1741 } 1742 } 1743 EXPORT_SYMBOL(dev_close_many); 1744 netif_close(struct net_device * dev)1745 void netif_close(struct net_device *dev) 1746 { 1747 if (dev->flags & IFF_UP) { 1748 LIST_HEAD(single); 1749 1750 list_add(&dev->close_list, &single); 1751 dev_close_many(&single, true); 1752 list_del(&single); 1753 } 1754 } 1755 EXPORT_SYMBOL(netif_close); 1756 netif_disable_lro(struct net_device * dev)1757 void netif_disable_lro(struct net_device *dev) 1758 { 1759 struct net_device *lower_dev; 1760 struct list_head *iter; 1761 1762 dev->wanted_features &= ~NETIF_F_LRO; 1763 netdev_update_features(dev); 1764 1765 if (unlikely(dev->features & NETIF_F_LRO)) 1766 netdev_WARN(dev, "failed to disable LRO!\n"); 1767 1768 netdev_for_each_lower_dev(dev, lower_dev, iter) { 1769 netdev_lock_ops(lower_dev); 1770 netif_disable_lro(lower_dev); 1771 netdev_unlock_ops(lower_dev); 1772 } 1773 } 1774 1775 /** 1776 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1777 * @dev: device 1778 * 1779 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1780 * called under RTNL. This is needed if Generic XDP is installed on 1781 * the device. 
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	N(XDP_FEAT_CHANGE)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN, dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;
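
/* Usage sketch (illustrative only, not part of this file): a typical caller
 * of register_netdevice_notifier() below supplies a callback and notifier
 * block of the following shape. The names my_netdev_event() and my_nb are
 * hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			netdev_info(dev, "up\n");
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			netdev_info(dev, "going down\n");
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);
 */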

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier so that it gets a race-free view of the network device
 * list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);

	/* When RTNL is removed, we need protection for netdev_chain. */
	rtnl_lock();

	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		__rtnl_net_lock(net);
		err = call_netdevice_register_net_notifiers(nb, net);
		__rtnl_net_unlock(net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net) {
		__rtnl_net_lock(net);
		call_netdevice_unregister_net_notifiers(nb, net);
		__rtnl_net_unlock(net);
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier, removing
 * the need for special-case cleanup code.
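 *
 * Illustrative pairing (a sketch only; my_nb is the hypothetical notifier
 * block from the usage sketch above register_netdevice_notifier()):
 *
 *	unregister_netdevice_notifier(&my_nb);
 *
 * Once this returns, my_nb may be freed or registered again.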
1951 */ 1952 unregister_netdevice_notifier(struct notifier_block * nb)1953 int unregister_netdevice_notifier(struct notifier_block *nb) 1954 { 1955 struct net *net; 1956 int err; 1957 1958 /* Close race with setup_net() and cleanup_net() */ 1959 down_write(&pernet_ops_rwsem); 1960 rtnl_lock(); 1961 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1962 if (err) 1963 goto unlock; 1964 1965 for_each_net(net) { 1966 __rtnl_net_lock(net); 1967 call_netdevice_unregister_net_notifiers(nb, net); 1968 __rtnl_net_unlock(net); 1969 } 1970 1971 unlock: 1972 rtnl_unlock(); 1973 up_write(&pernet_ops_rwsem); 1974 return err; 1975 } 1976 EXPORT_SYMBOL(unregister_netdevice_notifier); 1977 __register_netdevice_notifier_net(struct net * net,struct notifier_block * nb,bool ignore_call_fail)1978 static int __register_netdevice_notifier_net(struct net *net, 1979 struct notifier_block *nb, 1980 bool ignore_call_fail) 1981 { 1982 int err; 1983 1984 err = raw_notifier_chain_register(&net->netdev_chain, nb); 1985 if (err) 1986 return err; 1987 if (dev_boot_phase) 1988 return 0; 1989 1990 err = call_netdevice_register_net_notifiers(nb, net); 1991 if (err && !ignore_call_fail) 1992 goto chain_unregister; 1993 1994 return 0; 1995 1996 chain_unregister: 1997 raw_notifier_chain_unregister(&net->netdev_chain, nb); 1998 return err; 1999 } 2000 __unregister_netdevice_notifier_net(struct net * net,struct notifier_block * nb)2001 static int __unregister_netdevice_notifier_net(struct net *net, 2002 struct notifier_block *nb) 2003 { 2004 int err; 2005 2006 err = raw_notifier_chain_unregister(&net->netdev_chain, nb); 2007 if (err) 2008 return err; 2009 2010 call_netdevice_unregister_net_notifiers(nb, net); 2011 return 0; 2012 } 2013 2014 /** 2015 * register_netdevice_notifier_net - register a per-netns network notifier block 2016 * @net: network namespace 2017 * @nb: notifier 2018 * 2019 * Register a notifier to be called when network device events occur. 2020 * The notifier passed is linked into the kernel structures and must 2021 * not be reused until it has been unregistered. A negative errno code 2022 * is returned on a failure. 2023 * 2024 * When registered all registration and up events are replayed 2025 * to the new notifier to allow device to have a race free 2026 * view of the network device list. 2027 */ 2028 register_netdevice_notifier_net(struct net * net,struct notifier_block * nb)2029 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) 2030 { 2031 int err; 2032 2033 rtnl_net_lock(net); 2034 err = __register_netdevice_notifier_net(net, nb, false); 2035 rtnl_net_unlock(net); 2036 2037 return err; 2038 } 2039 EXPORT_SYMBOL(register_netdevice_notifier_net); 2040 2041 /** 2042 * unregister_netdevice_notifier_net - unregister a per-netns 2043 * network notifier block 2044 * @net: network namespace 2045 * @nb: notifier 2046 * 2047 * Unregister a notifier previously registered by 2048 * register_netdevice_notifier_net(). The notifier is unlinked from the 2049 * kernel structures and may then be reused. A negative errno code 2050 * is returned on a failure. 2051 * 2052 * After unregistering unregister and down device events are synthesized 2053 * for all devices on the device list to the removed notifier to remove 2054 * the need for special case cleanup code. 
2055 */ 2056 unregister_netdevice_notifier_net(struct net * net,struct notifier_block * nb)2057 int unregister_netdevice_notifier_net(struct net *net, 2058 struct notifier_block *nb) 2059 { 2060 int err; 2061 2062 rtnl_net_lock(net); 2063 err = __unregister_netdevice_notifier_net(net, nb); 2064 rtnl_net_unlock(net); 2065 2066 return err; 2067 } 2068 EXPORT_SYMBOL(unregister_netdevice_notifier_net); 2069 __move_netdevice_notifier_net(struct net * src_net,struct net * dst_net,struct notifier_block * nb)2070 static void __move_netdevice_notifier_net(struct net *src_net, 2071 struct net *dst_net, 2072 struct notifier_block *nb) 2073 { 2074 __unregister_netdevice_notifier_net(src_net, nb); 2075 __register_netdevice_notifier_net(dst_net, nb, true); 2076 } 2077 rtnl_net_dev_lock(struct net_device * dev)2078 static void rtnl_net_dev_lock(struct net_device *dev) 2079 { 2080 bool again; 2081 2082 do { 2083 struct net *net; 2084 2085 again = false; 2086 2087 /* netns might be being dismantled. */ 2088 rcu_read_lock(); 2089 net = dev_net_rcu(dev); 2090 net_passive_inc(net); 2091 rcu_read_unlock(); 2092 2093 rtnl_net_lock(net); 2094 2095 #ifdef CONFIG_NET_NS 2096 /* dev might have been moved to another netns. */ 2097 if (!net_eq(net, rcu_access_pointer(dev->nd_net.net))) { 2098 rtnl_net_unlock(net); 2099 net_passive_dec(net); 2100 again = true; 2101 } 2102 #endif 2103 } while (again); 2104 } 2105 rtnl_net_dev_unlock(struct net_device * dev)2106 static void rtnl_net_dev_unlock(struct net_device *dev) 2107 { 2108 struct net *net = dev_net(dev); 2109 2110 rtnl_net_unlock(net); 2111 net_passive_dec(net); 2112 } 2113 register_netdevice_notifier_dev_net(struct net_device * dev,struct notifier_block * nb,struct netdev_net_notifier * nn)2114 int register_netdevice_notifier_dev_net(struct net_device *dev, 2115 struct notifier_block *nb, 2116 struct netdev_net_notifier *nn) 2117 { 2118 int err; 2119 2120 rtnl_net_dev_lock(dev); 2121 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); 2122 if (!err) { 2123 nn->nb = nb; 2124 list_add(&nn->list, &dev->net_notifier_list); 2125 } 2126 rtnl_net_dev_unlock(dev); 2127 2128 return err; 2129 } 2130 EXPORT_SYMBOL(register_netdevice_notifier_dev_net); 2131 unregister_netdevice_notifier_dev_net(struct net_device * dev,struct notifier_block * nb,struct netdev_net_notifier * nn)2132 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 2133 struct notifier_block *nb, 2134 struct netdev_net_notifier *nn) 2135 { 2136 int err; 2137 2138 rtnl_net_dev_lock(dev); 2139 list_del(&nn->list); 2140 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); 2141 rtnl_net_dev_unlock(dev); 2142 2143 return err; 2144 } 2145 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); 2146 move_netdevice_notifiers_dev_net(struct net_device * dev,struct net * net)2147 static void move_netdevice_notifiers_dev_net(struct net_device *dev, 2148 struct net *net) 2149 { 2150 struct netdev_net_notifier *nn; 2151 2152 list_for_each_entry(nn, &dev->net_notifier_list, list) 2153 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb); 2154 } 2155 2156 /** 2157 * call_netdevice_notifiers_info - call all network notifier blocks 2158 * @val: value passed unmodified to notifier function 2159 * @info: notifier information data 2160 * 2161 * Call all network notifier blocks. Parameters and return value 2162 * are as for raw_notifier_call_chain(). 
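 *
 * Illustrative call site (a sketch only): most callers go through the
 * call_netdevice_notifiers*() wrappers below, which build the
 * struct netdev_notifier_info on the stack for them, e.g.:
 *
 *	struct netdev_notifier_info info = { .dev = dev };
 *
 *	ret = call_netdevice_notifiers_info(NETDEV_CHANGE, &info);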
 */
int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
	if (ret & NOTIFY_STOP_MASK)
		return ret;
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks
 *	and roll back on error
 * @val_up: value passed unmodified to notifier function
 * @val_down: value passed unmodified to the notifier function when
 *	recovering from an error on @val_up
 * @info: notifier information data
 *
 * Call all per-netns network notifier blocks, but not notifier blocks on
 * the global notifier chain. Parameters and return value are as for
 * raw_notifier_call_chain_robust().
 */

static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
				     unsigned long val_down,
				     struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);

	ASSERT_RTNL();

	return raw_notifier_call_chain_robust(&net->netdev_chain,
					      val_up, val_down, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

/**
 * call_netdevice_notifiers_mtu - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 * @arg: additional u32 argument passed to the notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
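 *
 * Illustrative use (a sketch only; the MTU-change path is the expected
 * caller). The extended info embeds the plain notifier info as its first
 * member, so notifiers that only know the base structure keep working:
 *
 *	err = call_netdevice_notifiers_mtu(NETDEV_PRECHANGEMTU, dev, new_mtu);
 *	err = notifier_to_errno(err);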
2244 */ call_netdevice_notifiers_mtu(unsigned long val,struct net_device * dev,u32 arg)2245 static int call_netdevice_notifiers_mtu(unsigned long val, 2246 struct net_device *dev, u32 arg) 2247 { 2248 struct netdev_notifier_info_ext info = { 2249 .info.dev = dev, 2250 .ext.mtu = arg, 2251 }; 2252 2253 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2254 2255 return call_netdevice_notifiers_info(val, &info.info); 2256 } 2257 2258 #ifdef CONFIG_NET_INGRESS 2259 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2260 net_inc_ingress_queue(void)2261 void net_inc_ingress_queue(void) 2262 { 2263 static_branch_inc(&ingress_needed_key); 2264 } 2265 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2266 net_dec_ingress_queue(void)2267 void net_dec_ingress_queue(void) 2268 { 2269 static_branch_dec(&ingress_needed_key); 2270 } 2271 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2272 #endif 2273 2274 #ifdef CONFIG_NET_EGRESS 2275 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2276 net_inc_egress_queue(void)2277 void net_inc_egress_queue(void) 2278 { 2279 static_branch_inc(&egress_needed_key); 2280 } 2281 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2282 net_dec_egress_queue(void)2283 void net_dec_egress_queue(void) 2284 { 2285 static_branch_dec(&egress_needed_key); 2286 } 2287 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2288 #endif 2289 2290 #ifdef CONFIG_NET_CLS_ACT 2291 DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key); 2292 EXPORT_SYMBOL(tcf_sw_enabled_key); 2293 #endif 2294 2295 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2296 EXPORT_SYMBOL(netstamp_needed_key); 2297 #ifdef CONFIG_JUMP_LABEL 2298 static atomic_t netstamp_needed_deferred; 2299 static atomic_t netstamp_wanted; netstamp_clear(struct work_struct * work)2300 static void netstamp_clear(struct work_struct *work) 2301 { 2302 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2303 int wanted; 2304 2305 wanted = atomic_add_return(deferred, &netstamp_wanted); 2306 if (wanted > 0) 2307 static_branch_enable(&netstamp_needed_key); 2308 else 2309 static_branch_disable(&netstamp_needed_key); 2310 } 2311 static DECLARE_WORK(netstamp_work, netstamp_clear); 2312 #endif 2313 net_enable_timestamp(void)2314 void net_enable_timestamp(void) 2315 { 2316 #ifdef CONFIG_JUMP_LABEL 2317 int wanted = atomic_read(&netstamp_wanted); 2318 2319 while (wanted > 0) { 2320 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1)) 2321 return; 2322 } 2323 atomic_inc(&netstamp_needed_deferred); 2324 schedule_work(&netstamp_work); 2325 #else 2326 static_branch_inc(&netstamp_needed_key); 2327 #endif 2328 } 2329 EXPORT_SYMBOL(net_enable_timestamp); 2330 net_disable_timestamp(void)2331 void net_disable_timestamp(void) 2332 { 2333 #ifdef CONFIG_JUMP_LABEL 2334 int wanted = atomic_read(&netstamp_wanted); 2335 2336 while (wanted > 1) { 2337 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1)) 2338 return; 2339 } 2340 atomic_dec(&netstamp_needed_deferred); 2341 schedule_work(&netstamp_work); 2342 #else 2343 static_branch_dec(&netstamp_needed_key); 2344 #endif 2345 } 2346 EXPORT_SYMBOL(net_disable_timestamp); 2347 net_timestamp_set(struct sk_buff * skb)2348 static inline void net_timestamp_set(struct sk_buff *skb) 2349 { 2350 skb->tstamp = 0; 2351 skb->tstamp_type = SKB_CLOCK_REALTIME; 2352 if (static_branch_unlikely(&netstamp_needed_key)) 2353 skb->tstamp = ktime_get_real(); 2354 } 2355 2356 #define net_timestamp_check(COND, SKB) \ 2357 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2358 if ((COND) && !(SKB)->tstamp) \ 2359 (SKB)->tstamp = 
ktime_get_real(); \ 2360 } \ 2361 is_skb_forwardable(const struct net_device * dev,const struct sk_buff * skb)2362 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2363 { 2364 return __is_skb_forwardable(dev, skb, true); 2365 } 2366 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2367 __dev_forward_skb2(struct net_device * dev,struct sk_buff * skb,bool check_mtu)2368 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2369 bool check_mtu) 2370 { 2371 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2372 2373 if (likely(!ret)) { 2374 skb->protocol = eth_type_trans(skb, dev); 2375 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2376 } 2377 2378 return ret; 2379 } 2380 __dev_forward_skb(struct net_device * dev,struct sk_buff * skb)2381 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2382 { 2383 return __dev_forward_skb2(dev, skb, true); 2384 } 2385 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2386 2387 /** 2388 * dev_forward_skb - loopback an skb to another netif 2389 * 2390 * @dev: destination network device 2391 * @skb: buffer to forward 2392 * 2393 * return values: 2394 * NET_RX_SUCCESS (no congestion) 2395 * NET_RX_DROP (packet was dropped, but freed) 2396 * 2397 * dev_forward_skb can be used for injecting an skb from the 2398 * start_xmit function of one device into the receive queue 2399 * of another device. 2400 * 2401 * The receiving device may be in another namespace, so 2402 * we have to clear all information in the skb that could 2403 * impact namespace isolation. 2404 */ dev_forward_skb(struct net_device * dev,struct sk_buff * skb)2405 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2406 { 2407 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2408 } 2409 EXPORT_SYMBOL_GPL(dev_forward_skb); 2410 dev_forward_skb_nomtu(struct net_device * dev,struct sk_buff * skb)2411 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2412 { 2413 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2414 } 2415 deliver_skb(struct sk_buff * skb,struct packet_type * pt_prev,struct net_device * orig_dev)2416 static inline int deliver_skb(struct sk_buff *skb, 2417 struct packet_type *pt_prev, 2418 struct net_device *orig_dev) 2419 { 2420 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2421 return -ENOMEM; 2422 refcount_inc(&skb->users); 2423 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2424 } 2425 deliver_ptype_list_skb(struct sk_buff * skb,struct packet_type ** pt,struct net_device * orig_dev,__be16 type,struct list_head * ptype_list)2426 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2427 struct packet_type **pt, 2428 struct net_device *orig_dev, 2429 __be16 type, 2430 struct list_head *ptype_list) 2431 { 2432 struct packet_type *ptype, *pt_prev = *pt; 2433 2434 list_for_each_entry_rcu(ptype, ptype_list, list) { 2435 if (ptype->type != type) 2436 continue; 2437 if (pt_prev) 2438 deliver_skb(skb, pt_prev, orig_dev); 2439 pt_prev = ptype; 2440 } 2441 *pt = pt_prev; 2442 } 2443 skb_loop_sk(struct packet_type * ptype,struct sk_buff * skb)2444 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2445 { 2446 if (!ptype->af_packet_priv || !skb->sk) 2447 return false; 2448 2449 if (ptype->id_match) 2450 return ptype->id_match(ptype, skb->sk); 2451 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2452 return true; 2453 2454 return false; 2455 } 2456 2457 /** 2458 * dev_nit_active_rcu - return true if any network 
interface taps are in use 2459 * 2460 * The caller must hold the RCU lock 2461 * 2462 * @dev: network device to check for the presence of taps 2463 */ dev_nit_active_rcu(const struct net_device * dev)2464 bool dev_nit_active_rcu(const struct net_device *dev) 2465 { 2466 /* Callers may hold either RCU or RCU BH lock */ 2467 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 2468 2469 return !list_empty(&dev_net(dev)->ptype_all) || 2470 !list_empty(&dev->ptype_all); 2471 } 2472 EXPORT_SYMBOL_GPL(dev_nit_active_rcu); 2473 2474 /* 2475 * Support routine. Sends outgoing frames to any network 2476 * taps currently in use. 2477 */ 2478 dev_queue_xmit_nit(struct sk_buff * skb,struct net_device * dev)2479 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2480 { 2481 struct packet_type *ptype, *pt_prev = NULL; 2482 struct list_head *ptype_list; 2483 struct sk_buff *skb2 = NULL; 2484 2485 rcu_read_lock(); 2486 ptype_list = &dev_net_rcu(dev)->ptype_all; 2487 again: 2488 list_for_each_entry_rcu(ptype, ptype_list, list) { 2489 if (READ_ONCE(ptype->ignore_outgoing)) 2490 continue; 2491 2492 /* Never send packets back to the socket 2493 * they originated from - MvS (miquels@drinkel.ow.org) 2494 */ 2495 if (skb_loop_sk(ptype, skb)) 2496 continue; 2497 2498 if (pt_prev) { 2499 deliver_skb(skb2, pt_prev, skb->dev); 2500 pt_prev = ptype; 2501 continue; 2502 } 2503 2504 /* need to clone skb, done only once */ 2505 skb2 = skb_clone(skb, GFP_ATOMIC); 2506 if (!skb2) 2507 goto out_unlock; 2508 2509 net_timestamp_set(skb2); 2510 2511 /* skb->nh should be correctly 2512 * set by sender, so that the second statement is 2513 * just protection against buggy protocols. 2514 */ 2515 skb_reset_mac_header(skb2); 2516 2517 if (skb_network_header(skb2) < skb2->data || 2518 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2519 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2520 ntohs(skb2->protocol), 2521 dev->name); 2522 skb_reset_network_header(skb2); 2523 } 2524 2525 skb2->transport_header = skb2->network_header; 2526 skb2->pkt_type = PACKET_OUTGOING; 2527 pt_prev = ptype; 2528 } 2529 2530 if (ptype_list != &dev->ptype_all) { 2531 ptype_list = &dev->ptype_all; 2532 goto again; 2533 } 2534 out_unlock: 2535 if (pt_prev) { 2536 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2537 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2538 else 2539 kfree_skb(skb2); 2540 } 2541 rcu_read_unlock(); 2542 } 2543 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2544 2545 /** 2546 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2547 * @dev: Network device 2548 * @txq: number of queues available 2549 * 2550 * If real_num_tx_queues is changed the tc mappings may no longer be 2551 * valid. To resolve this verify the tc mapping remains valid and if 2552 * not NULL the mapping. With no priorities mapping to this 2553 * offset/count pair it will no longer be used. In the worst case TC0 2554 * is invalid nothing can be done so disable priority mappings. If is 2555 * expected that drivers will fix this mapping if they can before 2556 * calling netif_set_real_num_tx_queues. 2557 */ netif_setup_tc(struct net_device * dev,unsigned int txq)2558 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2559 { 2560 int i; 2561 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2562 2563 /* If TC0 is invalidated disable TC mapping */ 2564 if (tc->offset + tc->count > txq) { 2565 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. 
Priority traffic classification disabled!\n"); 2566 dev->num_tc = 0; 2567 return; 2568 } 2569 2570 /* Invalidated prio to tc mappings set to TC0 */ 2571 for (i = 1; i < TC_BITMASK + 1; i++) { 2572 int q = netdev_get_prio_tc_map(dev, i); 2573 2574 tc = &dev->tc_to_txq[q]; 2575 if (tc->offset + tc->count > txq) { 2576 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", 2577 i, q); 2578 netdev_set_prio_tc_map(dev, i, 0); 2579 } 2580 } 2581 } 2582 netdev_txq_to_tc(struct net_device * dev,unsigned int txq)2583 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2584 { 2585 if (dev->num_tc) { 2586 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2587 int i; 2588 2589 /* walk through the TCs and see if it falls into any of them */ 2590 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2591 if ((txq - tc->offset) < tc->count) 2592 return i; 2593 } 2594 2595 /* didn't find it, just return -1 to indicate no match */ 2596 return -1; 2597 } 2598 2599 return 0; 2600 } 2601 EXPORT_SYMBOL(netdev_txq_to_tc); 2602 2603 #ifdef CONFIG_XPS 2604 static struct static_key xps_needed __read_mostly; 2605 static struct static_key xps_rxqs_needed __read_mostly; 2606 static DEFINE_MUTEX(xps_map_mutex); 2607 #define xmap_dereference(P) \ 2608 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2609 remove_xps_queue(struct xps_dev_maps * dev_maps,struct xps_dev_maps * old_maps,int tci,u16 index)2610 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2611 struct xps_dev_maps *old_maps, int tci, u16 index) 2612 { 2613 struct xps_map *map = NULL; 2614 int pos; 2615 2616 map = xmap_dereference(dev_maps->attr_map[tci]); 2617 if (!map) 2618 return false; 2619 2620 for (pos = map->len; pos--;) { 2621 if (map->queues[pos] != index) 2622 continue; 2623 2624 if (map->len > 1) { 2625 map->queues[pos] = map->queues[--map->len]; 2626 break; 2627 } 2628 2629 if (old_maps) 2630 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2631 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2632 kfree_rcu(map, rcu); 2633 return false; 2634 } 2635 2636 return true; 2637 } 2638 remove_xps_queue_cpu(struct net_device * dev,struct xps_dev_maps * dev_maps,int cpu,u16 offset,u16 count)2639 static bool remove_xps_queue_cpu(struct net_device *dev, 2640 struct xps_dev_maps *dev_maps, 2641 int cpu, u16 offset, u16 count) 2642 { 2643 int num_tc = dev_maps->num_tc; 2644 bool active = false; 2645 int tci; 2646 2647 for (tci = cpu * num_tc; num_tc--; tci++) { 2648 int i, j; 2649 2650 for (i = count, j = offset; i--; j++) { 2651 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2652 break; 2653 } 2654 2655 active |= i < 0; 2656 } 2657 2658 return active; 2659 } 2660 reset_xps_maps(struct net_device * dev,struct xps_dev_maps * dev_maps,enum xps_map_type type)2661 static void reset_xps_maps(struct net_device *dev, 2662 struct xps_dev_maps *dev_maps, 2663 enum xps_map_type type) 2664 { 2665 static_key_slow_dec_cpuslocked(&xps_needed); 2666 if (type == XPS_RXQS) 2667 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2668 2669 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2670 2671 kfree_rcu(dev_maps, rcu); 2672 } 2673 clean_xps_maps(struct net_device * dev,enum xps_map_type type,u16 offset,u16 count)2674 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2675 u16 offset, u16 count) 2676 { 2677 struct xps_dev_maps *dev_maps; 2678 bool active = false; 2679 int i, j; 2680 2681 dev_maps = xmap_dereference(dev->xps_maps[type]); 2682 if (!dev_maps) 2683 
return; 2684 2685 for (j = 0; j < dev_maps->nr_ids; j++) 2686 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2687 if (!active) 2688 reset_xps_maps(dev, dev_maps, type); 2689 2690 if (type == XPS_CPUS) { 2691 for (i = offset + (count - 1); count--; i--) 2692 netdev_queue_numa_node_write( 2693 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2694 } 2695 } 2696 netif_reset_xps_queues(struct net_device * dev,u16 offset,u16 count)2697 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2698 u16 count) 2699 { 2700 if (!static_key_false(&xps_needed)) 2701 return; 2702 2703 cpus_read_lock(); 2704 mutex_lock(&xps_map_mutex); 2705 2706 if (static_key_false(&xps_rxqs_needed)) 2707 clean_xps_maps(dev, XPS_RXQS, offset, count); 2708 2709 clean_xps_maps(dev, XPS_CPUS, offset, count); 2710 2711 mutex_unlock(&xps_map_mutex); 2712 cpus_read_unlock(); 2713 } 2714 netif_reset_xps_queues_gt(struct net_device * dev,u16 index)2715 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2716 { 2717 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2718 } 2719 expand_xps_map(struct xps_map * map,int attr_index,u16 index,bool is_rxqs_map)2720 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2721 u16 index, bool is_rxqs_map) 2722 { 2723 struct xps_map *new_map; 2724 int alloc_len = XPS_MIN_MAP_ALLOC; 2725 int i, pos; 2726 2727 for (pos = 0; map && pos < map->len; pos++) { 2728 if (map->queues[pos] != index) 2729 continue; 2730 return map; 2731 } 2732 2733 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2734 if (map) { 2735 if (pos < map->alloc_len) 2736 return map; 2737 2738 alloc_len = map->alloc_len * 2; 2739 } 2740 2741 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2742 * map 2743 */ 2744 if (is_rxqs_map) 2745 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2746 else 2747 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2748 cpu_to_node(attr_index)); 2749 if (!new_map) 2750 return NULL; 2751 2752 for (i = 0; i < pos; i++) 2753 new_map->queues[i] = map->queues[i]; 2754 new_map->alloc_len = alloc_len; 2755 new_map->len = pos; 2756 2757 return new_map; 2758 } 2759 2760 /* Copy xps maps at a given index */ xps_copy_dev_maps(struct xps_dev_maps * dev_maps,struct xps_dev_maps * new_dev_maps,int index,int tc,bool skip_tc)2761 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2762 struct xps_dev_maps *new_dev_maps, int index, 2763 int tc, bool skip_tc) 2764 { 2765 int i, tci = index * dev_maps->num_tc; 2766 struct xps_map *map; 2767 2768 /* copy maps belonging to foreign traffic classes */ 2769 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2770 if (i == tc && skip_tc) 2771 continue; 2772 2773 /* fill in the new device map from the old device map */ 2774 map = xmap_dereference(dev_maps->attr_map[tci]); 2775 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2776 } 2777 } 2778 2779 /* Must be called under cpus_read_lock */ __netif_set_xps_queue(struct net_device * dev,const unsigned long * mask,u16 index,enum xps_map_type type)2780 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2781 u16 index, enum xps_map_type type) 2782 { 2783 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2784 const unsigned long *online_mask = NULL; 2785 bool active = false, copy = false; 2786 int i, j, tci, numa_node_id = -2; 2787 int maps_sz, num_tc = 1, tc = 0; 2788 struct xps_map *map, *new_map; 2789 unsigned int nr_ids; 2790 
2791 WARN_ON_ONCE(index >= dev->num_tx_queues); 2792 2793 if (dev->num_tc) { 2794 /* Do not allow XPS on subordinate device directly */ 2795 num_tc = dev->num_tc; 2796 if (num_tc < 0) 2797 return -EINVAL; 2798 2799 /* If queue belongs to subordinate dev use its map */ 2800 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2801 2802 tc = netdev_txq_to_tc(dev, index); 2803 if (tc < 0) 2804 return -EINVAL; 2805 } 2806 2807 mutex_lock(&xps_map_mutex); 2808 2809 dev_maps = xmap_dereference(dev->xps_maps[type]); 2810 if (type == XPS_RXQS) { 2811 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2812 nr_ids = dev->num_rx_queues; 2813 } else { 2814 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2815 if (num_possible_cpus() > 1) 2816 online_mask = cpumask_bits(cpu_online_mask); 2817 nr_ids = nr_cpu_ids; 2818 } 2819 2820 if (maps_sz < L1_CACHE_BYTES) 2821 maps_sz = L1_CACHE_BYTES; 2822 2823 /* The old dev_maps could be larger or smaller than the one we're 2824 * setting up now, as dev->num_tc or nr_ids could have been updated in 2825 * between. We could try to be smart, but let's be safe instead and only 2826 * copy foreign traffic classes if the two map sizes match. 2827 */ 2828 if (dev_maps && 2829 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2830 copy = true; 2831 2832 /* allocate memory for queue storage */ 2833 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2834 j < nr_ids;) { 2835 if (!new_dev_maps) { 2836 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2837 if (!new_dev_maps) { 2838 mutex_unlock(&xps_map_mutex); 2839 return -ENOMEM; 2840 } 2841 2842 new_dev_maps->nr_ids = nr_ids; 2843 new_dev_maps->num_tc = num_tc; 2844 } 2845 2846 tci = j * num_tc + tc; 2847 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2848 2849 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2850 if (!map) 2851 goto error; 2852 2853 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2854 } 2855 2856 if (!new_dev_maps) 2857 goto out_no_new_maps; 2858 2859 if (!dev_maps) { 2860 /* Increment static keys at most once per type */ 2861 static_key_slow_inc_cpuslocked(&xps_needed); 2862 if (type == XPS_RXQS) 2863 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2864 } 2865 2866 for (j = 0; j < nr_ids; j++) { 2867 bool skip_tc = false; 2868 2869 tci = j * num_tc + tc; 2870 if (netif_attr_test_mask(j, mask, nr_ids) && 2871 netif_attr_test_online(j, online_mask, nr_ids)) { 2872 /* add tx-queue to CPU/rx-queue maps */ 2873 int pos = 0; 2874 2875 skip_tc = true; 2876 2877 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2878 while ((pos < map->len) && (map->queues[pos] != index)) 2879 pos++; 2880 2881 if (pos == map->len) 2882 map->queues[map->len++] = index; 2883 #ifdef CONFIG_NUMA 2884 if (type == XPS_CPUS) { 2885 if (numa_node_id == -2) 2886 numa_node_id = cpu_to_node(j); 2887 else if (numa_node_id != cpu_to_node(j)) 2888 numa_node_id = -1; 2889 } 2890 #endif 2891 } 2892 2893 if (copy) 2894 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2895 skip_tc); 2896 } 2897 2898 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2899 2900 /* Cleanup old maps */ 2901 if (!dev_maps) 2902 goto out_no_old_maps; 2903 2904 for (j = 0; j < dev_maps->nr_ids; j++) { 2905 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2906 map = xmap_dereference(dev_maps->attr_map[tci]); 2907 if (!map) 2908 continue; 2909 2910 if (copy) { 2911 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2912 if (map == new_map) 2913 continue; 2914 } 2915 2916 
RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2917 kfree_rcu(map, rcu); 2918 } 2919 } 2920 2921 old_dev_maps = dev_maps; 2922 2923 out_no_old_maps: 2924 dev_maps = new_dev_maps; 2925 active = true; 2926 2927 out_no_new_maps: 2928 if (type == XPS_CPUS) 2929 /* update Tx queue numa node */ 2930 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2931 (numa_node_id >= 0) ? 2932 numa_node_id : NUMA_NO_NODE); 2933 2934 if (!dev_maps) 2935 goto out_no_maps; 2936 2937 /* removes tx-queue from unused CPUs/rx-queues */ 2938 for (j = 0; j < dev_maps->nr_ids; j++) { 2939 tci = j * dev_maps->num_tc; 2940 2941 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2942 if (i == tc && 2943 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2944 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2945 continue; 2946 2947 active |= remove_xps_queue(dev_maps, 2948 copy ? old_dev_maps : NULL, 2949 tci, index); 2950 } 2951 } 2952 2953 if (old_dev_maps) 2954 kfree_rcu(old_dev_maps, rcu); 2955 2956 /* free map if not active */ 2957 if (!active) 2958 reset_xps_maps(dev, dev_maps, type); 2959 2960 out_no_maps: 2961 mutex_unlock(&xps_map_mutex); 2962 2963 return 0; 2964 error: 2965 /* remove any maps that we added */ 2966 for (j = 0; j < nr_ids; j++) { 2967 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2968 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2969 map = copy ? 2970 xmap_dereference(dev_maps->attr_map[tci]) : 2971 NULL; 2972 if (new_map && new_map != map) 2973 kfree(new_map); 2974 } 2975 } 2976 2977 mutex_unlock(&xps_map_mutex); 2978 2979 kfree(new_dev_maps); 2980 return -ENOMEM; 2981 } 2982 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2983 netif_set_xps_queue(struct net_device * dev,const struct cpumask * mask,u16 index)2984 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2985 u16 index) 2986 { 2987 int ret; 2988 2989 cpus_read_lock(); 2990 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2991 cpus_read_unlock(); 2992 2993 return ret; 2994 } 2995 EXPORT_SYMBOL(netif_set_xps_queue); 2996 2997 #endif netdev_unbind_all_sb_channels(struct net_device * dev)2998 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2999 { 3000 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 3001 3002 /* Unbind any subordinate channels */ 3003 while (txq-- != &dev->_tx[0]) { 3004 if (txq->sb_dev) 3005 netdev_unbind_sb_channel(dev, txq->sb_dev); 3006 } 3007 } 3008 netdev_reset_tc(struct net_device * dev)3009 void netdev_reset_tc(struct net_device *dev) 3010 { 3011 #ifdef CONFIG_XPS 3012 netif_reset_xps_queues_gt(dev, 0); 3013 #endif 3014 netdev_unbind_all_sb_channels(dev); 3015 3016 /* Reset TC configuration of device */ 3017 dev->num_tc = 0; 3018 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 3019 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 3020 } 3021 EXPORT_SYMBOL(netdev_reset_tc); 3022 netdev_set_tc_queue(struct net_device * dev,u8 tc,u16 count,u16 offset)3023 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 3024 { 3025 if (tc >= dev->num_tc) 3026 return -EINVAL; 3027 3028 #ifdef CONFIG_XPS 3029 netif_reset_xps_queues(dev, offset, count); 3030 #endif 3031 dev->tc_to_txq[tc].count = count; 3032 dev->tc_to_txq[tc].offset = offset; 3033 return 0; 3034 } 3035 EXPORT_SYMBOL(netdev_set_tc_queue); 3036 netdev_set_num_tc(struct net_device * dev,u8 num_tc)3037 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 3038 { 3039 if (num_tc > TC_MAX_QUEUE) 3040 return -EINVAL; 3041 3042 #ifdef 
CONFIG_XPS 3043 netif_reset_xps_queues_gt(dev, 0); 3044 #endif 3045 netdev_unbind_all_sb_channels(dev); 3046 3047 dev->num_tc = num_tc; 3048 return 0; 3049 } 3050 EXPORT_SYMBOL(netdev_set_num_tc); 3051 netdev_unbind_sb_channel(struct net_device * dev,struct net_device * sb_dev)3052 void netdev_unbind_sb_channel(struct net_device *dev, 3053 struct net_device *sb_dev) 3054 { 3055 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 3056 3057 #ifdef CONFIG_XPS 3058 netif_reset_xps_queues_gt(sb_dev, 0); 3059 #endif 3060 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 3061 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 3062 3063 while (txq-- != &dev->_tx[0]) { 3064 if (txq->sb_dev == sb_dev) 3065 txq->sb_dev = NULL; 3066 } 3067 } 3068 EXPORT_SYMBOL(netdev_unbind_sb_channel); 3069 netdev_bind_sb_channel_queue(struct net_device * dev,struct net_device * sb_dev,u8 tc,u16 count,u16 offset)3070 int netdev_bind_sb_channel_queue(struct net_device *dev, 3071 struct net_device *sb_dev, 3072 u8 tc, u16 count, u16 offset) 3073 { 3074 /* Make certain the sb_dev and dev are already configured */ 3075 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 3076 return -EINVAL; 3077 3078 /* We cannot hand out queues we don't have */ 3079 if ((offset + count) > dev->real_num_tx_queues) 3080 return -EINVAL; 3081 3082 /* Record the mapping */ 3083 sb_dev->tc_to_txq[tc].count = count; 3084 sb_dev->tc_to_txq[tc].offset = offset; 3085 3086 /* Provide a way for Tx queue to find the tc_to_txq map or 3087 * XPS map for itself. 3088 */ 3089 while (count--) 3090 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 3091 3092 return 0; 3093 } 3094 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 3095 netdev_set_sb_channel(struct net_device * dev,u16 channel)3096 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 3097 { 3098 /* Do not use a multiqueue device to represent a subordinate channel */ 3099 if (netif_is_multiqueue(dev)) 3100 return -ENODEV; 3101 3102 /* We allow channels 1 - 32767 to be used for subordinate channels. 3103 * Channel 0 is meant to be "native" mode and used only to represent 3104 * the main root device. We allow writing 0 to reset the device back 3105 * to normal mode after being used as a subordinate channel. 3106 */ 3107 if (channel > S16_MAX) 3108 return -EINVAL; 3109 3110 dev->num_tc = -channel; 3111 3112 return 0; 3113 } 3114 EXPORT_SYMBOL(netdev_set_sb_channel); 3115 3116 /* 3117 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 3118 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 
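 *
 * Illustrative driver-side use (a sketch only; "priv" and its queue count
 * are hypothetical):
 *
 *	netif_set_real_num_tx_queues(priv->netdev, priv->num_active_queues);
 *
 * called under RTNL (and with the instance lock where the device requires
 * it) once the device is registered; reducing the count flushes stale skbs
 * as described above.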
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();
		netdev_ops_assert_locked(dev);

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		net_shaper_set_real_num_tx_queues(dev, txq);

		dev_qdisc_change_real_num_tx(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();
		netdev_ops_assert_locked(dev);

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);

/**
 * netif_set_real_num_queues - set actual number of RX and TX queues used
 * @dev: Network device
 * @txq: Actual number of TX queues
 * @rxq: Actual number of RX queues
 *
 * Set the real number of both TX and RX queues.
 * Does nothing if the number of queues is already correct.
 */
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq)
{
	unsigned int old_rxq = dev->real_num_rx_queues;
	int err;

	if (txq < 1 || txq > dev->num_tx_queues ||
	    rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	/* Start from increases, so the error path only does decreases -
	 * decreases can't fail.
3217 */ 3218 if (rxq > dev->real_num_rx_queues) { 3219 err = netif_set_real_num_rx_queues(dev, rxq); 3220 if (err) 3221 return err; 3222 } 3223 if (txq > dev->real_num_tx_queues) { 3224 err = netif_set_real_num_tx_queues(dev, txq); 3225 if (err) 3226 goto undo_rx; 3227 } 3228 if (rxq < dev->real_num_rx_queues) 3229 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 3230 if (txq < dev->real_num_tx_queues) 3231 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 3232 3233 return 0; 3234 undo_rx: 3235 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 3236 return err; 3237 } 3238 EXPORT_SYMBOL(netif_set_real_num_queues); 3239 3240 /** 3241 * netif_set_tso_max_size() - set the max size of TSO frames supported 3242 * @dev: netdev to update 3243 * @size: max skb->len of a TSO frame 3244 * 3245 * Set the limit on the size of TSO super-frames the device can handle. 3246 * Unless explicitly set the stack will assume the value of 3247 * %GSO_LEGACY_MAX_SIZE. 3248 */ netif_set_tso_max_size(struct net_device * dev,unsigned int size)3249 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) 3250 { 3251 dev->tso_max_size = min(GSO_MAX_SIZE, size); 3252 if (size < READ_ONCE(dev->gso_max_size)) 3253 netif_set_gso_max_size(dev, size); 3254 if (size < READ_ONCE(dev->gso_ipv4_max_size)) 3255 netif_set_gso_ipv4_max_size(dev, size); 3256 } 3257 EXPORT_SYMBOL(netif_set_tso_max_size); 3258 3259 /** 3260 * netif_set_tso_max_segs() - set the max number of segs supported for TSO 3261 * @dev: netdev to update 3262 * @segs: max number of TCP segments 3263 * 3264 * Set the limit on the number of TCP segments the device can generate from 3265 * a single TSO super-frame. 3266 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. 3267 */ netif_set_tso_max_segs(struct net_device * dev,unsigned int segs)3268 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) 3269 { 3270 dev->tso_max_segs = segs; 3271 if (segs < READ_ONCE(dev->gso_max_segs)) 3272 netif_set_gso_max_segs(dev, segs); 3273 } 3274 EXPORT_SYMBOL(netif_set_tso_max_segs); 3275 3276 /** 3277 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper 3278 * @to: netdev to update 3279 * @from: netdev from which to copy the limits 3280 */ netif_inherit_tso_max(struct net_device * to,const struct net_device * from)3281 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) 3282 { 3283 netif_set_tso_max_size(to, from->tso_max_size); 3284 netif_set_tso_max_segs(to, from->tso_max_segs); 3285 } 3286 EXPORT_SYMBOL(netif_inherit_tso_max); 3287 3288 /** 3289 * netif_get_num_default_rss_queues - default number of RSS queues 3290 * 3291 * Default value is the number of physical cores if there are only 1 or 2, or 3292 * divided by 2 if there are more. 3293 */ netif_get_num_default_rss_queues(void)3294 int netif_get_num_default_rss_queues(void) 3295 { 3296 cpumask_var_t cpus; 3297 int cpu, count = 0; 3298 3299 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) 3300 return 1; 3301 3302 cpumask_copy(cpus, cpu_online_mask); 3303 for_each_cpu(cpu, cpus) { 3304 ++count; 3305 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); 3306 } 3307 free_cpumask_var(cpus); 3308 3309 return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; 3310 } 3311 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3312 __netif_reschedule(struct Qdisc * q)3313 static void __netif_reschedule(struct Qdisc *q) 3314 { 3315 struct softnet_data *sd; 3316 unsigned long flags; 3317 3318 local_irq_save(flags); 3319 sd = this_cpu_ptr(&softnet_data); 3320 q->next_sched = NULL; 3321 *sd->output_queue_tailp = q; 3322 sd->output_queue_tailp = &q->next_sched; 3323 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3324 local_irq_restore(flags); 3325 } 3326 __netif_schedule(struct Qdisc * q)3327 void __netif_schedule(struct Qdisc *q) 3328 { 3329 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3330 __netif_reschedule(q); 3331 } 3332 EXPORT_SYMBOL(__netif_schedule); 3333 3334 struct dev_kfree_skb_cb { 3335 enum skb_drop_reason reason; 3336 }; 3337 get_kfree_skb_cb(const struct sk_buff * skb)3338 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3339 { 3340 return (struct dev_kfree_skb_cb *)skb->cb; 3341 } 3342 netif_schedule_queue(struct netdev_queue * txq)3343 void netif_schedule_queue(struct netdev_queue *txq) 3344 { 3345 rcu_read_lock(); 3346 if (!netif_xmit_stopped(txq)) { 3347 struct Qdisc *q = rcu_dereference(txq->qdisc); 3348 3349 __netif_schedule(q); 3350 } 3351 rcu_read_unlock(); 3352 } 3353 EXPORT_SYMBOL(netif_schedule_queue); 3354 netif_tx_wake_queue(struct netdev_queue * dev_queue)3355 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3356 { 3357 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3358 struct Qdisc *q; 3359 3360 rcu_read_lock(); 3361 q = rcu_dereference(dev_queue->qdisc); 3362 __netif_schedule(q); 3363 rcu_read_unlock(); 3364 } 3365 } 3366 EXPORT_SYMBOL(netif_tx_wake_queue); 3367 dev_kfree_skb_irq_reason(struct sk_buff * skb,enum skb_drop_reason reason)3368 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3369 { 3370 unsigned long flags; 3371 3372 if (unlikely(!skb)) 3373 return; 3374 3375 if (likely(refcount_read(&skb->users) == 1)) { 3376 smp_rmb(); 3377 refcount_set(&skb->users, 0); 3378 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3379 return; 3380 } 3381 get_kfree_skb_cb(skb)->reason = reason; 3382 local_irq_save(flags); 3383 skb->next = __this_cpu_read(softnet_data.completion_queue); 3384 __this_cpu_write(softnet_data.completion_queue, skb); 3385 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3386 local_irq_restore(flags); 3387 } 3388 EXPORT_SYMBOL(dev_kfree_skb_irq_reason); 3389 dev_kfree_skb_any_reason(struct sk_buff * skb,enum skb_drop_reason reason)3390 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3391 { 3392 if (in_hardirq() || irqs_disabled()) 3393 dev_kfree_skb_irq_reason(skb, reason); 3394 else 3395 kfree_skb_reason(skb, reason); 3396 } 3397 EXPORT_SYMBOL(dev_kfree_skb_any_reason); 3398 3399 3400 /** 3401 * netif_device_detach - mark device as removed 3402 * @dev: network device 3403 * 3404 * Mark device as removed from system and therefore no longer available. 3405 */ netif_device_detach(struct net_device * dev)3406 void netif_device_detach(struct net_device *dev) 3407 { 3408 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3409 netif_running(dev)) { 3410 netif_tx_stop_all_queues(dev); 3411 } 3412 } 3413 EXPORT_SYMBOL(netif_device_detach); 3414 3415 /** 3416 * netif_device_attach - mark device as attached 3417 * @dev: network device 3418 * 3419 * Mark device as attached from system and restart if needed. 
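 *
 * Illustrative pairing with netif_device_detach() (a sketch only;
 * my_suspend()/my_resume() are hypothetical PM callbacks, and the driver is
 * assumed to keep its net_device in drvdata):
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(d);
 *
 *		netif_device_detach(ndev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(d);
 *
 *		netif_device_attach(ndev);
 *		return 0;
 *	}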
3420 */ netif_device_attach(struct net_device * dev)3421 void netif_device_attach(struct net_device *dev) 3422 { 3423 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3424 netif_running(dev)) { 3425 netif_tx_wake_all_queues(dev); 3426 netdev_watchdog_up(dev); 3427 } 3428 } 3429 EXPORT_SYMBOL(netif_device_attach); 3430 3431 /* 3432 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3433 * to be used as a distribution range. 3434 */ skb_tx_hash(const struct net_device * dev,const struct net_device * sb_dev,struct sk_buff * skb)3435 static u16 skb_tx_hash(const struct net_device *dev, 3436 const struct net_device *sb_dev, 3437 struct sk_buff *skb) 3438 { 3439 u32 hash; 3440 u16 qoffset = 0; 3441 u16 qcount = dev->real_num_tx_queues; 3442 3443 if (dev->num_tc) { 3444 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3445 3446 qoffset = sb_dev->tc_to_txq[tc].offset; 3447 qcount = sb_dev->tc_to_txq[tc].count; 3448 if (unlikely(!qcount)) { 3449 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3450 sb_dev->name, qoffset, tc); 3451 qoffset = 0; 3452 qcount = dev->real_num_tx_queues; 3453 } 3454 } 3455 3456 if (skb_rx_queue_recorded(skb)) { 3457 DEBUG_NET_WARN_ON_ONCE(qcount == 0); 3458 hash = skb_get_rx_queue(skb); 3459 if (hash >= qoffset) 3460 hash -= qoffset; 3461 while (unlikely(hash >= qcount)) 3462 hash -= qcount; 3463 return hash + qoffset; 3464 } 3465 3466 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3467 } 3468 skb_warn_bad_offload(const struct sk_buff * skb)3469 void skb_warn_bad_offload(const struct sk_buff *skb) 3470 { 3471 static const netdev_features_t null_features; 3472 struct net_device *dev = skb->dev; 3473 const char *name = ""; 3474 3475 if (!net_ratelimit()) 3476 return; 3477 3478 if (dev) { 3479 if (dev->dev.parent) 3480 name = dev_driver_string(dev->dev.parent); 3481 else 3482 name = netdev_name(dev); 3483 } 3484 skb_dump(KERN_WARNING, skb, false); 3485 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3486 name, dev ? &dev->features : &null_features, 3487 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3488 } 3489 3490 /* 3491 * Invalidate hardware checksum when packet is to be mangled, and 3492 * complete checksum manually on outgoing path. 3493 */ skb_checksum_help(struct sk_buff * skb)3494 int skb_checksum_help(struct sk_buff *skb) 3495 { 3496 __wsum csum; 3497 int ret = 0, offset; 3498 3499 if (skb->ip_summed == CHECKSUM_COMPLETE) 3500 goto out_set_summed; 3501 3502 if (unlikely(skb_is_gso(skb))) { 3503 skb_warn_bad_offload(skb); 3504 return -EINVAL; 3505 } 3506 3507 if (!skb_frags_readable(skb)) { 3508 return -EFAULT; 3509 } 3510 3511 /* Before computing a checksum, we should make sure no frag could 3512 * be modified by an external entity : checksum could be wrong. 
3513 */ 3514 if (skb_has_shared_frag(skb)) { 3515 ret = __skb_linearize(skb); 3516 if (ret) 3517 goto out; 3518 } 3519 3520 offset = skb_checksum_start_offset(skb); 3521 ret = -EINVAL; 3522 if (unlikely(offset >= skb_headlen(skb))) { 3523 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3524 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n", 3525 offset, skb_headlen(skb)); 3526 goto out; 3527 } 3528 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3529 3530 offset += skb->csum_offset; 3531 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) { 3532 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3533 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n", 3534 offset + sizeof(__sum16), skb_headlen(skb)); 3535 goto out; 3536 } 3537 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3538 if (ret) 3539 goto out; 3540 3541 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3542 out_set_summed: 3543 skb->ip_summed = CHECKSUM_NONE; 3544 out: 3545 return ret; 3546 } 3547 EXPORT_SYMBOL(skb_checksum_help); 3548 skb_crc32c_csum_help(struct sk_buff * skb)3549 int skb_crc32c_csum_help(struct sk_buff *skb) 3550 { 3551 __le32 crc32c_csum; 3552 int ret = 0, offset, start; 3553 3554 if (skb->ip_summed != CHECKSUM_PARTIAL) 3555 goto out; 3556 3557 if (unlikely(skb_is_gso(skb))) 3558 goto out; 3559 3560 /* Before computing a checksum, we should make sure no frag could 3561 * be modified by an external entity : checksum could be wrong. 3562 */ 3563 if (unlikely(skb_has_shared_frag(skb))) { 3564 ret = __skb_linearize(skb); 3565 if (ret) 3566 goto out; 3567 } 3568 start = skb_checksum_start_offset(skb); 3569 offset = start + offsetof(struct sctphdr, checksum); 3570 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3571 ret = -EINVAL; 3572 goto out; 3573 } 3574 3575 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3576 if (ret) 3577 goto out; 3578 3579 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3580 skb->len - start, ~(__u32)0, 3581 crc32c_csum_stub)); 3582 *(__le32 *)(skb->data + offset) = crc32c_csum; 3583 skb_reset_csum_not_inet(skb); 3584 out: 3585 return ret; 3586 } 3587 EXPORT_SYMBOL(skb_crc32c_csum_help); 3588 skb_network_protocol(struct sk_buff * skb,int * depth)3589 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3590 { 3591 __be16 type = skb->protocol; 3592 3593 /* Tunnel gso handlers can set protocol to ethernet. */ 3594 if (type == htons(ETH_P_TEB)) { 3595 struct ethhdr *eth; 3596 3597 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3598 return 0; 3599 3600 eth = (struct ethhdr *)skb->data; 3601 type = eth->h_proto; 3602 } 3603 3604 return vlan_get_protocol_and_depth(skb, type, depth); 3605 } 3606 3607 3608 /* Take action when hardware reception checksum errors are detected. */ 3609 #ifdef CONFIG_BUG do_netdev_rx_csum_fault(struct net_device * dev,struct sk_buff * skb)3610 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3611 { 3612 netdev_err(dev, "hw csum failure\n"); 3613 skb_dump(KERN_ERR, skb, true); 3614 dump_stack(); 3615 } 3616 netdev_rx_csum_fault(struct net_device * dev,struct sk_buff * skb)3617 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3618 { 3619 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3620 } 3621 EXPORT_SYMBOL(netdev_rx_csum_fault); 3622 #endif 3623 3624 /* XXX: check that highmem exists at all on the given machine. 
*/ illegal_highdma(struct net_device * dev,struct sk_buff * skb)3625 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3626 { 3627 #ifdef CONFIG_HIGHMEM 3628 int i; 3629 3630 if (!(dev->features & NETIF_F_HIGHDMA)) { 3631 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3632 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3633 struct page *page = skb_frag_page(frag); 3634 3635 if (page && PageHighMem(page)) 3636 return 1; 3637 } 3638 } 3639 #endif 3640 return 0; 3641 } 3642 3643 /* If MPLS offload request, verify we are testing hardware MPLS features 3644 * instead of standard features for the netdev. 3645 */ 3646 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) net_mpls_features(struct sk_buff * skb,netdev_features_t features,__be16 type)3647 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3648 netdev_features_t features, 3649 __be16 type) 3650 { 3651 if (eth_p_mpls(type)) 3652 features &= skb->dev->mpls_features; 3653 3654 return features; 3655 } 3656 #else net_mpls_features(struct sk_buff * skb,netdev_features_t features,__be16 type)3657 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3658 netdev_features_t features, 3659 __be16 type) 3660 { 3661 return features; 3662 } 3663 #endif 3664 harmonize_features(struct sk_buff * skb,netdev_features_t features)3665 static netdev_features_t harmonize_features(struct sk_buff *skb, 3666 netdev_features_t features) 3667 { 3668 __be16 type; 3669 3670 type = skb_network_protocol(skb, NULL); 3671 features = net_mpls_features(skb, features, type); 3672 3673 if (skb->ip_summed != CHECKSUM_NONE && 3674 !can_checksum_protocol(features, type)) { 3675 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3676 } 3677 if (illegal_highdma(skb->dev, skb)) 3678 features &= ~NETIF_F_SG; 3679 3680 return features; 3681 } 3682 passthru_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)3683 netdev_features_t passthru_features_check(struct sk_buff *skb, 3684 struct net_device *dev, 3685 netdev_features_t features) 3686 { 3687 return features; 3688 } 3689 EXPORT_SYMBOL(passthru_features_check); 3690 dflt_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)3691 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3692 struct net_device *dev, 3693 netdev_features_t features) 3694 { 3695 return vlan_features_check(skb, features); 3696 } 3697 gso_features_check(const struct sk_buff * skb,struct net_device * dev,netdev_features_t features)3698 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3699 struct net_device *dev, 3700 netdev_features_t features) 3701 { 3702 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3703 3704 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3705 return features & ~NETIF_F_GSO_MASK; 3706 3707 if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb))) 3708 return features & ~NETIF_F_GSO_MASK; 3709 3710 if (!skb_shinfo(skb)->gso_type) { 3711 skb_warn_bad_offload(skb); 3712 return features & ~NETIF_F_GSO_MASK; 3713 } 3714 3715 /* Support for GSO partial features requires software 3716 * intervention before we can actually process the packets 3717 * so we need to strip support for any partial features now 3718 * and we can pull them back in after we have partially 3719 * segmented the frame. 
3720 */ 3721 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3722 features &= ~dev->gso_partial_features; 3723 3724 /* Make sure to clear the IPv4 ID mangling feature if the 3725 * IPv4 header has the potential to be fragmented. 3726 */ 3727 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3728 struct iphdr *iph = skb->encapsulation ? 3729 inner_ip_hdr(skb) : ip_hdr(skb); 3730 3731 if (!(iph->frag_off & htons(IP_DF))) 3732 features &= ~NETIF_F_TSO_MANGLEID; 3733 } 3734 3735 return features; 3736 } 3737 netif_skb_features(struct sk_buff * skb)3738 netdev_features_t netif_skb_features(struct sk_buff *skb) 3739 { 3740 struct net_device *dev = skb->dev; 3741 netdev_features_t features = dev->features; 3742 3743 if (skb_is_gso(skb)) 3744 features = gso_features_check(skb, dev, features); 3745 3746 /* If encapsulation offload request, verify we are testing 3747 * hardware encapsulation features instead of standard 3748 * features for the netdev 3749 */ 3750 if (skb->encapsulation) 3751 features &= dev->hw_enc_features; 3752 3753 if (skb_vlan_tagged(skb)) 3754 features = netdev_intersect_features(features, 3755 dev->vlan_features | 3756 NETIF_F_HW_VLAN_CTAG_TX | 3757 NETIF_F_HW_VLAN_STAG_TX); 3758 3759 if (dev->netdev_ops->ndo_features_check) 3760 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3761 features); 3762 else 3763 features &= dflt_features_check(skb, dev, features); 3764 3765 return harmonize_features(skb, features); 3766 } 3767 EXPORT_SYMBOL(netif_skb_features); 3768 xmit_one(struct sk_buff * skb,struct net_device * dev,struct netdev_queue * txq,bool more)3769 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3770 struct netdev_queue *txq, bool more) 3771 { 3772 unsigned int len; 3773 int rc; 3774 3775 if (dev_nit_active_rcu(dev)) 3776 dev_queue_xmit_nit(skb, dev); 3777 3778 len = skb->len; 3779 trace_net_dev_start_xmit(skb, dev); 3780 rc = netdev_start_xmit(skb, dev, txq, more); 3781 trace_net_dev_xmit(skb, rc, dev, len); 3782 3783 return rc; 3784 } 3785 dev_hard_start_xmit(struct sk_buff * first,struct net_device * dev,struct netdev_queue * txq,int * ret)3786 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3787 struct netdev_queue *txq, int *ret) 3788 { 3789 struct sk_buff *skb = first; 3790 int rc = NETDEV_TX_OK; 3791 3792 while (skb) { 3793 struct sk_buff *next = skb->next; 3794 3795 skb_mark_not_on_list(skb); 3796 rc = xmit_one(skb, dev, txq, next != NULL); 3797 if (unlikely(!dev_xmit_complete(rc))) { 3798 skb->next = next; 3799 goto out; 3800 } 3801 3802 skb = next; 3803 if (netif_tx_queue_stopped(txq) && skb) { 3804 rc = NETDEV_TX_BUSY; 3805 break; 3806 } 3807 } 3808 3809 out: 3810 *ret = rc; 3811 return skb; 3812 } 3813 validate_xmit_vlan(struct sk_buff * skb,netdev_features_t features)3814 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3815 netdev_features_t features) 3816 { 3817 if (skb_vlan_tag_present(skb) && 3818 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3819 skb = __vlan_hwaccel_push_inside(skb); 3820 return skb; 3821 } 3822 skb_csum_hwoffload_help(struct sk_buff * skb,const netdev_features_t features)3823 int skb_csum_hwoffload_help(struct sk_buff *skb, 3824 const netdev_features_t features) 3825 { 3826 if (unlikely(skb_csum_is_sctp(skb))) 3827 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3828 skb_crc32c_csum_help(skb); 3829 3830 if (features & NETIF_F_HW_CSUM) 3831 return 0; 3832 3833 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3834 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) && 3835 skb_network_header_len(skb) != sizeof(struct ipv6hdr) && 3836 !ipv6_has_hopopt_jumbo(skb)) 3837 goto sw_checksum; 3838 3839 switch (skb->csum_offset) { 3840 case offsetof(struct tcphdr, check): 3841 case offsetof(struct udphdr, check): 3842 return 0; 3843 } 3844 } 3845 3846 sw_checksum: 3847 return skb_checksum_help(skb); 3848 } 3849 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3850 validate_xmit_skb(struct sk_buff * skb,struct net_device * dev,bool * again)3851 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3852 { 3853 netdev_features_t features; 3854 3855 if (!skb_frags_readable(skb)) 3856 goto out_kfree_skb; 3857 3858 features = netif_skb_features(skb); 3859 skb = validate_xmit_vlan(skb, features); 3860 if (unlikely(!skb)) 3861 goto out_null; 3862 3863 skb = sk_validate_xmit_skb(skb, dev); 3864 if (unlikely(!skb)) 3865 goto out_null; 3866 3867 if (netif_needs_gso(skb, features)) { 3868 struct sk_buff *segs; 3869 3870 segs = skb_gso_segment(skb, features); 3871 if (IS_ERR(segs)) { 3872 goto out_kfree_skb; 3873 } else if (segs) { 3874 consume_skb(skb); 3875 skb = segs; 3876 } 3877 } else { 3878 if (skb_needs_linearize(skb, features) && 3879 __skb_linearize(skb)) 3880 goto out_kfree_skb; 3881 3882 /* If packet is not checksummed and device does not 3883 * support checksumming for this protocol, complete 3884 * checksumming here. 3885 */ 3886 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3887 if (skb->encapsulation) 3888 skb_set_inner_transport_header(skb, 3889 skb_checksum_start_offset(skb)); 3890 else 3891 skb_set_transport_header(skb, 3892 skb_checksum_start_offset(skb)); 3893 if (skb_csum_hwoffload_help(skb, features)) 3894 goto out_kfree_skb; 3895 } 3896 } 3897 3898 skb = validate_xmit_xfrm(skb, features, again); 3899 3900 return skb; 3901 3902 out_kfree_skb: 3903 kfree_skb(skb); 3904 out_null: 3905 dev_core_stats_tx_dropped_inc(dev); 3906 return NULL; 3907 } 3908 validate_xmit_skb_list(struct sk_buff * skb,struct net_device * dev,bool * again)3909 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3910 { 3911 struct sk_buff *next, *head = NULL, *tail; 3912 3913 for (; skb != NULL; skb = next) { 3914 next = skb->next; 3915 skb_mark_not_on_list(skb); 3916 3917 /* in case skb won't be segmented, point to itself */ 3918 skb->prev = skb; 3919 3920 skb = validate_xmit_skb(skb, dev, again); 3921 if (!skb) 3922 continue; 3923 3924 if (!head) 3925 head = skb; 3926 else 3927 tail->next = skb; 3928 /* If skb was segmented, skb->prev points to 3929 * the last segment. If not, it still contains skb. 
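 *
 * Either way, the assignment below lets the loop splice an entire GSO
 * segment chain onto the output list in O(1): the head's ->prev already
 * records where the chain ends, so there is no need to walk it to find
 * the new tail.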
3930 */ 3931 tail = skb->prev; 3932 } 3933 return head; 3934 } 3935 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3936 qdisc_pkt_len_init(struct sk_buff * skb)3937 static void qdisc_pkt_len_init(struct sk_buff *skb) 3938 { 3939 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3940 3941 qdisc_skb_cb(skb)->pkt_len = skb->len; 3942 3943 /* To get more precise estimation of bytes sent on wire, 3944 * we add to pkt_len the headers size of all segments 3945 */ 3946 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3947 u16 gso_segs = shinfo->gso_segs; 3948 unsigned int hdr_len; 3949 3950 /* mac layer + network layer */ 3951 hdr_len = skb_transport_offset(skb); 3952 3953 /* + transport layer */ 3954 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3955 const struct tcphdr *th; 3956 struct tcphdr _tcphdr; 3957 3958 th = skb_header_pointer(skb, hdr_len, 3959 sizeof(_tcphdr), &_tcphdr); 3960 if (likely(th)) 3961 hdr_len += __tcp_hdrlen(th); 3962 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 3963 struct udphdr _udphdr; 3964 3965 if (skb_header_pointer(skb, hdr_len, 3966 sizeof(_udphdr), &_udphdr)) 3967 hdr_len += sizeof(struct udphdr); 3968 } 3969 3970 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) { 3971 int payload = skb->len - hdr_len; 3972 3973 /* Malicious packet. */ 3974 if (payload <= 0) 3975 return; 3976 gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size); 3977 } 3978 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3979 } 3980 } 3981 dev_qdisc_enqueue(struct sk_buff * skb,struct Qdisc * q,struct sk_buff ** to_free,struct netdev_queue * txq)3982 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3983 struct sk_buff **to_free, 3984 struct netdev_queue *txq) 3985 { 3986 int rc; 3987 3988 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3989 if (rc == NET_XMIT_SUCCESS) 3990 trace_qdisc_enqueue(q, txq, skb); 3991 return rc; 3992 } 3993 __dev_xmit_skb(struct sk_buff * skb,struct Qdisc * q,struct net_device * dev,struct netdev_queue * txq)3994 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3995 struct net_device *dev, 3996 struct netdev_queue *txq) 3997 { 3998 spinlock_t *root_lock = qdisc_lock(q); 3999 struct sk_buff *to_free = NULL; 4000 bool contended; 4001 int rc; 4002 4003 qdisc_calculate_pkt_len(skb, q); 4004 4005 tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP); 4006 4007 if (q->flags & TCQ_F_NOLOCK) { 4008 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 4009 qdisc_run_begin(q)) { 4010 /* Retest nolock_qdisc_is_empty() within the protection 4011 * of q->seqlock to protect from racing with requeuing. 
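 *
 * Illustrative reasoning: if another CPU managed to requeue packets
 * between the earlier nolock_qdisc_is_empty() check and
 * qdisc_run_begin(), transmitting this skb directly would let it
 * overtake those packets and reorder the flow, so in that case it is
 * enqueued and the qdisc is run normally instead.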
4012 */ 4013 if (unlikely(!nolock_qdisc_is_empty(q))) { 4014 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 4015 __qdisc_run(q); 4016 qdisc_run_end(q); 4017 4018 goto no_lock_out; 4019 } 4020 4021 qdisc_bstats_cpu_update(q, skb); 4022 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 4023 !nolock_qdisc_is_empty(q)) 4024 __qdisc_run(q); 4025 4026 qdisc_run_end(q); 4027 return NET_XMIT_SUCCESS; 4028 } 4029 4030 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 4031 qdisc_run(q); 4032 4033 no_lock_out: 4034 if (unlikely(to_free)) 4035 kfree_skb_list_reason(to_free, 4036 tcf_get_drop_reason(to_free)); 4037 return rc; 4038 } 4039 4040 if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) { 4041 kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP); 4042 return NET_XMIT_DROP; 4043 } 4044 /* 4045 * Heuristic to force contended enqueues to serialize on a 4046 * separate lock before trying to get qdisc main lock. 4047 * This permits qdisc->running owner to get the lock more 4048 * often and dequeue packets faster. 4049 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 4050 * and then other tasks will only enqueue packets. The packets will be 4051 * sent after the qdisc owner is scheduled again. To prevent this 4052 * scenario the task always serialize on the lock. 4053 */ 4054 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 4055 if (unlikely(contended)) 4056 spin_lock(&q->busylock); 4057 4058 spin_lock(root_lock); 4059 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 4060 __qdisc_drop(skb, &to_free); 4061 rc = NET_XMIT_DROP; 4062 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 4063 qdisc_run_begin(q)) { 4064 /* 4065 * This is a work-conserving queue; there are no old skbs 4066 * waiting to be sent out; and the qdisc is not running - 4067 * xmit the skb directly. 
4068 */ 4069 4070 qdisc_bstats_update(q, skb); 4071 4072 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 4073 if (unlikely(contended)) { 4074 spin_unlock(&q->busylock); 4075 contended = false; 4076 } 4077 __qdisc_run(q); 4078 } 4079 4080 qdisc_run_end(q); 4081 rc = NET_XMIT_SUCCESS; 4082 } else { 4083 WRITE_ONCE(q->owner, smp_processor_id()); 4084 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 4085 WRITE_ONCE(q->owner, -1); 4086 if (qdisc_run_begin(q)) { 4087 if (unlikely(contended)) { 4088 spin_unlock(&q->busylock); 4089 contended = false; 4090 } 4091 __qdisc_run(q); 4092 qdisc_run_end(q); 4093 } 4094 } 4095 spin_unlock(root_lock); 4096 if (unlikely(to_free)) 4097 kfree_skb_list_reason(to_free, 4098 tcf_get_drop_reason(to_free)); 4099 if (unlikely(contended)) 4100 spin_unlock(&q->busylock); 4101 return rc; 4102 } 4103 4104 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) skb_update_prio(struct sk_buff * skb)4105 static void skb_update_prio(struct sk_buff *skb) 4106 { 4107 const struct netprio_map *map; 4108 const struct sock *sk; 4109 unsigned int prioidx; 4110 4111 if (skb->priority) 4112 return; 4113 map = rcu_dereference_bh(skb->dev->priomap); 4114 if (!map) 4115 return; 4116 sk = skb_to_full_sk(skb); 4117 if (!sk) 4118 return; 4119 4120 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 4121 4122 if (prioidx < map->priomap_len) 4123 skb->priority = map->priomap[prioidx]; 4124 } 4125 #else 4126 #define skb_update_prio(skb) 4127 #endif 4128 4129 /** 4130 * dev_loopback_xmit - loop back @skb 4131 * @net: network namespace this loopback is happening in 4132 * @sk: sk needed to be a netfilter okfn 4133 * @skb: buffer to transmit 4134 */ dev_loopback_xmit(struct net * net,struct sock * sk,struct sk_buff * skb)4135 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 4136 { 4137 skb_reset_mac_header(skb); 4138 __skb_pull(skb, skb_network_offset(skb)); 4139 skb->pkt_type = PACKET_LOOPBACK; 4140 if (skb->ip_summed == CHECKSUM_NONE) 4141 skb->ip_summed = CHECKSUM_UNNECESSARY; 4142 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb)); 4143 skb_dst_force(skb); 4144 netif_rx(skb); 4145 return 0; 4146 } 4147 EXPORT_SYMBOL(dev_loopback_xmit); 4148 4149 #ifdef CONFIG_NET_EGRESS 4150 static struct netdev_queue * netdev_tx_queue_mapping(struct net_device * dev,struct sk_buff * skb)4151 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) 4152 { 4153 int qm = skb_get_queue_mapping(skb); 4154 4155 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 4156 } 4157 4158 #ifndef CONFIG_PREEMPT_RT netdev_xmit_txqueue_skipped(void)4159 static bool netdev_xmit_txqueue_skipped(void) 4160 { 4161 return __this_cpu_read(softnet_data.xmit.skip_txqueue); 4162 } 4163 netdev_xmit_skip_txqueue(bool skip)4164 void netdev_xmit_skip_txqueue(bool skip) 4165 { 4166 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 4167 } 4168 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 4169 4170 #else netdev_xmit_txqueue_skipped(void)4171 static bool netdev_xmit_txqueue_skipped(void) 4172 { 4173 return current->net_xmit.skip_txqueue; 4174 } 4175 netdev_xmit_skip_txqueue(bool skip)4176 void netdev_xmit_skip_txqueue(bool skip) 4177 { 4178 current->net_xmit.skip_txqueue = skip; 4179 } 4180 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 4181 #endif 4182 #endif /* CONFIG_NET_EGRESS */ 4183 4184 #ifdef CONFIG_NET_XGRESS tc_run(struct tcx_entry * entry,struct sk_buff * skb,enum skb_drop_reason * drop_reason)4185 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb, 4186 enum skb_drop_reason 
*drop_reason) 4187 { 4188 int ret = TC_ACT_UNSPEC; 4189 #ifdef CONFIG_NET_CLS_ACT 4190 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq); 4191 struct tcf_result res; 4192 4193 if (!miniq) 4194 return ret; 4195 4196 /* Global bypass */ 4197 if (!static_branch_likely(&tcf_sw_enabled_key)) 4198 return ret; 4199 4200 /* Block-wise bypass */ 4201 if (tcf_block_bypass_sw(miniq->block)) 4202 return ret; 4203 4204 tc_skb_cb(skb)->mru = 0; 4205 tc_skb_cb(skb)->post_ct = false; 4206 tcf_set_drop_reason(skb, *drop_reason); 4207 4208 mini_qdisc_bstats_cpu_update(miniq, skb); 4209 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false); 4210 /* Only tcf related quirks below. */ 4211 switch (ret) { 4212 case TC_ACT_SHOT: 4213 *drop_reason = tcf_get_drop_reason(skb); 4214 mini_qdisc_qstats_cpu_drop(miniq); 4215 break; 4216 case TC_ACT_OK: 4217 case TC_ACT_RECLASSIFY: 4218 skb->tc_index = TC_H_MIN(res.classid); 4219 break; 4220 } 4221 #endif /* CONFIG_NET_CLS_ACT */ 4222 return ret; 4223 } 4224 4225 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key); 4226 tcx_inc(void)4227 void tcx_inc(void) 4228 { 4229 static_branch_inc(&tcx_needed_key); 4230 } 4231 tcx_dec(void)4232 void tcx_dec(void) 4233 { 4234 static_branch_dec(&tcx_needed_key); 4235 } 4236 4237 static __always_inline enum tcx_action_base tcx_run(const struct bpf_mprog_entry * entry,struct sk_buff * skb,const bool needs_mac)4238 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, 4239 const bool needs_mac) 4240 { 4241 const struct bpf_mprog_fp *fp; 4242 const struct bpf_prog *prog; 4243 int ret = TCX_NEXT; 4244 4245 if (needs_mac) 4246 __skb_push(skb, skb->mac_len); 4247 bpf_mprog_foreach_prog(entry, fp, prog) { 4248 bpf_compute_data_pointers(skb); 4249 ret = bpf_prog_run(prog, skb); 4250 if (ret != TCX_NEXT) 4251 break; 4252 } 4253 if (needs_mac) 4254 __skb_pull(skb, skb->mac_len); 4255 return tcx_action_code(skb, ret); 4256 } 4257 4258 static __always_inline struct sk_buff * sch_handle_ingress(struct sk_buff * skb,struct packet_type ** pt_prev,int * ret,struct net_device * orig_dev,bool * another)4259 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4260 struct net_device *orig_dev, bool *another) 4261 { 4262 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress); 4263 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS; 4264 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 4265 int sch_ret; 4266 4267 if (!entry) 4268 return skb; 4269 4270 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 4271 if (*pt_prev) { 4272 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4273 *pt_prev = NULL; 4274 } 4275 4276 qdisc_skb_cb(skb)->pkt_len = skb->len; 4277 tcx_set_ingress(skb, true); 4278 4279 if (static_branch_unlikely(&tcx_needed_key)) { 4280 sch_ret = tcx_run(entry, skb, true); 4281 if (sch_ret != TC_ACT_UNSPEC) 4282 goto ingress_verdict; 4283 } 4284 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason); 4285 ingress_verdict: 4286 switch (sch_ret) { 4287 case TC_ACT_REDIRECT: 4288 /* skb_mac_header check was done by BPF, so we can safely 4289 * push the L2 header back before redirecting to another 4290 * netdev. 
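 *
 * Illustrative note: an -EAGAIN return from skb_do_redirect() means the
 * program redirected the packet to the ingress path of another device;
 * the mac header is pulled again and *another is set so that the caller
 * restarts receive processing with the new skb->dev.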
4291 */ 4292 __skb_push(skb, skb->mac_len); 4293 if (skb_do_redirect(skb) == -EAGAIN) { 4294 __skb_pull(skb, skb->mac_len); 4295 *another = true; 4296 break; 4297 } 4298 *ret = NET_RX_SUCCESS; 4299 bpf_net_ctx_clear(bpf_net_ctx); 4300 return NULL; 4301 case TC_ACT_SHOT: 4302 kfree_skb_reason(skb, drop_reason); 4303 *ret = NET_RX_DROP; 4304 bpf_net_ctx_clear(bpf_net_ctx); 4305 return NULL; 4306 /* used by tc_run */ 4307 case TC_ACT_STOLEN: 4308 case TC_ACT_QUEUED: 4309 case TC_ACT_TRAP: 4310 consume_skb(skb); 4311 fallthrough; 4312 case TC_ACT_CONSUMED: 4313 *ret = NET_RX_SUCCESS; 4314 bpf_net_ctx_clear(bpf_net_ctx); 4315 return NULL; 4316 } 4317 bpf_net_ctx_clear(bpf_net_ctx); 4318 4319 return skb; 4320 } 4321 4322 static __always_inline struct sk_buff * sch_handle_egress(struct sk_buff * skb,int * ret,struct net_device * dev)4323 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 4324 { 4325 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress); 4326 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS; 4327 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 4328 int sch_ret; 4329 4330 if (!entry) 4331 return skb; 4332 4333 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 4334 4335 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was 4336 * already set by the caller. 4337 */ 4338 if (static_branch_unlikely(&tcx_needed_key)) { 4339 sch_ret = tcx_run(entry, skb, false); 4340 if (sch_ret != TC_ACT_UNSPEC) 4341 goto egress_verdict; 4342 } 4343 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason); 4344 egress_verdict: 4345 switch (sch_ret) { 4346 case TC_ACT_REDIRECT: 4347 /* No need to push/pop skb's mac_header here on egress! */ 4348 skb_do_redirect(skb); 4349 *ret = NET_XMIT_SUCCESS; 4350 bpf_net_ctx_clear(bpf_net_ctx); 4351 return NULL; 4352 case TC_ACT_SHOT: 4353 kfree_skb_reason(skb, drop_reason); 4354 *ret = NET_XMIT_DROP; 4355 bpf_net_ctx_clear(bpf_net_ctx); 4356 return NULL; 4357 /* used by tc_run */ 4358 case TC_ACT_STOLEN: 4359 case TC_ACT_QUEUED: 4360 case TC_ACT_TRAP: 4361 consume_skb(skb); 4362 fallthrough; 4363 case TC_ACT_CONSUMED: 4364 *ret = NET_XMIT_SUCCESS; 4365 bpf_net_ctx_clear(bpf_net_ctx); 4366 return NULL; 4367 } 4368 bpf_net_ctx_clear(bpf_net_ctx); 4369 4370 return skb; 4371 } 4372 #else 4373 static __always_inline struct sk_buff * sch_handle_ingress(struct sk_buff * skb,struct packet_type ** pt_prev,int * ret,struct net_device * orig_dev,bool * another)4374 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4375 struct net_device *orig_dev, bool *another) 4376 { 4377 return skb; 4378 } 4379 4380 static __always_inline struct sk_buff * sch_handle_egress(struct sk_buff * skb,int * ret,struct net_device * dev)4381 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 4382 { 4383 return skb; 4384 } 4385 #endif /* CONFIG_NET_XGRESS */ 4386 4387 #ifdef CONFIG_XPS __get_xps_queue_idx(struct net_device * dev,struct sk_buff * skb,struct xps_dev_maps * dev_maps,unsigned int tci)4388 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 4389 struct xps_dev_maps *dev_maps, unsigned int tci) 4390 { 4391 int tc = netdev_get_prio_tc_map(dev, skb->priority); 4392 struct xps_map *map; 4393 int queue_index = -1; 4394 4395 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 4396 return queue_index; 4397 4398 tci *= dev_maps->num_tc; 4399 tci += tc; 4400 4401 map = rcu_dereference(dev_maps->attr_map[tci]); 4402 if (map) { 4403 if (map->len == 1) 4404 queue_index = 
map->queues[0]; 4405 else 4406 queue_index = map->queues[reciprocal_scale( 4407 skb_get_hash(skb), map->len)]; 4408 if (unlikely(queue_index >= dev->real_num_tx_queues)) 4409 queue_index = -1; 4410 } 4411 return queue_index; 4412 } 4413 #endif 4414 get_xps_queue(struct net_device * dev,struct net_device * sb_dev,struct sk_buff * skb)4415 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 4416 struct sk_buff *skb) 4417 { 4418 #ifdef CONFIG_XPS 4419 struct xps_dev_maps *dev_maps; 4420 struct sock *sk = skb->sk; 4421 int queue_index = -1; 4422 4423 if (!static_key_false(&xps_needed)) 4424 return -1; 4425 4426 rcu_read_lock(); 4427 if (!static_key_false(&xps_rxqs_needed)) 4428 goto get_cpus_map; 4429 4430 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4431 if (dev_maps) { 4432 int tci = sk_rx_queue_get(sk); 4433 4434 if (tci >= 0) 4435 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4436 tci); 4437 } 4438 4439 get_cpus_map: 4440 if (queue_index < 0) { 4441 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4442 if (dev_maps) { 4443 unsigned int tci = skb->sender_cpu - 1; 4444 4445 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4446 tci); 4447 } 4448 } 4449 rcu_read_unlock(); 4450 4451 return queue_index; 4452 #else 4453 return -1; 4454 #endif 4455 } 4456 dev_pick_tx_zero(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)4457 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4458 struct net_device *sb_dev) 4459 { 4460 return 0; 4461 } 4462 EXPORT_SYMBOL(dev_pick_tx_zero); 4463 netdev_pick_tx(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)4464 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4465 struct net_device *sb_dev) 4466 { 4467 struct sock *sk = skb->sk; 4468 int queue_index = sk_tx_queue_get(sk); 4469 4470 sb_dev = sb_dev ? : dev; 4471 4472 if (queue_index < 0 || skb->ooo_okay || 4473 queue_index >= dev->real_num_tx_queues) { 4474 int new_index = get_xps_queue(dev, sb_dev, skb); 4475 4476 if (new_index < 0) 4477 new_index = skb_tx_hash(dev, sb_dev, skb); 4478 4479 if (queue_index != new_index && sk && 4480 sk_fullsock(sk) && 4481 rcu_access_pointer(sk->sk_dst_cache)) 4482 sk_tx_queue_set(sk, new_index); 4483 4484 queue_index = new_index; 4485 } 4486 4487 return queue_index; 4488 } 4489 EXPORT_SYMBOL(netdev_pick_tx); 4490 netdev_core_pick_tx(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)4491 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 4492 struct sk_buff *skb, 4493 struct net_device *sb_dev) 4494 { 4495 int queue_index = 0; 4496 4497 #ifdef CONFIG_XPS 4498 u32 sender_cpu = skb->sender_cpu - 1; 4499 4500 if (sender_cpu >= (u32)NR_CPUS) 4501 skb->sender_cpu = raw_smp_processor_id() + 1; 4502 #endif 4503 4504 if (dev->real_num_tx_queues != 1) { 4505 const struct net_device_ops *ops = dev->netdev_ops; 4506 4507 if (ops->ndo_select_queue) 4508 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4509 else 4510 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4511 4512 queue_index = netdev_cap_txqueue(dev, queue_index); 4513 } 4514 4515 skb_set_queue_mapping(skb, queue_index); 4516 return netdev_get_tx_queue(dev, queue_index); 4517 } 4518 4519 /** 4520 * __dev_queue_xmit() - transmit a buffer 4521 * @skb: buffer to transmit 4522 * @sb_dev: suboordinate device used for L2 forwarding offload 4523 * 4524 * Queue a buffer for transmission to a network device. 
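 * Most callers use the dev_queue_xmit() wrapper rather than calling this
 * function directly; a minimal, purely illustrative transmit sequence is:
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	dev_queue_xmit(skb);
 *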
The caller must 4525 * have set the device and priority and built the buffer before calling 4526 * this function. The function can be called from an interrupt. 4527 * 4528 * When calling this method, interrupts MUST be enabled. This is because 4529 * the BH enable code must have IRQs enabled so that it will not deadlock. 4530 * 4531 * Regardless of the return value, the skb is consumed, so it is currently 4532 * difficult to retry a send to this method. (You can bump the ref count 4533 * before sending to hold a reference for retry if you are careful.) 4534 * 4535 * Return: 4536 * * 0 - buffer successfully transmitted 4537 * * positive qdisc return code - NET_XMIT_DROP etc. 4538 * * negative errno - other errors 4539 */ __dev_queue_xmit(struct sk_buff * skb,struct net_device * sb_dev)4540 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 4541 { 4542 struct net_device *dev = skb->dev; 4543 struct netdev_queue *txq = NULL; 4544 struct Qdisc *q; 4545 int rc = -ENOMEM; 4546 bool again = false; 4547 4548 skb_reset_mac_header(skb); 4549 skb_assert_len(skb); 4550 4551 if (unlikely(skb_shinfo(skb)->tx_flags & 4552 (SKBTX_SCHED_TSTAMP | SKBTX_BPF))) 4553 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); 4554 4555 /* Disable soft irqs for various locks below. Also 4556 * stops preemption for RCU. 4557 */ 4558 rcu_read_lock_bh(); 4559 4560 skb_update_prio(skb); 4561 4562 qdisc_pkt_len_init(skb); 4563 tcx_set_ingress(skb, false); 4564 #ifdef CONFIG_NET_EGRESS 4565 if (static_branch_unlikely(&egress_needed_key)) { 4566 if (nf_hook_egress_active()) { 4567 skb = nf_hook_egress(skb, &rc, dev); 4568 if (!skb) 4569 goto out; 4570 } 4571 4572 netdev_xmit_skip_txqueue(false); 4573 4574 nf_skip_egress(skb, true); 4575 skb = sch_handle_egress(skb, &rc, dev); 4576 if (!skb) 4577 goto out; 4578 nf_skip_egress(skb, false); 4579 4580 if (netdev_xmit_txqueue_skipped()) 4581 txq = netdev_tx_queue_mapping(dev, skb); 4582 } 4583 #endif 4584 /* If device/qdisc don't need skb->dst, release it right now while 4585 * its hot in this cpu cache. 4586 */ 4587 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 4588 skb_dst_drop(skb); 4589 else 4590 skb_dst_force(skb); 4591 4592 if (!txq) 4593 txq = netdev_core_pick_tx(dev, skb, sb_dev); 4594 4595 q = rcu_dereference_bh(txq->qdisc); 4596 4597 trace_net_dev_queue(skb); 4598 if (q->enqueue) { 4599 rc = __dev_xmit_skb(skb, q, dev, txq); 4600 goto out; 4601 } 4602 4603 /* The device has no queue. Common case for software devices: 4604 * loopback, all the sorts of tunnels... 4605 4606 * Really, it is unlikely that netif_tx_lock protection is necessary 4607 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 4608 * counters.) 4609 * However, it is possible, that they rely on protection 4610 * made by us here. 4611 4612 * Check this and shot the lock. It is not prone from deadlocks. 4613 *Either shot noqueue qdisc, it is even simpler 8) 4614 */ 4615 if (dev->flags & IFF_UP) { 4616 int cpu = smp_processor_id(); /* ok because BHs are off */ 4617 4618 /* Other cpus might concurrently change txq->xmit_lock_owner 4619 * to -1 or to their cpu id, but not to our id. 
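 *
 * Illustrative note: if the owner already is this CPU, we are re-entering
 * the transmit path for the same queue (for example a looped virtual
 * device transmitting back to itself); taking HARD_TX_LOCK() again would
 * deadlock, so that case falls through to the "Dead loop" warning below
 * and the packet is dropped.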
4620 */ 4621 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { 4622 if (dev_xmit_recursion()) 4623 goto recursion_alert; 4624 4625 skb = validate_xmit_skb(skb, dev, &again); 4626 if (!skb) 4627 goto out; 4628 4629 HARD_TX_LOCK(dev, txq, cpu); 4630 4631 if (!netif_xmit_stopped(txq)) { 4632 dev_xmit_recursion_inc(); 4633 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4634 dev_xmit_recursion_dec(); 4635 if (dev_xmit_complete(rc)) { 4636 HARD_TX_UNLOCK(dev, txq); 4637 goto out; 4638 } 4639 } 4640 HARD_TX_UNLOCK(dev, txq); 4641 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4642 dev->name); 4643 } else { 4644 /* Recursion is detected! It is possible, 4645 * unfortunately 4646 */ 4647 recursion_alert: 4648 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4649 dev->name); 4650 } 4651 } 4652 4653 rc = -ENETDOWN; 4654 rcu_read_unlock_bh(); 4655 4656 dev_core_stats_tx_dropped_inc(dev); 4657 kfree_skb_list(skb); 4658 return rc; 4659 out: 4660 rcu_read_unlock_bh(); 4661 return rc; 4662 } 4663 EXPORT_SYMBOL(__dev_queue_xmit); 4664 __dev_direct_xmit(struct sk_buff * skb,u16 queue_id)4665 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4666 { 4667 struct net_device *dev = skb->dev; 4668 struct sk_buff *orig_skb = skb; 4669 struct netdev_queue *txq; 4670 int ret = NETDEV_TX_BUSY; 4671 bool again = false; 4672 4673 if (unlikely(!netif_running(dev) || 4674 !netif_carrier_ok(dev))) 4675 goto drop; 4676 4677 skb = validate_xmit_skb_list(skb, dev, &again); 4678 if (skb != orig_skb) 4679 goto drop; 4680 4681 skb_set_queue_mapping(skb, queue_id); 4682 txq = skb_get_tx_queue(dev, skb); 4683 4684 local_bh_disable(); 4685 4686 dev_xmit_recursion_inc(); 4687 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4688 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4689 ret = netdev_start_xmit(skb, dev, txq, false); 4690 HARD_TX_UNLOCK(dev, txq); 4691 dev_xmit_recursion_dec(); 4692 4693 local_bh_enable(); 4694 return ret; 4695 drop: 4696 dev_core_stats_tx_dropped_inc(dev); 4697 kfree_skb_list(skb); 4698 return NET_XMIT_DROP; 4699 } 4700 EXPORT_SYMBOL(__dev_direct_xmit); 4701 4702 /************************************************************************* 4703 * Receiver routines 4704 *************************************************************************/ 4705 static DEFINE_PER_CPU(struct task_struct *, backlog_napi); 4706 4707 int weight_p __read_mostly = 64; /* old backlog weight */ 4708 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4709 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4710 4711 /* Called with irq disabled */ ____napi_schedule(struct softnet_data * sd,struct napi_struct * napi)4712 static inline void ____napi_schedule(struct softnet_data *sd, 4713 struct napi_struct *napi) 4714 { 4715 struct task_struct *thread; 4716 4717 lockdep_assert_irqs_disabled(); 4718 4719 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4720 /* Paired with smp_mb__before_atomic() in 4721 * napi_enable()/dev_set_threaded(). 4722 * Use READ_ONCE() to guarantee a complete 4723 * read on napi->thread. Only call 4724 * wake_up_process() when it's not NULL. 
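 *
 * For context (illustrative, not a statement about any particular
 * driver): NAPI_STATE_THREADED is normally set for all of a device's
 * NAPI instances when threaded polling is enabled, either by a driver
 * through dev_set_threaded() or from user space, e.g.
 *
 *	echo 1 > /sys/class/net/eth0/threaded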
4725 */ 4726 thread = READ_ONCE(napi->thread); 4727 if (thread) { 4728 if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi)) 4729 goto use_local_napi; 4730 4731 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4732 wake_up_process(thread); 4733 return; 4734 } 4735 } 4736 4737 use_local_napi: 4738 list_add_tail(&napi->poll_list, &sd->poll_list); 4739 WRITE_ONCE(napi->list_owner, smp_processor_id()); 4740 /* If not called from net_rx_action() 4741 * we have to raise NET_RX_SOFTIRQ. 4742 */ 4743 if (!sd->in_net_rx_action) 4744 raise_softirq_irqoff(NET_RX_SOFTIRQ); 4745 } 4746 4747 #ifdef CONFIG_RPS 4748 4749 struct static_key_false rps_needed __read_mostly; 4750 EXPORT_SYMBOL(rps_needed); 4751 struct static_key_false rfs_needed __read_mostly; 4752 EXPORT_SYMBOL(rfs_needed); 4753 rfs_slot(u32 hash,const struct rps_dev_flow_table * flow_table)4754 static u32 rfs_slot(u32 hash, const struct rps_dev_flow_table *flow_table) 4755 { 4756 return hash_32(hash, flow_table->log); 4757 } 4758 4759 static struct rps_dev_flow * set_rps_cpu(struct net_device * dev,struct sk_buff * skb,struct rps_dev_flow * rflow,u16 next_cpu)4760 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4761 struct rps_dev_flow *rflow, u16 next_cpu) 4762 { 4763 if (next_cpu < nr_cpu_ids) { 4764 u32 head; 4765 #ifdef CONFIG_RFS_ACCEL 4766 struct netdev_rx_queue *rxqueue; 4767 struct rps_dev_flow_table *flow_table; 4768 struct rps_dev_flow *old_rflow; 4769 u16 rxq_index; 4770 u32 flow_id; 4771 int rc; 4772 4773 /* Should we steer this flow to a different hardware queue? */ 4774 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4775 !(dev->features & NETIF_F_NTUPLE)) 4776 goto out; 4777 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4778 if (rxq_index == skb_get_rx_queue(skb)) 4779 goto out; 4780 4781 rxqueue = dev->_rx + rxq_index; 4782 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4783 if (!flow_table) 4784 goto out; 4785 flow_id = rfs_slot(skb_get_hash(skb), flow_table); 4786 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4787 rxq_index, flow_id); 4788 if (rc < 0) 4789 goto out; 4790 old_rflow = rflow; 4791 rflow = &flow_table->flows[flow_id]; 4792 WRITE_ONCE(rflow->filter, rc); 4793 if (old_rflow->filter == rc) 4794 WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER); 4795 out: 4796 #endif 4797 head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head); 4798 rps_input_queue_tail_save(&rflow->last_qtail, head); 4799 } 4800 4801 WRITE_ONCE(rflow->cpu, next_cpu); 4802 return rflow; 4803 } 4804 4805 /* 4806 * get_rps_cpu is called from netif_receive_skb and returns the target 4807 * CPU from the RPS map of the receiving queue for a given skb. 4808 * rcu_read_lock must be held on entry. 
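 *
 * A sketch of the expected calling pattern (mirroring what the netif_rx
 * path below does):
 *
 *	rcu_read_lock();
 *	cpu = get_rps_cpu(skb->dev, skb, &rflow);
 *	if (cpu < 0)
 *		cpu = smp_processor_id();
 *	enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 *	rcu_read_unlock();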
4809 */ get_rps_cpu(struct net_device * dev,struct sk_buff * skb,struct rps_dev_flow ** rflowp)4810 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4811 struct rps_dev_flow **rflowp) 4812 { 4813 const struct rps_sock_flow_table *sock_flow_table; 4814 struct netdev_rx_queue *rxqueue = dev->_rx; 4815 struct rps_dev_flow_table *flow_table; 4816 struct rps_map *map; 4817 int cpu = -1; 4818 u32 tcpu; 4819 u32 hash; 4820 4821 if (skb_rx_queue_recorded(skb)) { 4822 u16 index = skb_get_rx_queue(skb); 4823 4824 if (unlikely(index >= dev->real_num_rx_queues)) { 4825 WARN_ONCE(dev->real_num_rx_queues > 1, 4826 "%s received packet on queue %u, but number " 4827 "of RX queues is %u\n", 4828 dev->name, index, dev->real_num_rx_queues); 4829 goto done; 4830 } 4831 rxqueue += index; 4832 } 4833 4834 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4835 4836 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4837 map = rcu_dereference(rxqueue->rps_map); 4838 if (!flow_table && !map) 4839 goto done; 4840 4841 skb_reset_network_header(skb); 4842 hash = skb_get_hash(skb); 4843 if (!hash) 4844 goto done; 4845 4846 sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table); 4847 if (flow_table && sock_flow_table) { 4848 struct rps_dev_flow *rflow; 4849 u32 next_cpu; 4850 u32 ident; 4851 4852 /* First check into global flow table if there is a match. 4853 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). 4854 */ 4855 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); 4856 if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask) 4857 goto try_rps; 4858 4859 next_cpu = ident & net_hotdata.rps_cpu_mask; 4860 4861 /* OK, now we know there is a match, 4862 * we can look at the local (per receive queue) flow table 4863 */ 4864 rflow = &flow_table->flows[rfs_slot(hash, flow_table)]; 4865 tcpu = rflow->cpu; 4866 4867 /* 4868 * If the desired CPU (where last recvmsg was done) is 4869 * different from current CPU (one in the rx-queue flow 4870 * table entry), switch if one of the following holds: 4871 * - Current CPU is unset (>= nr_cpu_ids). 4872 * - Current CPU is offline. 4873 * - The current CPU's queue tail has advanced beyond the 4874 * last packet that was enqueued using this table entry. 4875 * This guarantees that all previous packets for the flow 4876 * have been dequeued, thus preserving in order delivery. 4877 */ 4878 if (unlikely(tcpu != next_cpu) && 4879 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4880 ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) - 4881 rflow->last_qtail)) >= 0)) { 4882 tcpu = next_cpu; 4883 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4884 } 4885 4886 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4887 *rflowp = rflow; 4888 cpu = tcpu; 4889 goto done; 4890 } 4891 } 4892 4893 try_rps: 4894 4895 if (map) { 4896 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4897 if (cpu_online(tcpu)) { 4898 cpu = tcpu; 4899 goto done; 4900 } 4901 } 4902 4903 done: 4904 return cpu; 4905 } 4906 4907 #ifdef CONFIG_RFS_ACCEL 4908 4909 /** 4910 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4911 * @dev: Device on which the filter was set 4912 * @rxq_index: RX queue index 4913 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4914 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4915 * 4916 * Drivers that implement ndo_rx_flow_steer() should periodically call 4917 * this function for each installed filter and remove the filters for 4918 * which it returns %true. 
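 *
 * A rough sketch of such a periodic scan, using a hypothetical driver
 * bookkeeping structure purely for illustration:
 *
 *	for (i = 0; i < n_filters; i++) {
 *		struct my_filter *f = &filters[i];
 *
 *		if (rps_may_expire_flow(netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_remove_hw_filter(netdev, f);
 *	}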
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id < (1UL << flow_table->log)) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
		    ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
			   READ_ONCE(rflow->last_qtail)) <
		     (int)(10 << flow_table->log)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/* Called from hardirq (IPI) context */
static void trigger_rx_softirq(void *data)
{
	struct softnet_data *sd = data;

	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	smp_store_release(&sd->defer_ipi_scheduled, 0);
}

/*
 * After we queued a packet into sd->input_pkt_queue,
 * we need to make sure this queue is serviced soon.
 *
 * - If this is another cpu queue, link it to our rps_ipi_list,
 *   and make sure we will process rps_ipi_list from net_rx_action().
 *
 * - If this is our own queue, NAPI schedule our backlog.
 *   Note that this also raises NET_RX_SOFTIRQ.
 */
static void napi_schedule_rps(struct softnet_data *sd)
{
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

#ifdef CONFIG_RPS
	if (sd != mysd) {
		if (use_backlog_threads()) {
			__napi_schedule_irqoff(&sd->backlog);
			return;
		}

		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		/* If not called from net_rx_action() or napi_threaded_poll()
		 * we have to raise NET_RX_SOFTIRQ.
4993 */ 4994 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll) 4995 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4996 return; 4997 } 4998 #endif /* CONFIG_RPS */ 4999 __napi_schedule_irqoff(&mysd->backlog); 5000 } 5001 kick_defer_list_purge(struct softnet_data * sd,unsigned int cpu)5002 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) 5003 { 5004 unsigned long flags; 5005 5006 if (use_backlog_threads()) { 5007 backlog_lock_irq_save(sd, &flags); 5008 5009 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 5010 __napi_schedule_irqoff(&sd->backlog); 5011 5012 backlog_unlock_irq_restore(sd, &flags); 5013 5014 } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) { 5015 smp_call_function_single_async(cpu, &sd->defer_csd); 5016 } 5017 } 5018 5019 #ifdef CONFIG_NET_FLOW_LIMIT 5020 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 5021 #endif 5022 skb_flow_limit(struct sk_buff * skb,unsigned int qlen)5023 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 5024 { 5025 #ifdef CONFIG_NET_FLOW_LIMIT 5026 struct sd_flow_limit *fl; 5027 struct softnet_data *sd; 5028 unsigned int old_flow, new_flow; 5029 5030 if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1)) 5031 return false; 5032 5033 sd = this_cpu_ptr(&softnet_data); 5034 5035 rcu_read_lock(); 5036 fl = rcu_dereference(sd->flow_limit); 5037 if (fl) { 5038 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 5039 old_flow = fl->history[fl->history_head]; 5040 fl->history[fl->history_head] = new_flow; 5041 5042 fl->history_head++; 5043 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 5044 5045 if (likely(fl->buckets[old_flow])) 5046 fl->buckets[old_flow]--; 5047 5048 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 5049 fl->count++; 5050 rcu_read_unlock(); 5051 return true; 5052 } 5053 } 5054 rcu_read_unlock(); 5055 #endif 5056 return false; 5057 } 5058 5059 /* 5060 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 5061 * queue (may be a remote CPU queue). 5062 */ enqueue_to_backlog(struct sk_buff * skb,int cpu,unsigned int * qtail)5063 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 5064 unsigned int *qtail) 5065 { 5066 enum skb_drop_reason reason; 5067 struct softnet_data *sd; 5068 unsigned long flags; 5069 unsigned int qlen; 5070 int max_backlog; 5071 u32 tail; 5072 5073 reason = SKB_DROP_REASON_DEV_READY; 5074 if (!netif_running(skb->dev)) 5075 goto bad_dev; 5076 5077 reason = SKB_DROP_REASON_CPU_BACKLOG; 5078 sd = &per_cpu(softnet_data, cpu); 5079 5080 qlen = skb_queue_len_lockless(&sd->input_pkt_queue); 5081 max_backlog = READ_ONCE(net_hotdata.max_backlog); 5082 if (unlikely(qlen > max_backlog)) 5083 goto cpu_backlog_drop; 5084 backlog_lock_irq_save(sd, &flags); 5085 qlen = skb_queue_len(&sd->input_pkt_queue); 5086 if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) { 5087 if (!qlen) { 5088 /* Schedule NAPI for backlog device. We can use 5089 * non atomic operation as we own the queue lock. 
5090 */ 5091 if (!__test_and_set_bit(NAPI_STATE_SCHED, 5092 &sd->backlog.state)) 5093 napi_schedule_rps(sd); 5094 } 5095 __skb_queue_tail(&sd->input_pkt_queue, skb); 5096 tail = rps_input_queue_tail_incr(sd); 5097 backlog_unlock_irq_restore(sd, &flags); 5098 5099 /* save the tail outside of the critical section */ 5100 rps_input_queue_tail_save(qtail, tail); 5101 return NET_RX_SUCCESS; 5102 } 5103 5104 backlog_unlock_irq_restore(sd, &flags); 5105 5106 cpu_backlog_drop: 5107 atomic_inc(&sd->dropped); 5108 bad_dev: 5109 dev_core_stats_rx_dropped_inc(skb->dev); 5110 kfree_skb_reason(skb, reason); 5111 return NET_RX_DROP; 5112 } 5113 netif_get_rxqueue(struct sk_buff * skb)5114 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 5115 { 5116 struct net_device *dev = skb->dev; 5117 struct netdev_rx_queue *rxqueue; 5118 5119 rxqueue = dev->_rx; 5120 5121 if (skb_rx_queue_recorded(skb)) { 5122 u16 index = skb_get_rx_queue(skb); 5123 5124 if (unlikely(index >= dev->real_num_rx_queues)) { 5125 WARN_ONCE(dev->real_num_rx_queues > 1, 5126 "%s received packet on queue %u, but number " 5127 "of RX queues is %u\n", 5128 dev->name, index, dev->real_num_rx_queues); 5129 5130 return rxqueue; /* Return first rxqueue */ 5131 } 5132 rxqueue += index; 5133 } 5134 return rxqueue; 5135 } 5136 bpf_prog_run_generic_xdp(struct sk_buff * skb,struct xdp_buff * xdp,const struct bpf_prog * xdp_prog)5137 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 5138 const struct bpf_prog *xdp_prog) 5139 { 5140 void *orig_data, *orig_data_end, *hard_start; 5141 struct netdev_rx_queue *rxqueue; 5142 bool orig_bcast, orig_host; 5143 u32 mac_len, frame_sz; 5144 __be16 orig_eth_type; 5145 struct ethhdr *eth; 5146 u32 metalen, act; 5147 int off; 5148 5149 /* The XDP program wants to see the packet starting at the MAC 5150 * header. 5151 */ 5152 mac_len = skb->data - skb_mac_header(skb); 5153 hard_start = skb->data - skb_headroom(skb); 5154 5155 /* SKB "head" area always have tailroom for skb_shared_info */ 5156 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 5157 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 5158 5159 rxqueue = netif_get_rxqueue(skb); 5160 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 5161 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 5162 skb_headlen(skb) + mac_len, true); 5163 if (skb_is_nonlinear(skb)) { 5164 skb_shinfo(skb)->xdp_frags_size = skb->data_len; 5165 xdp_buff_set_frags_flag(xdp); 5166 } else { 5167 xdp_buff_clear_frags_flag(xdp); 5168 } 5169 5170 orig_data_end = xdp->data_end; 5171 orig_data = xdp->data; 5172 eth = (struct ethhdr *)xdp->data; 5173 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 5174 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 5175 orig_eth_type = eth->h_proto; 5176 5177 act = bpf_prog_run_xdp(xdp_prog, xdp); 5178 5179 /* check if bpf_xdp_adjust_head was used */ 5180 off = xdp->data - orig_data; 5181 if (off) { 5182 if (off > 0) 5183 __skb_pull(skb, off); 5184 else if (off < 0) 5185 __skb_push(skb, -off); 5186 5187 skb->mac_header += off; 5188 skb_reset_network_header(skb); 5189 } 5190 5191 /* check if bpf_xdp_adjust_tail was used */ 5192 off = xdp->data_end - orig_data_end; 5193 if (off != 0) { 5194 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 5195 skb->len += off; /* positive on grow, negative on shrink */ 5196 } 5197 5198 /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers 5199 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here. 
5200 */ 5201 if (xdp_buff_has_frags(xdp)) 5202 skb->data_len = skb_shinfo(skb)->xdp_frags_size; 5203 else 5204 skb->data_len = 0; 5205 5206 /* check if XDP changed eth hdr such SKB needs update */ 5207 eth = (struct ethhdr *)xdp->data; 5208 if ((orig_eth_type != eth->h_proto) || 5209 (orig_host != ether_addr_equal_64bits(eth->h_dest, 5210 skb->dev->dev_addr)) || 5211 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 5212 __skb_push(skb, ETH_HLEN); 5213 skb->pkt_type = PACKET_HOST; 5214 skb->protocol = eth_type_trans(skb, skb->dev); 5215 } 5216 5217 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 5218 * before calling us again on redirect path. We do not call do_redirect 5219 * as we leave that up to the caller. 5220 * 5221 * Caller is responsible for managing lifetime of skb (i.e. calling 5222 * kfree_skb in response to actions it cannot handle/XDP_DROP). 5223 */ 5224 switch (act) { 5225 case XDP_REDIRECT: 5226 case XDP_TX: 5227 __skb_push(skb, mac_len); 5228 break; 5229 case XDP_PASS: 5230 metalen = xdp->data - xdp->data_meta; 5231 if (metalen) 5232 skb_metadata_set(skb, metalen); 5233 break; 5234 } 5235 5236 return act; 5237 } 5238 5239 static int netif_skb_check_for_xdp(struct sk_buff ** pskb,const struct bpf_prog * prog)5240 netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog) 5241 { 5242 struct sk_buff *skb = *pskb; 5243 int err, hroom, troom; 5244 5245 if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog)) 5246 return 0; 5247 5248 /* In case we have to go down the path and also linearize, 5249 * then lets do the pskb_expand_head() work just once here. 5250 */ 5251 hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 5252 troom = skb->tail + skb->data_len - skb->end; 5253 err = pskb_expand_head(skb, 5254 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 5255 troom > 0 ? troom + 128 : 0, GFP_ATOMIC); 5256 if (err) 5257 return err; 5258 5259 return skb_linearize(skb); 5260 } 5261 netif_receive_generic_xdp(struct sk_buff ** pskb,struct xdp_buff * xdp,const struct bpf_prog * xdp_prog)5262 static u32 netif_receive_generic_xdp(struct sk_buff **pskb, 5263 struct xdp_buff *xdp, 5264 const struct bpf_prog *xdp_prog) 5265 { 5266 struct sk_buff *skb = *pskb; 5267 u32 mac_len, act = XDP_DROP; 5268 5269 /* Reinjected packets coming from act_mirred or similar should 5270 * not get XDP generic processing. 5271 */ 5272 if (skb_is_redirected(skb)) 5273 return XDP_PASS; 5274 5275 /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM 5276 * bytes. This is the guarantee that also native XDP provides, 5277 * thus we need to do it here as well. 
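 *
 * Illustrative note: XDP_PACKET_HEADROOM is 256 bytes, typically more
 * than a non-XDP driver reserves in front of an skb, and the skb may
 * also be cloned or non-linear; netif_skb_check_for_xdp() below
 * reallocates and linearizes the skb in those cases before the program
 * is run.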
5278 */ 5279 mac_len = skb->data - skb_mac_header(skb); 5280 __skb_push(skb, mac_len); 5281 5282 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 5283 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 5284 if (netif_skb_check_for_xdp(pskb, xdp_prog)) 5285 goto do_drop; 5286 } 5287 5288 __skb_pull(*pskb, mac_len); 5289 5290 act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog); 5291 switch (act) { 5292 case XDP_REDIRECT: 5293 case XDP_TX: 5294 case XDP_PASS: 5295 break; 5296 default: 5297 bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act); 5298 fallthrough; 5299 case XDP_ABORTED: 5300 trace_xdp_exception((*pskb)->dev, xdp_prog, act); 5301 fallthrough; 5302 case XDP_DROP: 5303 do_drop: 5304 kfree_skb(*pskb); 5305 break; 5306 } 5307 5308 return act; 5309 } 5310 5311 /* When doing generic XDP we have to bypass the qdisc layer and the 5312 * network taps in order to match in-driver-XDP behavior. This also means 5313 * that XDP packets are able to starve other packets going through a qdisc, 5314 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX 5315 * queues, so they do not have this starvation issue. 5316 */ generic_xdp_tx(struct sk_buff * skb,const struct bpf_prog * xdp_prog)5317 void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog) 5318 { 5319 struct net_device *dev = skb->dev; 5320 struct netdev_queue *txq; 5321 bool free_skb = true; 5322 int cpu, rc; 5323 5324 txq = netdev_core_pick_tx(dev, skb, NULL); 5325 cpu = smp_processor_id(); 5326 HARD_TX_LOCK(dev, txq, cpu); 5327 if (!netif_xmit_frozen_or_drv_stopped(txq)) { 5328 rc = netdev_start_xmit(skb, dev, txq, 0); 5329 if (dev_xmit_complete(rc)) 5330 free_skb = false; 5331 } 5332 HARD_TX_UNLOCK(dev, txq); 5333 if (free_skb) { 5334 trace_xdp_exception(dev, xdp_prog, XDP_TX); 5335 dev_core_stats_tx_dropped_inc(dev); 5336 kfree_skb(skb); 5337 } 5338 } 5339 5340 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 5341 do_xdp_generic(const struct bpf_prog * xdp_prog,struct sk_buff ** pskb)5342 int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb) 5343 { 5344 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 5345 5346 if (xdp_prog) { 5347 struct xdp_buff xdp; 5348 u32 act; 5349 int err; 5350 5351 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 5352 act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog); 5353 if (act != XDP_PASS) { 5354 switch (act) { 5355 case XDP_REDIRECT: 5356 err = xdp_do_generic_redirect((*pskb)->dev, *pskb, 5357 &xdp, xdp_prog); 5358 if (err) 5359 goto out_redir; 5360 break; 5361 case XDP_TX: 5362 generic_xdp_tx(*pskb, xdp_prog); 5363 break; 5364 } 5365 bpf_net_ctx_clear(bpf_net_ctx); 5366 return XDP_DROP; 5367 } 5368 bpf_net_ctx_clear(bpf_net_ctx); 5369 } 5370 return XDP_PASS; 5371 out_redir: 5372 bpf_net_ctx_clear(bpf_net_ctx); 5373 kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP); 5374 return XDP_DROP; 5375 } 5376 EXPORT_SYMBOL_GPL(do_xdp_generic); 5377 netif_rx_internal(struct sk_buff * skb)5378 static int netif_rx_internal(struct sk_buff *skb) 5379 { 5380 int ret; 5381 5382 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb); 5383 5384 trace_netif_rx(skb); 5385 5386 #ifdef CONFIG_RPS 5387 if (static_branch_unlikely(&rps_needed)) { 5388 struct rps_dev_flow voidflow, *rflow = &voidflow; 5389 int cpu; 5390 5391 rcu_read_lock(); 5392 5393 cpu = get_rps_cpu(skb->dev, skb, &rflow); 5394 if (cpu < 0) 5395 cpu = smp_processor_id(); 5396 5397 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5398 5399 rcu_read_unlock(); 5400 } else 5401 #endif 5402 { 
5403 unsigned int qtail; 5404 5405 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 5406 } 5407 return ret; 5408 } 5409 5410 /** 5411 * __netif_rx - Slightly optimized version of netif_rx 5412 * @skb: buffer to post 5413 * 5414 * This behaves as netif_rx except that it does not disable bottom halves. 5415 * As a result this function may only be invoked from the interrupt context 5416 * (either hard or soft interrupt). 5417 */ __netif_rx(struct sk_buff * skb)5418 int __netif_rx(struct sk_buff *skb) 5419 { 5420 int ret; 5421 5422 lockdep_assert_once(hardirq_count() | softirq_count()); 5423 5424 trace_netif_rx_entry(skb); 5425 ret = netif_rx_internal(skb); 5426 trace_netif_rx_exit(ret); 5427 return ret; 5428 } 5429 EXPORT_SYMBOL(__netif_rx); 5430 5431 /** 5432 * netif_rx - post buffer to the network code 5433 * @skb: buffer to post 5434 * 5435 * This function receives a packet from a device driver and queues it for 5436 * the upper (protocol) levels to process via the backlog NAPI device. It 5437 * always succeeds. The buffer may be dropped during processing for 5438 * congestion control or by the protocol layers. 5439 * The network buffer is passed via the backlog NAPI device. Modern NIC 5440 * driver should use NAPI and GRO. 5441 * This function can used from interrupt and from process context. The 5442 * caller from process context must not disable interrupts before invoking 5443 * this function. 5444 * 5445 * return values: 5446 * NET_RX_SUCCESS (no congestion) 5447 * NET_RX_DROP (packet was dropped) 5448 * 5449 */ netif_rx(struct sk_buff * skb)5450 int netif_rx(struct sk_buff *skb) 5451 { 5452 bool need_bh_off = !(hardirq_count() | softirq_count()); 5453 int ret; 5454 5455 if (need_bh_off) 5456 local_bh_disable(); 5457 trace_netif_rx_entry(skb); 5458 ret = netif_rx_internal(skb); 5459 trace_netif_rx_exit(ret); 5460 if (need_bh_off) 5461 local_bh_enable(); 5462 return ret; 5463 } 5464 EXPORT_SYMBOL(netif_rx); 5465 net_tx_action(void)5466 static __latent_entropy void net_tx_action(void) 5467 { 5468 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 5469 5470 if (sd->completion_queue) { 5471 struct sk_buff *clist; 5472 5473 local_irq_disable(); 5474 clist = sd->completion_queue; 5475 sd->completion_queue = NULL; 5476 local_irq_enable(); 5477 5478 while (clist) { 5479 struct sk_buff *skb = clist; 5480 5481 clist = clist->next; 5482 5483 WARN_ON(refcount_read(&skb->users)); 5484 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED)) 5485 trace_consume_skb(skb, net_tx_action); 5486 else 5487 trace_kfree_skb(skb, net_tx_action, 5488 get_kfree_skb_cb(skb)->reason, NULL); 5489 5490 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 5491 __kfree_skb(skb); 5492 else 5493 __napi_kfree_skb(skb, 5494 get_kfree_skb_cb(skb)->reason); 5495 } 5496 } 5497 5498 if (sd->output_queue) { 5499 struct Qdisc *head; 5500 5501 local_irq_disable(); 5502 head = sd->output_queue; 5503 sd->output_queue = NULL; 5504 sd->output_queue_tailp = &sd->output_queue; 5505 local_irq_enable(); 5506 5507 rcu_read_lock(); 5508 5509 while (head) { 5510 struct Qdisc *q = head; 5511 spinlock_t *root_lock = NULL; 5512 5513 head = head->next_sched; 5514 5515 /* We need to make sure head->next_sched is read 5516 * before clearing __QDISC_STATE_SCHED 5517 */ 5518 smp_mb__before_atomic(); 5519 5520 if (!(q->flags & TCQ_F_NOLOCK)) { 5521 root_lock = qdisc_lock(q); 5522 spin_lock(root_lock); 5523 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, 5524 &q->state))) { 5525 /* There is a synchronize_net() between 5526 * 
STATE_DEACTIVATED flag being set and 5527 * qdisc_reset()/some_qdisc_is_busy() in 5528 * dev_deactivate(), so we can safely bail out 5529 * early here to avoid a data race between 5530 * qdisc_deactivate() and some_qdisc_is_busy() 5531 * for lockless qdisc. 5532 */ 5533 clear_bit(__QDISC_STATE_SCHED, &q->state); 5534 continue; 5535 } 5536 5537 clear_bit(__QDISC_STATE_SCHED, &q->state); 5538 qdisc_run(q); 5539 if (root_lock) 5540 spin_unlock(root_lock); 5541 } 5542 5543 rcu_read_unlock(); 5544 } 5545 5546 xfrm_dev_backlog(sd); 5547 } 5548 5549 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 5550 /* This hook is defined here for ATM LANE */ 5551 int (*br_fdb_test_addr_hook)(struct net_device *dev, 5552 unsigned char *addr) __read_mostly; 5553 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 5554 #endif 5555 5556 /** 5557 * netdev_is_rx_handler_busy - check if receive handler is registered 5558 * @dev: device to check 5559 * 5560 * Check if a receive handler is already registered for a given device. 5561 * Return true if there is one. 5562 * 5563 * The caller must hold the rtnl_mutex. 5564 */ 5565 bool netdev_is_rx_handler_busy(struct net_device *dev) 5566 { 5567 ASSERT_RTNL(); 5568 return dev && rtnl_dereference(dev->rx_handler); 5569 } 5570 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5571 5572 /** 5573 * netdev_rx_handler_register - register receive handler 5574 * @dev: device to register a handler for 5575 * @rx_handler: receive handler to register 5576 * @rx_handler_data: data pointer that is used by rx handler 5577 * 5578 * Register a receive handler for a device. This handler will then be 5579 * called from __netif_receive_skb. A negative errno code is returned 5580 * on failure. 5581 * 5582 * The caller must hold the rtnl_mutex. 5583 * 5584 * For a general description of rx_handler, see enum rx_handler_result. 5585 */ 5586 int netdev_rx_handler_register(struct net_device *dev, 5587 rx_handler_func_t *rx_handler, 5588 void *rx_handler_data) 5589 { 5590 if (netdev_is_rx_handler_busy(dev)) 5591 return -EBUSY; 5592 5593 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5594 return -EINVAL; 5595 5596 /* Note: rx_handler_data must be set before rx_handler */ 5597 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5598 rcu_assign_pointer(dev->rx_handler, rx_handler); 5599 5600 return 0; 5601 } 5602 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5603 5604 /** 5605 * netdev_rx_handler_unregister - unregister receive handler 5606 * @dev: device to unregister a handler from 5607 * 5608 * Unregister a receive handler from a device. 5609 * 5610 * The caller must hold the rtnl_mutex. 5611 */ 5612 void netdev_rx_handler_unregister(struct net_device *dev) 5613 { 5614 5615 ASSERT_RTNL(); 5616 RCU_INIT_POINTER(dev->rx_handler, NULL); 5617 /* A reader seeing a non-NULL rx_handler in an rcu_read_lock() 5618 * section is guaranteed to see a non-NULL rx_handler_data 5619 * as well. 5620 */ 5621 synchronize_net(); 5622 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5623 } 5624 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5625 5626 /* 5627 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5628 * the special handling of PFMEMALLOC skbs.
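 * For example, a pfmemalloc skb carrying an ethertype that is not in the
 * list below is dropped in __netif_receive_skb_core() rather than being
 * delivered to taps or protocol handlers.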
5629 */ skb_pfmemalloc_protocol(struct sk_buff * skb)5630 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5631 { 5632 switch (skb->protocol) { 5633 case htons(ETH_P_ARP): 5634 case htons(ETH_P_IP): 5635 case htons(ETH_P_IPV6): 5636 case htons(ETH_P_8021Q): 5637 case htons(ETH_P_8021AD): 5638 return true; 5639 default: 5640 return false; 5641 } 5642 } 5643 nf_ingress(struct sk_buff * skb,struct packet_type ** pt_prev,int * ret,struct net_device * orig_dev)5644 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5645 int *ret, struct net_device *orig_dev) 5646 { 5647 if (nf_hook_ingress_active(skb)) { 5648 int ingress_retval; 5649 5650 if (*pt_prev) { 5651 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5652 *pt_prev = NULL; 5653 } 5654 5655 rcu_read_lock(); 5656 ingress_retval = nf_hook_ingress(skb); 5657 rcu_read_unlock(); 5658 return ingress_retval; 5659 } 5660 return 0; 5661 } 5662 __netif_receive_skb_core(struct sk_buff ** pskb,bool pfmemalloc,struct packet_type ** ppt_prev)5663 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5664 struct packet_type **ppt_prev) 5665 { 5666 struct packet_type *ptype, *pt_prev; 5667 rx_handler_func_t *rx_handler; 5668 struct sk_buff *skb = *pskb; 5669 struct net_device *orig_dev; 5670 bool deliver_exact = false; 5671 int ret = NET_RX_DROP; 5672 __be16 type; 5673 5674 net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb); 5675 5676 trace_netif_receive_skb(skb); 5677 5678 orig_dev = skb->dev; 5679 5680 skb_reset_network_header(skb); 5681 #if !defined(CONFIG_DEBUG_NET) 5682 /* We plan to no longer reset the transport header here. 5683 * Give some time to fuzzers and dev build to catch bugs 5684 * in network stacks. 5685 */ 5686 if (!skb_transport_header_was_set(skb)) 5687 skb_reset_transport_header(skb); 5688 #endif 5689 skb_reset_mac_len(skb); 5690 5691 pt_prev = NULL; 5692 5693 another_round: 5694 skb->skb_iif = skb->dev->ifindex; 5695 5696 __this_cpu_inc(softnet_data.processed); 5697 5698 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5699 int ret2; 5700 5701 migrate_disable(); 5702 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), 5703 &skb); 5704 migrate_enable(); 5705 5706 if (ret2 != XDP_PASS) { 5707 ret = NET_RX_DROP; 5708 goto out; 5709 } 5710 } 5711 5712 if (eth_type_vlan(skb->protocol)) { 5713 skb = skb_vlan_untag(skb); 5714 if (unlikely(!skb)) 5715 goto out; 5716 } 5717 5718 if (skb_skip_tc_classify(skb)) 5719 goto skip_classify; 5720 5721 if (pfmemalloc) 5722 goto skip_taps; 5723 5724 list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all, 5725 list) { 5726 if (pt_prev) 5727 ret = deliver_skb(skb, pt_prev, orig_dev); 5728 pt_prev = ptype; 5729 } 5730 5731 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5732 if (pt_prev) 5733 ret = deliver_skb(skb, pt_prev, orig_dev); 5734 pt_prev = ptype; 5735 } 5736 5737 skip_taps: 5738 #ifdef CONFIG_NET_INGRESS 5739 if (static_branch_unlikely(&ingress_needed_key)) { 5740 bool another = false; 5741 5742 nf_skip_egress(skb, true); 5743 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5744 &another); 5745 if (another) 5746 goto another_round; 5747 if (!skb) 5748 goto out; 5749 5750 nf_skip_egress(skb, false); 5751 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5752 goto out; 5753 } 5754 #endif 5755 skb_reset_redirect(skb); 5756 skip_classify: 5757 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5758 goto drop; 5759 5760 if (skb_vlan_tag_present(skb)) { 5761 if (pt_prev) { 5762 ret = 
deliver_skb(skb, pt_prev, orig_dev); 5763 pt_prev = NULL; 5764 } 5765 if (vlan_do_receive(&skb)) 5766 goto another_round; 5767 else if (unlikely(!skb)) 5768 goto out; 5769 } 5770 5771 rx_handler = rcu_dereference(skb->dev->rx_handler); 5772 if (rx_handler) { 5773 if (pt_prev) { 5774 ret = deliver_skb(skb, pt_prev, orig_dev); 5775 pt_prev = NULL; 5776 } 5777 switch (rx_handler(&skb)) { 5778 case RX_HANDLER_CONSUMED: 5779 ret = NET_RX_SUCCESS; 5780 goto out; 5781 case RX_HANDLER_ANOTHER: 5782 goto another_round; 5783 case RX_HANDLER_EXACT: 5784 deliver_exact = true; 5785 break; 5786 case RX_HANDLER_PASS: 5787 break; 5788 default: 5789 BUG(); 5790 } 5791 } 5792 5793 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5794 check_vlan_id: 5795 if (skb_vlan_tag_get_id(skb)) { 5796 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5797 * find vlan device. 5798 */ 5799 skb->pkt_type = PACKET_OTHERHOST; 5800 } else if (eth_type_vlan(skb->protocol)) { 5801 /* Outer header is 802.1P with vlan 0, inner header is 5802 * 802.1Q or 802.1AD and vlan_do_receive() above could 5803 * not find vlan dev for vlan id 0. 5804 */ 5805 __vlan_hwaccel_clear_tag(skb); 5806 skb = skb_vlan_untag(skb); 5807 if (unlikely(!skb)) 5808 goto out; 5809 if (vlan_do_receive(&skb)) 5810 /* After stripping off 802.1P header with vlan 0 5811 * vlan dev is found for inner header. 5812 */ 5813 goto another_round; 5814 else if (unlikely(!skb)) 5815 goto out; 5816 else 5817 /* We have stripped outer 802.1P vlan 0 header. 5818 * But could not find vlan dev. 5819 * check again for vlan id to set OTHERHOST. 5820 */ 5821 goto check_vlan_id; 5822 } 5823 /* Note: we might in the future use prio bits 5824 * and set skb->priority like in vlan_do_receive() 5825 * For the time being, just ignore Priority Code Point 5826 */ 5827 __vlan_hwaccel_clear_tag(skb); 5828 } 5829 5830 type = skb->protocol; 5831 5832 /* deliver only exact match when indicated */ 5833 if (likely(!deliver_exact)) { 5834 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5835 &ptype_base[ntohs(type) & 5836 PTYPE_HASH_MASK]); 5837 5838 /* orig_dev and skb->dev could belong to different netns; 5839 * Even in such case we need to traverse only the list 5840 * coming from skb->dev, as the ptype owner (packet socket) 5841 * will use dev_net(skb->dev) to do namespace filtering. 5842 */ 5843 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5844 &dev_net_rcu(skb->dev)->ptype_specific); 5845 } 5846 5847 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5848 &orig_dev->ptype_specific); 5849 5850 if (unlikely(skb->dev != orig_dev)) { 5851 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5852 &skb->dev->ptype_specific); 5853 } 5854 5855 if (pt_prev) { 5856 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5857 goto drop; 5858 *ppt_prev = pt_prev; 5859 } else { 5860 drop: 5861 if (!deliver_exact) 5862 dev_core_stats_rx_dropped_inc(skb->dev); 5863 else 5864 dev_core_stats_rx_nohandler_inc(skb->dev); 5865 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); 5866 /* Jamal, now you will not able to escape explaining 5867 * me how you were going to use this. :-) 5868 */ 5869 ret = NET_RX_DROP; 5870 } 5871 5872 out: 5873 /* The invariant here is that if *ppt_prev is not NULL 5874 * then skb should also be non-NULL. 5875 * 5876 * Apparently *ppt_prev assignment above holds this invariant due to 5877 * skb dereferencing near it. 
5878 */ 5879 *pskb = skb; 5880 return ret; 5881 } 5882 __netif_receive_skb_one_core(struct sk_buff * skb,bool pfmemalloc)5883 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5884 { 5885 struct net_device *orig_dev = skb->dev; 5886 struct packet_type *pt_prev = NULL; 5887 int ret; 5888 5889 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5890 if (pt_prev) 5891 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5892 skb->dev, pt_prev, orig_dev); 5893 return ret; 5894 } 5895 5896 /** 5897 * netif_receive_skb_core - special purpose version of netif_receive_skb 5898 * @skb: buffer to process 5899 * 5900 * More direct receive version of netif_receive_skb(). It should 5901 * only be used by callers that have a need to skip RPS and Generic XDP. 5902 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5903 * 5904 * This function may only be called from softirq context and interrupts 5905 * should be enabled. 5906 * 5907 * Return values (usually ignored): 5908 * NET_RX_SUCCESS: no congestion 5909 * NET_RX_DROP: packet was dropped 5910 */ netif_receive_skb_core(struct sk_buff * skb)5911 int netif_receive_skb_core(struct sk_buff *skb) 5912 { 5913 int ret; 5914 5915 rcu_read_lock(); 5916 ret = __netif_receive_skb_one_core(skb, false); 5917 rcu_read_unlock(); 5918 5919 return ret; 5920 } 5921 EXPORT_SYMBOL(netif_receive_skb_core); 5922 __netif_receive_skb_list_ptype(struct list_head * head,struct packet_type * pt_prev,struct net_device * orig_dev)5923 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5924 struct packet_type *pt_prev, 5925 struct net_device *orig_dev) 5926 { 5927 struct sk_buff *skb, *next; 5928 5929 if (!pt_prev) 5930 return; 5931 if (list_empty(head)) 5932 return; 5933 if (pt_prev->list_func != NULL) 5934 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5935 ip_list_rcv, head, pt_prev, orig_dev); 5936 else 5937 list_for_each_entry_safe(skb, next, head, list) { 5938 skb_list_del_init(skb); 5939 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5940 } 5941 } 5942 __netif_receive_skb_list_core(struct list_head * head,bool pfmemalloc)5943 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5944 { 5945 /* Fast-path assumptions: 5946 * - There is no RX handler. 5947 * - Only one packet_type matches. 5948 * If either of these fails, we will end up doing some per-packet 5949 * processing in-line, then handling the 'last ptype' for the whole 5950 * sublist. This can't cause out-of-order delivery to any single ptype, 5951 * because the 'last ptype' must be constant across the sublist, and all 5952 * other ptypes are handled per-packet. 
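 * Illustrative example: a list arriving as [IPv4, IPv4, IPv6, IPv4] from one
 * device is dispatched as the sublist [IPv4, IPv4] to the IPv4 list handler,
 * then [IPv6] on its own, and finally [IPv4] as a new sublist.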
5953 */ 5954 /* Current (common) ptype of sublist */ 5955 struct packet_type *pt_curr = NULL; 5956 /* Current (common) orig_dev of sublist */ 5957 struct net_device *od_curr = NULL; 5958 struct sk_buff *skb, *next; 5959 LIST_HEAD(sublist); 5960 5961 list_for_each_entry_safe(skb, next, head, list) { 5962 struct net_device *orig_dev = skb->dev; 5963 struct packet_type *pt_prev = NULL; 5964 5965 skb_list_del_init(skb); 5966 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5967 if (!pt_prev) 5968 continue; 5969 if (pt_curr != pt_prev || od_curr != orig_dev) { 5970 /* dispatch old sublist */ 5971 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5972 /* start new sublist */ 5973 INIT_LIST_HEAD(&sublist); 5974 pt_curr = pt_prev; 5975 od_curr = orig_dev; 5976 } 5977 list_add_tail(&skb->list, &sublist); 5978 } 5979 5980 /* dispatch final sublist */ 5981 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5982 } 5983 __netif_receive_skb(struct sk_buff * skb)5984 static int __netif_receive_skb(struct sk_buff *skb) 5985 { 5986 int ret; 5987 5988 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5989 unsigned int noreclaim_flag; 5990 5991 /* 5992 * PFMEMALLOC skbs are special, they should 5993 * - be delivered to SOCK_MEMALLOC sockets only 5994 * - stay away from userspace 5995 * - have bounded memory usage 5996 * 5997 * Use PF_MEMALLOC as this saves us from propagating the allocation 5998 * context down to all allocation sites. 5999 */ 6000 noreclaim_flag = memalloc_noreclaim_save(); 6001 ret = __netif_receive_skb_one_core(skb, true); 6002 memalloc_noreclaim_restore(noreclaim_flag); 6003 } else 6004 ret = __netif_receive_skb_one_core(skb, false); 6005 6006 return ret; 6007 } 6008 __netif_receive_skb_list(struct list_head * head)6009 static void __netif_receive_skb_list(struct list_head *head) 6010 { 6011 unsigned long noreclaim_flag = 0; 6012 struct sk_buff *skb, *next; 6013 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 6014 6015 list_for_each_entry_safe(skb, next, head, list) { 6016 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 6017 struct list_head sublist; 6018 6019 /* Handle the previous sublist */ 6020 list_cut_before(&sublist, head, &skb->list); 6021 if (!list_empty(&sublist)) 6022 __netif_receive_skb_list_core(&sublist, pfmemalloc); 6023 pfmemalloc = !pfmemalloc; 6024 /* See comments in __netif_receive_skb */ 6025 if (pfmemalloc) 6026 noreclaim_flag = memalloc_noreclaim_save(); 6027 else 6028 memalloc_noreclaim_restore(noreclaim_flag); 6029 } 6030 } 6031 /* Handle the remaining sublist */ 6032 if (!list_empty(head)) 6033 __netif_receive_skb_list_core(head, pfmemalloc); 6034 /* Restore pflags */ 6035 if (pfmemalloc) 6036 memalloc_noreclaim_restore(noreclaim_flag); 6037 } 6038 generic_xdp_install(struct net_device * dev,struct netdev_bpf * xdp)6039 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 6040 { 6041 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 6042 struct bpf_prog *new = xdp->prog; 6043 int ret = 0; 6044 6045 switch (xdp->command) { 6046 case XDP_SETUP_PROG: 6047 rcu_assign_pointer(dev->xdp_prog, new); 6048 if (old) 6049 bpf_prog_put(old); 6050 6051 if (old && !new) { 6052 static_branch_dec(&generic_xdp_needed_key); 6053 } else if (new && !old) { 6054 static_branch_inc(&generic_xdp_needed_key); 6055 netif_disable_lro(dev); 6056 dev_disable_gro_hw(dev); 6057 } 6058 break; 6059 6060 default: 6061 ret = -EINVAL; 6062 break; 6063 } 6064 6065 return ret; 6066 } 6067 netif_receive_skb_internal(struct sk_buff * skb)6068 static int netif_receive_skb_internal(struct sk_buff *skb) 6069 { 6070 int ret; 6071 6072 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb); 6073 6074 if (skb_defer_rx_timestamp(skb)) 6075 return NET_RX_SUCCESS; 6076 6077 rcu_read_lock(); 6078 #ifdef CONFIG_RPS 6079 if (static_branch_unlikely(&rps_needed)) { 6080 struct rps_dev_flow voidflow, *rflow = &voidflow; 6081 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 6082 6083 if (cpu >= 0) { 6084 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 6085 rcu_read_unlock(); 6086 return ret; 6087 } 6088 } 6089 #endif 6090 ret = __netif_receive_skb(skb); 6091 rcu_read_unlock(); 6092 return ret; 6093 } 6094 netif_receive_skb_list_internal(struct list_head * head)6095 void netif_receive_skb_list_internal(struct list_head *head) 6096 { 6097 struct sk_buff *skb, *next; 6098 LIST_HEAD(sublist); 6099 6100 list_for_each_entry_safe(skb, next, head, list) { 6101 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), 6102 skb); 6103 skb_list_del_init(skb); 6104 if (!skb_defer_rx_timestamp(skb)) 6105 list_add_tail(&skb->list, &sublist); 6106 } 6107 list_splice_init(&sublist, head); 6108 6109 rcu_read_lock(); 6110 #ifdef CONFIG_RPS 6111 if (static_branch_unlikely(&rps_needed)) { 6112 list_for_each_entry_safe(skb, next, head, list) { 6113 struct rps_dev_flow voidflow, *rflow = &voidflow; 6114 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 6115 6116 if (cpu >= 0) { 6117 /* Will be handled, remove from list */ 6118 skb_list_del_init(skb); 6119 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 6120 } 6121 } 6122 } 6123 #endif 6124 __netif_receive_skb_list(head); 6125 rcu_read_unlock(); 6126 } 6127 6128 /** 6129 * netif_receive_skb - process receive buffer from network 6130 * @skb: buffer to process 6131 * 6132 * netif_receive_skb() is the main receive data processing function. 6133 * It always succeeds. 
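 * A typical (illustrative) call site in a driver's NAPI poll routine:
 *
 *   skb->protocol = eth_type_trans(skb, dev);
 *   netif_receive_skb(skb);
 *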
The buffer may be dropped during processing 6134 * for congestion control or by the protocol layers. 6135 * 6136 * This function may only be called from softirq context and interrupts 6137 * should be enabled. 6138 * 6139 * Return values (usually ignored): 6140 * NET_RX_SUCCESS: no congestion 6141 * NET_RX_DROP: packet was dropped 6142 */ netif_receive_skb(struct sk_buff * skb)6143 int netif_receive_skb(struct sk_buff *skb) 6144 { 6145 int ret; 6146 6147 trace_netif_receive_skb_entry(skb); 6148 6149 ret = netif_receive_skb_internal(skb); 6150 trace_netif_receive_skb_exit(ret); 6151 6152 return ret; 6153 } 6154 EXPORT_SYMBOL(netif_receive_skb); 6155 6156 /** 6157 * netif_receive_skb_list - process many receive buffers from network 6158 * @head: list of skbs to process. 6159 * 6160 * Since return value of netif_receive_skb() is normally ignored, and 6161 * wouldn't be meaningful for a list, this function returns void. 6162 * 6163 * This function may only be called from softirq context and interrupts 6164 * should be enabled. 6165 */ netif_receive_skb_list(struct list_head * head)6166 void netif_receive_skb_list(struct list_head *head) 6167 { 6168 struct sk_buff *skb; 6169 6170 if (list_empty(head)) 6171 return; 6172 if (trace_netif_receive_skb_list_entry_enabled()) { 6173 list_for_each_entry(skb, head, list) 6174 trace_netif_receive_skb_list_entry(skb); 6175 } 6176 netif_receive_skb_list_internal(head); 6177 trace_netif_receive_skb_list_exit(0); 6178 } 6179 EXPORT_SYMBOL(netif_receive_skb_list); 6180 6181 /* Network device is going away, flush any packets still pending */ flush_backlog(struct work_struct * work)6182 static void flush_backlog(struct work_struct *work) 6183 { 6184 struct sk_buff *skb, *tmp; 6185 struct sk_buff_head list; 6186 struct softnet_data *sd; 6187 6188 __skb_queue_head_init(&list); 6189 local_bh_disable(); 6190 sd = this_cpu_ptr(&softnet_data); 6191 6192 backlog_lock_irq_disable(sd); 6193 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 6194 if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) { 6195 __skb_unlink(skb, &sd->input_pkt_queue); 6196 __skb_queue_tail(&list, skb); 6197 rps_input_queue_head_incr(sd); 6198 } 6199 } 6200 backlog_unlock_irq_enable(sd); 6201 6202 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6203 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 6204 if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) { 6205 __skb_unlink(skb, &sd->process_queue); 6206 __skb_queue_tail(&list, skb); 6207 rps_input_queue_head_incr(sd); 6208 } 6209 } 6210 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6211 local_bh_enable(); 6212 6213 __skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY); 6214 } 6215 flush_required(int cpu)6216 static bool flush_required(int cpu) 6217 { 6218 #if IS_ENABLED(CONFIG_RPS) 6219 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 6220 bool do_flush; 6221 6222 backlog_lock_irq_disable(sd); 6223 6224 /* as insertion into process_queue happens with the rps lock held, 6225 * process_queue access may race only with dequeue 6226 */ 6227 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 6228 !skb_queue_empty_lockless(&sd->process_queue); 6229 backlog_unlock_irq_enable(sd); 6230 6231 return do_flush; 6232 #endif 6233 /* without RPS we can't safely check input_pkt_queue: during a 6234 * concurrent remote skb_queue_splice() we can detect as empty both 6235 * input_pkt_queue and process_queue even if the latter could end-up 6236 * containing a lot of packets. 
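 * Be conservative and always report that a flush is required.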
6237 */ 6238 return true; 6239 } 6240 6241 struct flush_backlogs { 6242 cpumask_t flush_cpus; 6243 struct work_struct w[]; 6244 }; 6245 flush_backlogs_alloc(void)6246 static struct flush_backlogs *flush_backlogs_alloc(void) 6247 { 6248 return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids), 6249 GFP_KERNEL); 6250 } 6251 6252 static struct flush_backlogs *flush_backlogs_fallback; 6253 static DEFINE_MUTEX(flush_backlogs_mutex); 6254 flush_all_backlogs(void)6255 static void flush_all_backlogs(void) 6256 { 6257 struct flush_backlogs *ptr = flush_backlogs_alloc(); 6258 unsigned int cpu; 6259 6260 if (!ptr) { 6261 mutex_lock(&flush_backlogs_mutex); 6262 ptr = flush_backlogs_fallback; 6263 } 6264 cpumask_clear(&ptr->flush_cpus); 6265 6266 cpus_read_lock(); 6267 6268 for_each_online_cpu(cpu) { 6269 if (flush_required(cpu)) { 6270 INIT_WORK(&ptr->w[cpu], flush_backlog); 6271 queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]); 6272 __cpumask_set_cpu(cpu, &ptr->flush_cpus); 6273 } 6274 } 6275 6276 /* we can have in flight packet[s] on the cpus we are not flushing, 6277 * synchronize_net() in unregister_netdevice_many() will take care of 6278 * them. 6279 */ 6280 for_each_cpu(cpu, &ptr->flush_cpus) 6281 flush_work(&ptr->w[cpu]); 6282 6283 cpus_read_unlock(); 6284 6285 if (ptr != flush_backlogs_fallback) 6286 kfree(ptr); 6287 else 6288 mutex_unlock(&flush_backlogs_mutex); 6289 } 6290 net_rps_send_ipi(struct softnet_data * remsd)6291 static void net_rps_send_ipi(struct softnet_data *remsd) 6292 { 6293 #ifdef CONFIG_RPS 6294 while (remsd) { 6295 struct softnet_data *next = remsd->rps_ipi_next; 6296 6297 if (cpu_online(remsd->cpu)) 6298 smp_call_function_single_async(remsd->cpu, &remsd->csd); 6299 remsd = next; 6300 } 6301 #endif 6302 } 6303 6304 /* 6305 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 6306 * Note: called with local irq disabled, but exits with local irq enabled. 6307 */ net_rps_action_and_irq_enable(struct softnet_data * sd)6308 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 6309 { 6310 #ifdef CONFIG_RPS 6311 struct softnet_data *remsd = sd->rps_ipi_list; 6312 6313 if (!use_backlog_threads() && remsd) { 6314 sd->rps_ipi_list = NULL; 6315 6316 local_irq_enable(); 6317 6318 /* Send pending IPI's to kick RPS processing on remote cpus. */ 6319 net_rps_send_ipi(remsd); 6320 } else 6321 #endif 6322 local_irq_enable(); 6323 } 6324 sd_has_rps_ipi_waiting(struct softnet_data * sd)6325 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 6326 { 6327 #ifdef CONFIG_RPS 6328 return !use_backlog_threads() && sd->rps_ipi_list; 6329 #else 6330 return false; 6331 #endif 6332 } 6333 process_backlog(struct napi_struct * napi,int quota)6334 static int process_backlog(struct napi_struct *napi, int quota) 6335 { 6336 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 6337 bool again = true; 6338 int work = 0; 6339 6340 /* Check if we have pending ipi, its better to send them now, 6341 * not waiting net_rx_action() end. 
6342 */ 6343 if (sd_has_rps_ipi_waiting(sd)) { 6344 local_irq_disable(); 6345 net_rps_action_and_irq_enable(sd); 6346 } 6347 6348 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight); 6349 while (again) { 6350 struct sk_buff *skb; 6351 6352 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6353 while ((skb = __skb_dequeue(&sd->process_queue))) { 6354 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6355 rcu_read_lock(); 6356 __netif_receive_skb(skb); 6357 rcu_read_unlock(); 6358 if (++work >= quota) { 6359 rps_input_queue_head_add(sd, work); 6360 return work; 6361 } 6362 6363 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6364 } 6365 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6366 6367 backlog_lock_irq_disable(sd); 6368 if (skb_queue_empty(&sd->input_pkt_queue)) { 6369 /* 6370 * Inline a custom version of __napi_complete(). 6371 * only current cpu owns and manipulates this napi, 6372 * and NAPI_STATE_SCHED is the only possible flag set 6373 * on backlog. 6374 * We can use a plain write instead of clear_bit(), 6375 * and we dont need an smp_mb() memory barrier. 6376 */ 6377 napi->state &= NAPIF_STATE_THREADED; 6378 again = false; 6379 } else { 6380 local_lock_nested_bh(&softnet_data.process_queue_bh_lock); 6381 skb_queue_splice_tail_init(&sd->input_pkt_queue, 6382 &sd->process_queue); 6383 local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); 6384 } 6385 backlog_unlock_irq_enable(sd); 6386 } 6387 6388 if (work) 6389 rps_input_queue_head_add(sd, work); 6390 return work; 6391 } 6392 6393 /** 6394 * __napi_schedule - schedule for receive 6395 * @n: entry to schedule 6396 * 6397 * The entry's receive function will be scheduled to run. 6398 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 6399 */ __napi_schedule(struct napi_struct * n)6400 void __napi_schedule(struct napi_struct *n) 6401 { 6402 unsigned long flags; 6403 6404 local_irq_save(flags); 6405 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6406 local_irq_restore(flags); 6407 } 6408 EXPORT_SYMBOL(__napi_schedule); 6409 6410 /** 6411 * napi_schedule_prep - check if napi can be scheduled 6412 * @n: napi context 6413 * 6414 * Test if NAPI routine is already running, and if not mark 6415 * it as running. This is used as a condition variable to 6416 * insure only one NAPI poll instance runs. We also make 6417 * sure there is no pending NAPI disable. 6418 */ napi_schedule_prep(struct napi_struct * n)6419 bool napi_schedule_prep(struct napi_struct *n) 6420 { 6421 unsigned long new, val = READ_ONCE(n->state); 6422 6423 do { 6424 if (unlikely(val & NAPIF_STATE_DISABLE)) 6425 return false; 6426 new = val | NAPIF_STATE_SCHED; 6427 6428 /* Sets STATE_MISSED bit if STATE_SCHED was already set 6429 * This was suggested by Alexander Duyck, as compiler 6430 * emits better code than : 6431 * if (val & NAPIF_STATE_SCHED) 6432 * new |= NAPIF_STATE_MISSED; 6433 */ 6434 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6435 NAPIF_STATE_MISSED; 6436 } while (!try_cmpxchg(&n->state, &val, new)); 6437 6438 return !(val & NAPIF_STATE_SCHED); 6439 } 6440 EXPORT_SYMBOL(napi_schedule_prep); 6441 6442 /** 6443 * __napi_schedule_irqoff - schedule for receive 6444 * @n: entry to schedule 6445 * 6446 * Variant of __napi_schedule() assuming hard irqs are masked. 6447 * 6448 * On PREEMPT_RT enabled kernels this maps to __napi_schedule() 6449 * because the interrupt disabled assumption might not be true 6450 * due to force-threaded interrupts and spinlock substitution. 
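 *
 * Typical (illustrative) use from a device's hard interrupt handler, after
 * masking the device's own interrupts (priv->napi is driver-private state):
 *
 *   if (napi_schedule_prep(&priv->napi))
 *           __napi_schedule_irqoff(&priv->napi);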
6451 */ __napi_schedule_irqoff(struct napi_struct * n)6452 void __napi_schedule_irqoff(struct napi_struct *n) 6453 { 6454 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6455 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6456 else 6457 __napi_schedule(n); 6458 } 6459 EXPORT_SYMBOL(__napi_schedule_irqoff); 6460 napi_complete_done(struct napi_struct * n,int work_done)6461 bool napi_complete_done(struct napi_struct *n, int work_done) 6462 { 6463 unsigned long flags, val, new, timeout = 0; 6464 bool ret = true; 6465 6466 /* 6467 * 1) Don't let napi dequeue from the cpu poll list 6468 * just in case its running on a different cpu. 6469 * 2) If we are busy polling, do nothing here, we have 6470 * the guarantee we will be called later. 6471 */ 6472 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6473 NAPIF_STATE_IN_BUSY_POLL))) 6474 return false; 6475 6476 if (work_done) { 6477 if (n->gro.bitmask) 6478 timeout = napi_get_gro_flush_timeout(n); 6479 n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n); 6480 } 6481 if (n->defer_hard_irqs_count > 0) { 6482 n->defer_hard_irqs_count--; 6483 timeout = napi_get_gro_flush_timeout(n); 6484 if (timeout) 6485 ret = false; 6486 } 6487 6488 /* 6489 * When the NAPI instance uses a timeout and keeps postponing 6490 * it, we need to bound somehow the time packets are kept in 6491 * the GRO layer. 6492 */ 6493 gro_flush(&n->gro, !!timeout); 6494 gro_normal_list(&n->gro); 6495 6496 if (unlikely(!list_empty(&n->poll_list))) { 6497 /* If n->poll_list is not empty, we need to mask irqs */ 6498 local_irq_save(flags); 6499 list_del_init(&n->poll_list); 6500 local_irq_restore(flags); 6501 } 6502 WRITE_ONCE(n->list_owner, -1); 6503 6504 val = READ_ONCE(n->state); 6505 do { 6506 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6507 6508 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6509 NAPIF_STATE_SCHED_THREADED | 6510 NAPIF_STATE_PREFER_BUSY_POLL); 6511 6512 /* If STATE_MISSED was set, leave STATE_SCHED set, 6513 * because we will call napi->poll() one more time. 6514 * This C code was suggested by Alexander Duyck to help gcc. 6515 */ 6516 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6517 NAPIF_STATE_SCHED; 6518 } while (!try_cmpxchg(&n->state, &val, new)); 6519 6520 if (unlikely(val & NAPIF_STATE_MISSED)) { 6521 __napi_schedule(n); 6522 return false; 6523 } 6524 6525 if (timeout) 6526 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6527 HRTIMER_MODE_REL_PINNED); 6528 return ret; 6529 } 6530 EXPORT_SYMBOL(napi_complete_done); 6531 skb_defer_free_flush(struct softnet_data * sd)6532 static void skb_defer_free_flush(struct softnet_data *sd) 6533 { 6534 struct sk_buff *skb, *next; 6535 6536 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ 6537 if (!READ_ONCE(sd->defer_list)) 6538 return; 6539 6540 spin_lock(&sd->defer_lock); 6541 skb = sd->defer_list; 6542 sd->defer_list = NULL; 6543 sd->defer_count = 0; 6544 spin_unlock(&sd->defer_lock); 6545 6546 while (skb != NULL) { 6547 next = skb->next; 6548 napi_consume_skb(skb, 1); 6549 skb = next; 6550 } 6551 } 6552 6553 #if defined(CONFIG_NET_RX_BUSY_POLL) 6554 __busy_poll_stop(struct napi_struct * napi,bool skip_schedule)6555 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6556 { 6557 if (!skip_schedule) { 6558 gro_normal_list(&napi->gro); 6559 __napi_schedule(napi); 6560 return; 6561 } 6562 6563 /* Flush too old packets. 
If HZ < 1000, flush all packets */ 6564 gro_flush(&napi->gro, HZ >= 1000); 6565 gro_normal_list(&napi->gro); 6566 6567 clear_bit(NAPI_STATE_SCHED, &napi->state); 6568 } 6569 6570 enum { 6571 NAPI_F_PREFER_BUSY_POLL = 1, 6572 NAPI_F_END_ON_RESCHED = 2, 6573 }; 6574 busy_poll_stop(struct napi_struct * napi,void * have_poll_lock,unsigned flags,u16 budget)6575 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, 6576 unsigned flags, u16 budget) 6577 { 6578 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 6579 bool skip_schedule = false; 6580 unsigned long timeout; 6581 int rc; 6582 6583 /* Busy polling means there is a high chance device driver hard irq 6584 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6585 * set in napi_schedule_prep(). 6586 * Since we are about to call napi->poll() once more, we can safely 6587 * clear NAPI_STATE_MISSED. 6588 * 6589 * Note: x86 could use a single "lock and ..." instruction 6590 * to perform these two clear_bit() 6591 */ 6592 clear_bit(NAPI_STATE_MISSED, &napi->state); 6593 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6594 6595 local_bh_disable(); 6596 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 6597 6598 if (flags & NAPI_F_PREFER_BUSY_POLL) { 6599 napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); 6600 timeout = napi_get_gro_flush_timeout(napi); 6601 if (napi->defer_hard_irqs_count && timeout) { 6602 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6603 skip_schedule = true; 6604 } 6605 } 6606 6607 /* All we really want here is to re-enable device interrupts. 6608 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6609 */ 6610 rc = napi->poll(napi, budget); 6611 /* We can't gro_normal_list() here, because napi->poll() might have 6612 * rearmed the napi (napi_complete_done()) in which case it could 6613 * already be running on another CPU. 6614 */ 6615 trace_napi_poll(napi, rc, budget); 6616 netpoll_poll_unlock(have_poll_lock); 6617 if (rc == budget) 6618 __busy_poll_stop(napi, skip_schedule); 6619 bpf_net_ctx_clear(bpf_net_ctx); 6620 local_bh_enable(); 6621 } 6622 __napi_busy_loop(unsigned int napi_id,bool (* loop_end)(void *,unsigned long),void * loop_end_arg,unsigned flags,u16 budget)6623 static void __napi_busy_loop(unsigned int napi_id, 6624 bool (*loop_end)(void *, unsigned long), 6625 void *loop_end_arg, unsigned flags, u16 budget) 6626 { 6627 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6628 int (*napi_poll)(struct napi_struct *napi, int budget); 6629 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 6630 void *have_poll_lock = NULL; 6631 struct napi_struct *napi; 6632 6633 WARN_ON_ONCE(!rcu_read_lock_held()); 6634 6635 restart: 6636 napi_poll = NULL; 6637 6638 napi = napi_by_id(napi_id); 6639 if (!napi) 6640 return; 6641 6642 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6643 preempt_disable(); 6644 for (;;) { 6645 int work = 0; 6646 6647 local_bh_disable(); 6648 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 6649 if (!napi_poll) { 6650 unsigned long val = READ_ONCE(napi->state); 6651 6652 /* If multiple threads are competing for this napi, 6653 * we avoid dirtying napi->state as much as we can. 
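 * (e.g. several sockets busy polling the same receive queue).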
6654 */ 6655 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6656 NAPIF_STATE_IN_BUSY_POLL)) { 6657 if (flags & NAPI_F_PREFER_BUSY_POLL) 6658 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6659 goto count; 6660 } 6661 if (cmpxchg(&napi->state, val, 6662 val | NAPIF_STATE_IN_BUSY_POLL | 6663 NAPIF_STATE_SCHED) != val) { 6664 if (flags & NAPI_F_PREFER_BUSY_POLL) 6665 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6666 goto count; 6667 } 6668 have_poll_lock = netpoll_poll_lock(napi); 6669 napi_poll = napi->poll; 6670 } 6671 work = napi_poll(napi, budget); 6672 trace_napi_poll(napi, work, budget); 6673 gro_normal_list(&napi->gro); 6674 count: 6675 if (work > 0) 6676 __NET_ADD_STATS(dev_net(napi->dev), 6677 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6678 skb_defer_free_flush(this_cpu_ptr(&softnet_data)); 6679 bpf_net_ctx_clear(bpf_net_ctx); 6680 local_bh_enable(); 6681 6682 if (!loop_end || loop_end(loop_end_arg, start_time)) 6683 break; 6684 6685 if (unlikely(need_resched())) { 6686 if (flags & NAPI_F_END_ON_RESCHED) 6687 break; 6688 if (napi_poll) 6689 busy_poll_stop(napi, have_poll_lock, flags, budget); 6690 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6691 preempt_enable(); 6692 rcu_read_unlock(); 6693 cond_resched(); 6694 rcu_read_lock(); 6695 if (loop_end(loop_end_arg, start_time)) 6696 return; 6697 goto restart; 6698 } 6699 cpu_relax(); 6700 } 6701 if (napi_poll) 6702 busy_poll_stop(napi, have_poll_lock, flags, budget); 6703 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6704 preempt_enable(); 6705 } 6706 napi_busy_loop_rcu(unsigned int napi_id,bool (* loop_end)(void *,unsigned long),void * loop_end_arg,bool prefer_busy_poll,u16 budget)6707 void napi_busy_loop_rcu(unsigned int napi_id, 6708 bool (*loop_end)(void *, unsigned long), 6709 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6710 { 6711 unsigned flags = NAPI_F_END_ON_RESCHED; 6712 6713 if (prefer_busy_poll) 6714 flags |= NAPI_F_PREFER_BUSY_POLL; 6715 6716 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget); 6717 } 6718 napi_busy_loop(unsigned int napi_id,bool (* loop_end)(void *,unsigned long),void * loop_end_arg,bool prefer_busy_poll,u16 budget)6719 void napi_busy_loop(unsigned int napi_id, 6720 bool (*loop_end)(void *, unsigned long), 6721 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6722 { 6723 unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0; 6724 6725 rcu_read_lock(); 6726 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget); 6727 rcu_read_unlock(); 6728 } 6729 EXPORT_SYMBOL(napi_busy_loop); 6730 napi_suspend_irqs(unsigned int napi_id)6731 void napi_suspend_irqs(unsigned int napi_id) 6732 { 6733 struct napi_struct *napi; 6734 6735 rcu_read_lock(); 6736 napi = napi_by_id(napi_id); 6737 if (napi) { 6738 unsigned long timeout = napi_get_irq_suspend_timeout(napi); 6739 6740 if (timeout) 6741 hrtimer_start(&napi->timer, ns_to_ktime(timeout), 6742 HRTIMER_MODE_REL_PINNED); 6743 } 6744 rcu_read_unlock(); 6745 } 6746 napi_resume_irqs(unsigned int napi_id)6747 void napi_resume_irqs(unsigned int napi_id) 6748 { 6749 struct napi_struct *napi; 6750 6751 rcu_read_lock(); 6752 napi = napi_by_id(napi_id); 6753 if (napi) { 6754 /* If irq_suspend_timeout is set to 0 between the call to 6755 * napi_suspend_irqs and now, the original value still 6756 * determines the safety timeout as intended and napi_watchdog 6757 * will resume irq processing. 
6758 */ 6759 if (napi_get_irq_suspend_timeout(napi)) { 6760 local_bh_disable(); 6761 napi_schedule(napi); 6762 local_bh_enable(); 6763 } 6764 } 6765 rcu_read_unlock(); 6766 } 6767 6768 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6769 __napi_hash_add_with_id(struct napi_struct * napi,unsigned int napi_id)6770 static void __napi_hash_add_with_id(struct napi_struct *napi, 6771 unsigned int napi_id) 6772 { 6773 napi->gro.cached_napi_id = napi_id; 6774 6775 WRITE_ONCE(napi->napi_id, napi_id); 6776 hlist_add_head_rcu(&napi->napi_hash_node, 6777 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6778 } 6779 napi_hash_add_with_id(struct napi_struct * napi,unsigned int napi_id)6780 static void napi_hash_add_with_id(struct napi_struct *napi, 6781 unsigned int napi_id) 6782 { 6783 unsigned long flags; 6784 6785 spin_lock_irqsave(&napi_hash_lock, flags); 6786 WARN_ON_ONCE(napi_by_id(napi_id)); 6787 __napi_hash_add_with_id(napi, napi_id); 6788 spin_unlock_irqrestore(&napi_hash_lock, flags); 6789 } 6790 napi_hash_add(struct napi_struct * napi)6791 static void napi_hash_add(struct napi_struct *napi) 6792 { 6793 unsigned long flags; 6794 6795 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6796 return; 6797 6798 spin_lock_irqsave(&napi_hash_lock, flags); 6799 6800 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6801 do { 6802 if (unlikely(!napi_id_valid(++napi_gen_id))) 6803 napi_gen_id = MIN_NAPI_ID; 6804 } while (napi_by_id(napi_gen_id)); 6805 6806 __napi_hash_add_with_id(napi, napi_gen_id); 6807 6808 spin_unlock_irqrestore(&napi_hash_lock, flags); 6809 } 6810 6811 /* Warning : caller is responsible to make sure rcu grace period 6812 * is respected before freeing memory containing @napi 6813 */ napi_hash_del(struct napi_struct * napi)6814 static void napi_hash_del(struct napi_struct *napi) 6815 { 6816 unsigned long flags; 6817 6818 spin_lock_irqsave(&napi_hash_lock, flags); 6819 6820 hlist_del_init_rcu(&napi->napi_hash_node); 6821 6822 spin_unlock_irqrestore(&napi_hash_lock, flags); 6823 } 6824 napi_watchdog(struct hrtimer * timer)6825 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6826 { 6827 struct napi_struct *napi; 6828 6829 napi = container_of(timer, struct napi_struct, timer); 6830 6831 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6832 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 6833 */ 6834 if (!napi_disable_pending(napi) && 6835 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6836 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6837 __napi_schedule_irqoff(napi); 6838 } 6839 6840 return HRTIMER_NORESTART; 6841 } 6842 dev_set_threaded(struct net_device * dev,bool threaded)6843 int dev_set_threaded(struct net_device *dev, bool threaded) 6844 { 6845 struct napi_struct *napi; 6846 int err = 0; 6847 6848 netdev_assert_locked_or_invisible(dev); 6849 6850 if (dev->threaded == threaded) 6851 return 0; 6852 6853 if (threaded) { 6854 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6855 if (!napi->thread) { 6856 err = napi_kthread_create(napi); 6857 if (err) { 6858 threaded = false; 6859 break; 6860 } 6861 } 6862 } 6863 } 6864 6865 WRITE_ONCE(dev->threaded, threaded); 6866 6867 /* Make sure kthread is created before THREADED bit 6868 * is set. 6869 */ 6870 smp_mb__before_atomic(); 6871 6872 /* Setting/unsetting threaded mode on a napi might not immediately 6873 * take effect, if the current napi instance is actively being 6874 * polled. 
In this case, the switch between threaded mode and 6875 * softirq mode will happen in the next round of napi_schedule(). 6876 * This should not cause hiccups/stalls to the live traffic. 6877 */ 6878 list_for_each_entry(napi, &dev->napi_list, dev_list) 6879 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); 6880 6881 return err; 6882 } 6883 EXPORT_SYMBOL(dev_set_threaded); 6884 6885 /** 6886 * netif_queue_set_napi - Associate queue with the napi 6887 * @dev: device to which NAPI and queue belong 6888 * @queue_index: Index of queue 6889 * @type: queue type as RX or TX 6890 * @napi: NAPI context, pass NULL to clear previously set NAPI 6891 * 6892 * Set queue with its corresponding napi context. This should be done after 6893 * registering the NAPI handler for the queue-vector and the queues have been 6894 * mapped to the corresponding interrupt vector. 6895 */ netif_queue_set_napi(struct net_device * dev,unsigned int queue_index,enum netdev_queue_type type,struct napi_struct * napi)6896 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, 6897 enum netdev_queue_type type, struct napi_struct *napi) 6898 { 6899 struct netdev_rx_queue *rxq; 6900 struct netdev_queue *txq; 6901 6902 if (WARN_ON_ONCE(napi && !napi->dev)) 6903 return; 6904 netdev_ops_assert_locked_or_invisible(dev); 6905 6906 switch (type) { 6907 case NETDEV_QUEUE_TYPE_RX: 6908 rxq = __netif_get_rx_queue(dev, queue_index); 6909 rxq->napi = napi; 6910 return; 6911 case NETDEV_QUEUE_TYPE_TX: 6912 txq = netdev_get_tx_queue(dev, queue_index); 6913 txq->napi = napi; 6914 return; 6915 default: 6916 return; 6917 } 6918 } 6919 EXPORT_SYMBOL(netif_queue_set_napi); 6920 6921 static void netif_napi_irq_notify(struct irq_affinity_notify * notify,const cpumask_t * mask)6922 netif_napi_irq_notify(struct irq_affinity_notify *notify, 6923 const cpumask_t *mask) 6924 { 6925 struct napi_struct *napi = 6926 container_of(notify, struct napi_struct, notify); 6927 #ifdef CONFIG_RFS_ACCEL 6928 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; 6929 int err; 6930 #endif 6931 6932 if (napi->config && napi->dev->irq_affinity_auto) 6933 cpumask_copy(&napi->config->affinity_mask, mask); 6934 6935 #ifdef CONFIG_RFS_ACCEL 6936 if (napi->dev->rx_cpu_rmap_auto) { 6937 err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); 6938 if (err) 6939 netdev_warn(napi->dev, "RMAP update failed (%d)\n", 6940 err); 6941 } 6942 #endif 6943 } 6944 6945 #ifdef CONFIG_RFS_ACCEL netif_napi_affinity_release(struct kref * ref)6946 static void netif_napi_affinity_release(struct kref *ref) 6947 { 6948 struct napi_struct *napi = 6949 container_of(ref, struct napi_struct, notify.kref); 6950 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; 6951 6952 netdev_assert_locked(napi->dev); 6953 WARN_ON(test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, 6954 &napi->state)); 6955 6956 if (!napi->dev->rx_cpu_rmap_auto) 6957 return; 6958 rmap->obj[napi->napi_rmap_idx] = NULL; 6959 napi->napi_rmap_idx = -1; 6960 cpu_rmap_put(rmap); 6961 } 6962 netif_enable_cpu_rmap(struct net_device * dev,unsigned int num_irqs)6963 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs) 6964 { 6965 if (dev->rx_cpu_rmap_auto) 6966 return 0; 6967 6968 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_irqs); 6969 if (!dev->rx_cpu_rmap) 6970 return -ENOMEM; 6971 6972 dev->rx_cpu_rmap_auto = true; 6973 return 0; 6974 } 6975 EXPORT_SYMBOL(netif_enable_cpu_rmap); 6976 netif_del_cpu_rmap(struct net_device * dev)6977 static void netif_del_cpu_rmap(struct net_device *dev) 6978 { 6979 struct cpu_rmap 
*rmap = dev->rx_cpu_rmap; 6980 6981 if (!dev->rx_cpu_rmap_auto) 6982 return; 6983 6984 /* Free the rmap */ 6985 cpu_rmap_put(rmap); 6986 dev->rx_cpu_rmap = NULL; 6987 dev->rx_cpu_rmap_auto = false; 6988 } 6989 6990 #else netif_napi_affinity_release(struct kref * ref)6991 static void netif_napi_affinity_release(struct kref *ref) 6992 { 6993 } 6994 netif_enable_cpu_rmap(struct net_device * dev,unsigned int num_irqs)6995 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs) 6996 { 6997 return 0; 6998 } 6999 EXPORT_SYMBOL(netif_enable_cpu_rmap); 7000 netif_del_cpu_rmap(struct net_device * dev)7001 static void netif_del_cpu_rmap(struct net_device *dev) 7002 { 7003 } 7004 #endif 7005 netif_set_affinity_auto(struct net_device * dev)7006 void netif_set_affinity_auto(struct net_device *dev) 7007 { 7008 unsigned int i, maxqs, numa; 7009 7010 maxqs = max(dev->num_tx_queues, dev->num_rx_queues); 7011 numa = dev_to_node(&dev->dev); 7012 7013 for (i = 0; i < maxqs; i++) 7014 cpumask_set_cpu(cpumask_local_spread(i, numa), 7015 &dev->napi_config[i].affinity_mask); 7016 7017 dev->irq_affinity_auto = true; 7018 } 7019 EXPORT_SYMBOL(netif_set_affinity_auto); 7020 netif_napi_set_irq_locked(struct napi_struct * napi,int irq)7021 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) 7022 { 7023 int rc; 7024 7025 netdev_assert_locked_or_invisible(napi->dev); 7026 7027 if (napi->irq == irq) 7028 return; 7029 7030 /* Remove existing resources */ 7031 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) 7032 irq_set_affinity_notifier(napi->irq, NULL); 7033 7034 napi->irq = irq; 7035 if (irq < 0 || 7036 (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) 7037 return; 7038 7039 /* Abort for buggy drivers */ 7040 if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) 7041 return; 7042 7043 #ifdef CONFIG_RFS_ACCEL 7044 if (napi->dev->rx_cpu_rmap_auto) { 7045 rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); 7046 if (rc < 0) 7047 return; 7048 7049 cpu_rmap_get(napi->dev->rx_cpu_rmap); 7050 napi->napi_rmap_idx = rc; 7051 } 7052 #endif 7053 7054 /* Use core IRQ notifier */ 7055 napi->notify.notify = netif_napi_irq_notify; 7056 napi->notify.release = netif_napi_affinity_release; 7057 rc = irq_set_affinity_notifier(irq, &napi->notify); 7058 if (rc) { 7059 netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", 7060 rc); 7061 goto put_rmap; 7062 } 7063 7064 set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); 7065 return; 7066 7067 put_rmap: 7068 #ifdef CONFIG_RFS_ACCEL 7069 if (napi->dev->rx_cpu_rmap_auto) { 7070 napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; 7071 cpu_rmap_put(napi->dev->rx_cpu_rmap); 7072 napi->napi_rmap_idx = -1; 7073 } 7074 #endif 7075 napi->notify.notify = NULL; 7076 napi->notify.release = NULL; 7077 } 7078 EXPORT_SYMBOL(netif_napi_set_irq_locked); 7079 napi_restore_config(struct napi_struct * n)7080 static void napi_restore_config(struct napi_struct *n) 7081 { 7082 n->defer_hard_irqs = n->config->defer_hard_irqs; 7083 n->gro_flush_timeout = n->config->gro_flush_timeout; 7084 n->irq_suspend_timeout = n->config->irq_suspend_timeout; 7085 7086 if (n->dev->irq_affinity_auto && 7087 test_bit(NAPI_STATE_HAS_NOTIFIER, &n->state)) 7088 irq_set_affinity(n->irq, &n->config->affinity_mask); 7089 7090 /* a NAPI ID might be stored in the config, if so use it. if not, use 7091 * napi_hash_add to generate one for us. 
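 * A freshly generated ID is saved back into the config below, so the same
 * ID is reused the next time this NAPI instance is enabled.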
7092 */ 7093 if (n->config->napi_id) { 7094 napi_hash_add_with_id(n, n->config->napi_id); 7095 } else { 7096 napi_hash_add(n); 7097 n->config->napi_id = n->napi_id; 7098 } 7099 } 7100 napi_save_config(struct napi_struct * n)7101 static void napi_save_config(struct napi_struct *n) 7102 { 7103 n->config->defer_hard_irqs = n->defer_hard_irqs; 7104 n->config->gro_flush_timeout = n->gro_flush_timeout; 7105 n->config->irq_suspend_timeout = n->irq_suspend_timeout; 7106 napi_hash_del(n); 7107 } 7108 7109 /* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will 7110 * inherit an existing ID try to insert it at the right position. 7111 */ 7112 static void netif_napi_dev_list_add(struct net_device * dev,struct napi_struct * napi)7113 netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) 7114 { 7115 unsigned int new_id, pos_id; 7116 struct list_head *higher; 7117 struct napi_struct *pos; 7118 7119 new_id = UINT_MAX; 7120 if (napi->config && napi->config->napi_id) 7121 new_id = napi->config->napi_id; 7122 7123 higher = &dev->napi_list; 7124 list_for_each_entry(pos, &dev->napi_list, dev_list) { 7125 if (napi_id_valid(pos->napi_id)) 7126 pos_id = pos->napi_id; 7127 else if (pos->config) 7128 pos_id = pos->config->napi_id; 7129 else 7130 pos_id = UINT_MAX; 7131 7132 if (pos_id <= new_id) 7133 break; 7134 higher = &pos->dev_list; 7135 } 7136 list_add_rcu(&napi->dev_list, higher); /* adds after higher */ 7137 } 7138 7139 /* Double check that napi_get_frags() allocates skbs with 7140 * skb->head being backed by slab, not a page fragment. 7141 * This is to make sure bug fixed in 3226b158e67c 7142 * ("net: avoid 32 x truesize under-estimation for tiny skbs") 7143 * does not accidentally come back. 7144 */ napi_get_frags_check(struct napi_struct * napi)7145 static void napi_get_frags_check(struct napi_struct *napi) 7146 { 7147 struct sk_buff *skb; 7148 7149 local_bh_disable(); 7150 skb = napi_get_frags(napi); 7151 WARN_ON_ONCE(skb && skb->head_frag); 7152 napi_free_frags(napi); 7153 local_bh_enable(); 7154 } 7155 netif_napi_add_weight_locked(struct net_device * dev,struct napi_struct * napi,int (* poll)(struct napi_struct *,int),int weight)7156 void netif_napi_add_weight_locked(struct net_device *dev, 7157 struct napi_struct *napi, 7158 int (*poll)(struct napi_struct *, int), 7159 int weight) 7160 { 7161 netdev_assert_locked(dev); 7162 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 7163 return; 7164 7165 INIT_LIST_HEAD(&napi->poll_list); 7166 INIT_HLIST_NODE(&napi->napi_hash_node); 7167 hrtimer_setup(&napi->timer, napi_watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 7168 gro_init(&napi->gro); 7169 napi->skb = NULL; 7170 napi->poll = poll; 7171 if (weight > NAPI_POLL_WEIGHT) 7172 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 7173 weight); 7174 napi->weight = weight; 7175 napi->dev = dev; 7176 #ifdef CONFIG_NETPOLL 7177 napi->poll_owner = -1; 7178 #endif 7179 napi->list_owner = -1; 7180 set_bit(NAPI_STATE_SCHED, &napi->state); 7181 set_bit(NAPI_STATE_NPSVC, &napi->state); 7182 netif_napi_dev_list_add(dev, napi); 7183 7184 /* default settings from sysfs are applied to all NAPIs. any per-NAPI 7185 * configuration will be loaded in napi_enable 7186 */ 7187 napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); 7188 napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); 7189 7190 napi_get_frags_check(napi); 7191 /* Create kthread for this napi if dev->threaded is set. 
7192 * Clear dev->threaded if kthread creation failed so that 7193 * threaded mode will not be enabled in napi_enable(). 7194 */ 7195 if (dev->threaded && napi_kthread_create(napi)) 7196 dev->threaded = false; 7197 netif_napi_set_irq_locked(napi, -1); 7198 } 7199 EXPORT_SYMBOL(netif_napi_add_weight_locked); 7200 napi_disable_locked(struct napi_struct * n)7201 void napi_disable_locked(struct napi_struct *n) 7202 { 7203 unsigned long val, new; 7204 7205 might_sleep(); 7206 netdev_assert_locked(n->dev); 7207 7208 set_bit(NAPI_STATE_DISABLE, &n->state); 7209 7210 val = READ_ONCE(n->state); 7211 do { 7212 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 7213 usleep_range(20, 200); 7214 val = READ_ONCE(n->state); 7215 } 7216 7217 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 7218 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 7219 } while (!try_cmpxchg(&n->state, &val, new)); 7220 7221 hrtimer_cancel(&n->timer); 7222 7223 if (n->config) 7224 napi_save_config(n); 7225 else 7226 napi_hash_del(n); 7227 7228 clear_bit(NAPI_STATE_DISABLE, &n->state); 7229 } 7230 EXPORT_SYMBOL(napi_disable_locked); 7231 7232 /** 7233 * napi_disable() - prevent NAPI from scheduling 7234 * @n: NAPI context 7235 * 7236 * Stop NAPI from being scheduled on this context. 7237 * Waits till any outstanding processing completes. 7238 * Takes netdev_lock() for associated net_device. 7239 */ napi_disable(struct napi_struct * n)7240 void napi_disable(struct napi_struct *n) 7241 { 7242 netdev_lock(n->dev); 7243 napi_disable_locked(n); 7244 netdev_unlock(n->dev); 7245 } 7246 EXPORT_SYMBOL(napi_disable); 7247 napi_enable_locked(struct napi_struct * n)7248 void napi_enable_locked(struct napi_struct *n) 7249 { 7250 unsigned long new, val = READ_ONCE(n->state); 7251 7252 if (n->config) 7253 napi_restore_config(n); 7254 else 7255 napi_hash_add(n); 7256 7257 do { 7258 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 7259 7260 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 7261 if (n->dev->threaded && n->thread) 7262 new |= NAPIF_STATE_THREADED; 7263 } while (!try_cmpxchg(&n->state, &val, new)); 7264 } 7265 EXPORT_SYMBOL(napi_enable_locked); 7266 7267 /** 7268 * napi_enable() - enable NAPI scheduling 7269 * @n: NAPI context 7270 * 7271 * Enable scheduling of a NAPI instance. 7272 * Must be paired with napi_disable(). 7273 * Takes netdev_lock() for associated net_device. 7274 */ napi_enable(struct napi_struct * n)7275 void napi_enable(struct napi_struct *n) 7276 { 7277 netdev_lock(n->dev); 7278 napi_enable_locked(n); 7279 netdev_unlock(n->dev); 7280 } 7281 EXPORT_SYMBOL(napi_enable); 7282 7283 /* Must be called in process context */ __netif_napi_del_locked(struct napi_struct * napi)7284 void __netif_napi_del_locked(struct napi_struct *napi) 7285 { 7286 netdev_assert_locked(napi->dev); 7287 7288 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 7289 return; 7290 7291 /* Make sure NAPI is disabled (or was never enabled). 
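 * netif_napi_add() leaves NAPI_STATE_SCHED set and napi_disable() sets it
 * again, so a disabled (or never enabled) NAPI still has the bit set here.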
*/ 7292 WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); 7293 7294 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) 7295 irq_set_affinity_notifier(napi->irq, NULL); 7296 7297 if (napi->config) { 7298 napi->index = -1; 7299 napi->config = NULL; 7300 } 7301 7302 list_del_rcu(&napi->dev_list); 7303 napi_free_frags(napi); 7304 7305 gro_cleanup(&napi->gro); 7306 7307 if (napi->thread) { 7308 kthread_stop(napi->thread); 7309 napi->thread = NULL; 7310 } 7311 } 7312 EXPORT_SYMBOL(__netif_napi_del_locked); 7313 __napi_poll(struct napi_struct * n,bool * repoll)7314 static int __napi_poll(struct napi_struct *n, bool *repoll) 7315 { 7316 int work, weight; 7317 7318 weight = n->weight; 7319 7320 /* This NAPI_STATE_SCHED test is for avoiding a race 7321 * with netpoll's poll_napi(). Only the entity which 7322 * obtains the lock and sees NAPI_STATE_SCHED set will 7323 * actually make the ->poll() call. Therefore we avoid 7324 * accidentally calling ->poll() when NAPI is not scheduled. 7325 */ 7326 work = 0; 7327 if (napi_is_scheduled(n)) { 7328 work = n->poll(n, weight); 7329 trace_napi_poll(n, work, weight); 7330 7331 xdp_do_check_flushed(n); 7332 } 7333 7334 if (unlikely(work > weight)) 7335 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 7336 n->poll, work, weight); 7337 7338 if (likely(work < weight)) 7339 return work; 7340 7341 /* Drivers must not modify the NAPI state if they 7342 * consume the entire weight. In such cases this code 7343 * still "owns" the NAPI instance and therefore can 7344 * move the instance around on the list at-will. 7345 */ 7346 if (unlikely(napi_disable_pending(n))) { 7347 napi_complete(n); 7348 return work; 7349 } 7350 7351 /* The NAPI context has more processing work, but busy-polling 7352 * is preferred. Exit early. 7353 */ 7354 if (napi_prefer_busy_poll(n)) { 7355 if (napi_complete_done(n, work)) { 7356 /* If timeout is not set, we need to make sure 7357 * that the NAPI is re-scheduled. 7358 */ 7359 napi_schedule(n); 7360 } 7361 return work; 7362 } 7363 7364 /* Flush too old packets. If HZ < 1000, flush all packets */ 7365 gro_flush(&n->gro, HZ >= 1000); 7366 gro_normal_list(&n->gro); 7367 7368 /* Some drivers may have called napi_schedule 7369 * prior to exhausting their budget. 7370 */ 7371 if (unlikely(!list_empty(&n->poll_list))) { 7372 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 7373 n->dev ? n->dev->name : "backlog"); 7374 return work; 7375 } 7376 7377 *repoll = true; 7378 7379 return work; 7380 } 7381 napi_poll(struct napi_struct * n,struct list_head * repoll)7382 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 7383 { 7384 bool do_repoll = false; 7385 void *have; 7386 int work; 7387 7388 list_del_init(&n->poll_list); 7389 7390 have = netpoll_poll_lock(n); 7391 7392 work = __napi_poll(n, &do_repoll); 7393 7394 if (do_repoll) 7395 list_add_tail(&n->poll_list, repoll); 7396 7397 netpoll_poll_unlock(have); 7398 7399 return work; 7400 } 7401 napi_thread_wait(struct napi_struct * napi)7402 static int napi_thread_wait(struct napi_struct *napi) 7403 { 7404 set_current_state(TASK_INTERRUPTIBLE); 7405 7406 while (!kthread_should_stop()) { 7407 /* Testing SCHED_THREADED bit here to make sure the current 7408 * kthread owns this napi and could poll on this napi. 7409 * Testing SCHED bit is not enough because SCHED bit might be 7410 * set by some other busy poll thread or by napi_disable(). 
7411 */ 7412 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) { 7413 WARN_ON(!list_empty(&napi->poll_list)); 7414 __set_current_state(TASK_RUNNING); 7415 return 0; 7416 } 7417 7418 schedule(); 7419 set_current_state(TASK_INTERRUPTIBLE); 7420 } 7421 __set_current_state(TASK_RUNNING); 7422 7423 return -1; 7424 } 7425 napi_threaded_poll_loop(struct napi_struct * napi)7426 static void napi_threaded_poll_loop(struct napi_struct *napi) 7427 { 7428 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 7429 struct softnet_data *sd; 7430 unsigned long last_qs = jiffies; 7431 7432 for (;;) { 7433 bool repoll = false; 7434 void *have; 7435 7436 local_bh_disable(); 7437 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 7438 7439 sd = this_cpu_ptr(&softnet_data); 7440 sd->in_napi_threaded_poll = true; 7441 7442 have = netpoll_poll_lock(napi); 7443 __napi_poll(napi, &repoll); 7444 netpoll_poll_unlock(have); 7445 7446 sd->in_napi_threaded_poll = false; 7447 barrier(); 7448 7449 if (sd_has_rps_ipi_waiting(sd)) { 7450 local_irq_disable(); 7451 net_rps_action_and_irq_enable(sd); 7452 } 7453 skb_defer_free_flush(sd); 7454 bpf_net_ctx_clear(bpf_net_ctx); 7455 local_bh_enable(); 7456 7457 if (!repoll) 7458 break; 7459 7460 rcu_softirq_qs_periodic(last_qs); 7461 cond_resched(); 7462 } 7463 } 7464 napi_threaded_poll(void * data)7465 static int napi_threaded_poll(void *data) 7466 { 7467 struct napi_struct *napi = data; 7468 7469 while (!napi_thread_wait(napi)) 7470 napi_threaded_poll_loop(napi); 7471 7472 return 0; 7473 } 7474 net_rx_action(void)7475 static __latent_entropy void net_rx_action(void) 7476 { 7477 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 7478 unsigned long time_limit = jiffies + 7479 usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs)); 7480 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 7481 int budget = READ_ONCE(net_hotdata.netdev_budget); 7482 LIST_HEAD(list); 7483 LIST_HEAD(repoll); 7484 7485 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 7486 start: 7487 sd->in_net_rx_action = true; 7488 local_irq_disable(); 7489 list_splice_init(&sd->poll_list, &list); 7490 local_irq_enable(); 7491 7492 for (;;) { 7493 struct napi_struct *n; 7494 7495 skb_defer_free_flush(sd); 7496 7497 if (list_empty(&list)) { 7498 if (list_empty(&repoll)) { 7499 sd->in_net_rx_action = false; 7500 barrier(); 7501 /* We need to check if ____napi_schedule() 7502 * had refilled poll_list while 7503 * sd->in_net_rx_action was true. 7504 */ 7505 if (!list_empty(&sd->poll_list)) 7506 goto start; 7507 if (!sd_has_rps_ipi_waiting(sd)) 7508 goto end; 7509 } 7510 break; 7511 } 7512 7513 n = list_first_entry(&list, struct napi_struct, poll_list); 7514 budget -= napi_poll(n, &repoll); 7515 7516 /* If softirq window is exhausted then punt. 7517 * Allow this to run for 2 jiffies since which will allow 7518 * an average latency of 1.5/HZ. 
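		 * (time_limit above is jiffies + usecs_to_jiffies(netdev_budget_usecs),
		 * which by default corresponds to roughly two ticks.  Since the
		 * current tick is already partly elapsed when polling starts, the
		 * usable window is between one and two ticks, i.e. about 1.5/HZ
		 * on average.)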
7519 */ 7520 if (unlikely(budget <= 0 || 7521 time_after_eq(jiffies, time_limit))) { 7522 sd->time_squeeze++; 7523 break; 7524 } 7525 } 7526 7527 local_irq_disable(); 7528 7529 list_splice_tail_init(&sd->poll_list, &list); 7530 list_splice_tail(&repoll, &list); 7531 list_splice(&list, &sd->poll_list); 7532 if (!list_empty(&sd->poll_list)) 7533 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 7534 else 7535 sd->in_net_rx_action = false; 7536 7537 net_rps_action_and_irq_enable(sd); 7538 end: 7539 bpf_net_ctx_clear(bpf_net_ctx); 7540 } 7541 7542 struct netdev_adjacent { 7543 struct net_device *dev; 7544 netdevice_tracker dev_tracker; 7545 7546 /* upper master flag, there can only be one master device per list */ 7547 bool master; 7548 7549 /* lookup ignore flag */ 7550 bool ignore; 7551 7552 /* counter for the number of times this device was added to us */ 7553 u16 ref_nr; 7554 7555 /* private field for the users */ 7556 void *private; 7557 7558 struct list_head list; 7559 struct rcu_head rcu; 7560 }; 7561 __netdev_find_adj(struct net_device * adj_dev,struct list_head * adj_list)7562 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 7563 struct list_head *adj_list) 7564 { 7565 struct netdev_adjacent *adj; 7566 7567 list_for_each_entry(adj, adj_list, list) { 7568 if (adj->dev == adj_dev) 7569 return adj; 7570 } 7571 return NULL; 7572 } 7573 ____netdev_has_upper_dev(struct net_device * upper_dev,struct netdev_nested_priv * priv)7574 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 7575 struct netdev_nested_priv *priv) 7576 { 7577 struct net_device *dev = (struct net_device *)priv->data; 7578 7579 return upper_dev == dev; 7580 } 7581 7582 /** 7583 * netdev_has_upper_dev - Check if device is linked to an upper device 7584 * @dev: device 7585 * @upper_dev: upper device to check 7586 * 7587 * Find out if a device is linked to specified upper device and return true 7588 * in case it is. Note that this checks only immediate upper device, 7589 * not through a complete stack of devices. The caller must hold the RTNL lock. 7590 */ netdev_has_upper_dev(struct net_device * dev,struct net_device * upper_dev)7591 bool netdev_has_upper_dev(struct net_device *dev, 7592 struct net_device *upper_dev) 7593 { 7594 struct netdev_nested_priv priv = { 7595 .data = (void *)upper_dev, 7596 }; 7597 7598 ASSERT_RTNL(); 7599 7600 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7601 &priv); 7602 } 7603 EXPORT_SYMBOL(netdev_has_upper_dev); 7604 7605 /** 7606 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 7607 * @dev: device 7608 * @upper_dev: upper device to check 7609 * 7610 * Find out if a device is linked to specified upper device and return true 7611 * in case it is. Note that this checks the entire upper device chain. 7612 * The caller must hold rcu lock. 7613 */ 7614 netdev_has_upper_dev_all_rcu(struct net_device * dev,struct net_device * upper_dev)7615 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 7616 struct net_device *upper_dev) 7617 { 7618 struct netdev_nested_priv priv = { 7619 .data = (void *)upper_dev, 7620 }; 7621 7622 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7623 &priv); 7624 } 7625 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 7626 7627 /** 7628 * netdev_has_any_upper_dev - Check if device is linked to some device 7629 * @dev: device 7630 * 7631 * Find out if a device is linked to an upper device and return true in case 7632 * it is. The caller must hold the RTNL lock. 
7633 */ netdev_has_any_upper_dev(struct net_device * dev)7634 bool netdev_has_any_upper_dev(struct net_device *dev) 7635 { 7636 ASSERT_RTNL(); 7637 7638 return !list_empty(&dev->adj_list.upper); 7639 } 7640 EXPORT_SYMBOL(netdev_has_any_upper_dev); 7641 7642 /** 7643 * netdev_master_upper_dev_get - Get master upper device 7644 * @dev: device 7645 * 7646 * Find a master upper device and return pointer to it or NULL in case 7647 * it's not there. The caller must hold the RTNL lock. 7648 */ netdev_master_upper_dev_get(struct net_device * dev)7649 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 7650 { 7651 struct netdev_adjacent *upper; 7652 7653 ASSERT_RTNL(); 7654 7655 if (list_empty(&dev->adj_list.upper)) 7656 return NULL; 7657 7658 upper = list_first_entry(&dev->adj_list.upper, 7659 struct netdev_adjacent, list); 7660 if (likely(upper->master)) 7661 return upper->dev; 7662 return NULL; 7663 } 7664 EXPORT_SYMBOL(netdev_master_upper_dev_get); 7665 __netdev_master_upper_dev_get(struct net_device * dev)7666 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 7667 { 7668 struct netdev_adjacent *upper; 7669 7670 ASSERT_RTNL(); 7671 7672 if (list_empty(&dev->adj_list.upper)) 7673 return NULL; 7674 7675 upper = list_first_entry(&dev->adj_list.upper, 7676 struct netdev_adjacent, list); 7677 if (likely(upper->master) && !upper->ignore) 7678 return upper->dev; 7679 return NULL; 7680 } 7681 7682 /** 7683 * netdev_has_any_lower_dev - Check if device is linked to some device 7684 * @dev: device 7685 * 7686 * Find out if a device is linked to a lower device and return true in case 7687 * it is. The caller must hold the RTNL lock. 7688 */ netdev_has_any_lower_dev(struct net_device * dev)7689 static bool netdev_has_any_lower_dev(struct net_device *dev) 7690 { 7691 ASSERT_RTNL(); 7692 7693 return !list_empty(&dev->adj_list.lower); 7694 } 7695 netdev_adjacent_get_private(struct list_head * adj_list)7696 void *netdev_adjacent_get_private(struct list_head *adj_list) 7697 { 7698 struct netdev_adjacent *adj; 7699 7700 adj = list_entry(adj_list, struct netdev_adjacent, list); 7701 7702 return adj->private; 7703 } 7704 EXPORT_SYMBOL(netdev_adjacent_get_private); 7705 7706 /** 7707 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 7708 * @dev: device 7709 * @iter: list_head ** of the current position 7710 * 7711 * Gets the next device from the dev's upper list, starting from iter 7712 * position. The caller must hold RCU read lock. 
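 *
 * Callers usually go through the netdev_for_each_upper_dev_rcu() helper
 * from <linux/netdevice.h> rather than calling this directly; a minimal
 * sketch (names here are only illustrative):
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		pr_info("upper of %s: %s\n", dev->name, upper->name);
 *	rcu_read_unlock();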
7713 */ netdev_upper_get_next_dev_rcu(struct net_device * dev,struct list_head ** iter)7714 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 7715 struct list_head **iter) 7716 { 7717 struct netdev_adjacent *upper; 7718 7719 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7720 7721 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7722 7723 if (&upper->list == &dev->adj_list.upper) 7724 return NULL; 7725 7726 *iter = &upper->list; 7727 7728 return upper->dev; 7729 } 7730 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 7731 __netdev_next_upper_dev(struct net_device * dev,struct list_head ** iter,bool * ignore)7732 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 7733 struct list_head **iter, 7734 bool *ignore) 7735 { 7736 struct netdev_adjacent *upper; 7737 7738 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 7739 7740 if (&upper->list == &dev->adj_list.upper) 7741 return NULL; 7742 7743 *iter = &upper->list; 7744 *ignore = upper->ignore; 7745 7746 return upper->dev; 7747 } 7748 netdev_next_upper_dev_rcu(struct net_device * dev,struct list_head ** iter)7749 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 7750 struct list_head **iter) 7751 { 7752 struct netdev_adjacent *upper; 7753 7754 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7755 7756 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7757 7758 if (&upper->list == &dev->adj_list.upper) 7759 return NULL; 7760 7761 *iter = &upper->list; 7762 7763 return upper->dev; 7764 } 7765 __netdev_walk_all_upper_dev(struct net_device * dev,int (* fn)(struct net_device * dev,struct netdev_nested_priv * priv),struct netdev_nested_priv * priv)7766 static int __netdev_walk_all_upper_dev(struct net_device *dev, 7767 int (*fn)(struct net_device *dev, 7768 struct netdev_nested_priv *priv), 7769 struct netdev_nested_priv *priv) 7770 { 7771 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7772 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7773 int ret, cur = 0; 7774 bool ignore; 7775 7776 now = dev; 7777 iter = &dev->adj_list.upper; 7778 7779 while (1) { 7780 if (now != dev) { 7781 ret = fn(now, priv); 7782 if (ret) 7783 return ret; 7784 } 7785 7786 next = NULL; 7787 while (1) { 7788 udev = __netdev_next_upper_dev(now, &iter, &ignore); 7789 if (!udev) 7790 break; 7791 if (ignore) 7792 continue; 7793 7794 next = udev; 7795 niter = &udev->adj_list.upper; 7796 dev_stack[cur] = now; 7797 iter_stack[cur++] = iter; 7798 break; 7799 } 7800 7801 if (!next) { 7802 if (!cur) 7803 return 0; 7804 next = dev_stack[--cur]; 7805 niter = iter_stack[cur]; 7806 } 7807 7808 now = next; 7809 iter = niter; 7810 } 7811 7812 return 0; 7813 } 7814 netdev_walk_all_upper_dev_rcu(struct net_device * dev,int (* fn)(struct net_device * dev,struct netdev_nested_priv * priv),struct netdev_nested_priv * priv)7815 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 7816 int (*fn)(struct net_device *dev, 7817 struct netdev_nested_priv *priv), 7818 struct netdev_nested_priv *priv) 7819 { 7820 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7821 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7822 int ret, cur = 0; 7823 7824 now = dev; 7825 iter = &dev->adj_list.upper; 7826 7827 while (1) { 7828 if (now != dev) { 7829 ret = fn(now, priv); 7830 if (ret) 7831 return ret; 7832 } 7833 7834 next = NULL; 7835 while (1) { 7836 udev = netdev_next_upper_dev_rcu(now, 
&iter); 7837 if (!udev) 7838 break; 7839 7840 next = udev; 7841 niter = &udev->adj_list.upper; 7842 dev_stack[cur] = now; 7843 iter_stack[cur++] = iter; 7844 break; 7845 } 7846 7847 if (!next) { 7848 if (!cur) 7849 return 0; 7850 next = dev_stack[--cur]; 7851 niter = iter_stack[cur]; 7852 } 7853 7854 now = next; 7855 iter = niter; 7856 } 7857 7858 return 0; 7859 } 7860 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7861 __netdev_has_upper_dev(struct net_device * dev,struct net_device * upper_dev)7862 static bool __netdev_has_upper_dev(struct net_device *dev, 7863 struct net_device *upper_dev) 7864 { 7865 struct netdev_nested_priv priv = { 7866 .flags = 0, 7867 .data = (void *)upper_dev, 7868 }; 7869 7870 ASSERT_RTNL(); 7871 7872 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 7873 &priv); 7874 } 7875 7876 /** 7877 * netdev_lower_get_next_private - Get the next ->private from the 7878 * lower neighbour list 7879 * @dev: device 7880 * @iter: list_head ** of the current position 7881 * 7882 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7883 * list, starting from iter position. The caller must hold either hold the 7884 * RTNL lock or its own locking that guarantees that the neighbour lower 7885 * list will remain unchanged. 7886 */ netdev_lower_get_next_private(struct net_device * dev,struct list_head ** iter)7887 void *netdev_lower_get_next_private(struct net_device *dev, 7888 struct list_head **iter) 7889 { 7890 struct netdev_adjacent *lower; 7891 7892 lower = list_entry(*iter, struct netdev_adjacent, list); 7893 7894 if (&lower->list == &dev->adj_list.lower) 7895 return NULL; 7896 7897 *iter = lower->list.next; 7898 7899 return lower->private; 7900 } 7901 EXPORT_SYMBOL(netdev_lower_get_next_private); 7902 7903 /** 7904 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7905 * lower neighbour list, RCU 7906 * variant 7907 * @dev: device 7908 * @iter: list_head ** of the current position 7909 * 7910 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7911 * list, starting from iter position. The caller must hold RCU read lock. 7912 */ netdev_lower_get_next_private_rcu(struct net_device * dev,struct list_head ** iter)7913 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7914 struct list_head **iter) 7915 { 7916 struct netdev_adjacent *lower; 7917 7918 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 7919 7920 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7921 7922 if (&lower->list == &dev->adj_list.lower) 7923 return NULL; 7924 7925 *iter = &lower->list; 7926 7927 return lower->private; 7928 } 7929 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7930 7931 /** 7932 * netdev_lower_get_next - Get the next device from the lower neighbour 7933 * list 7934 * @dev: device 7935 * @iter: list_head ** of the current position 7936 * 7937 * Gets the next netdev_adjacent from the dev's lower neighbour 7938 * list, starting from iter position. The caller must hold RTNL lock or 7939 * its own locking that guarantees that the neighbour lower 7940 * list will remain unchanged. 
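 * Most callers use the netdev_for_each_lower_dev() iterator from
 * <linux/netdevice.h> rather than calling this function directly.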
7941 */ netdev_lower_get_next(struct net_device * dev,struct list_head ** iter)7942 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7943 { 7944 struct netdev_adjacent *lower; 7945 7946 lower = list_entry(*iter, struct netdev_adjacent, list); 7947 7948 if (&lower->list == &dev->adj_list.lower) 7949 return NULL; 7950 7951 *iter = lower->list.next; 7952 7953 return lower->dev; 7954 } 7955 EXPORT_SYMBOL(netdev_lower_get_next); 7956 netdev_next_lower_dev(struct net_device * dev,struct list_head ** iter)7957 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7958 struct list_head **iter) 7959 { 7960 struct netdev_adjacent *lower; 7961 7962 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7963 7964 if (&lower->list == &dev->adj_list.lower) 7965 return NULL; 7966 7967 *iter = &lower->list; 7968 7969 return lower->dev; 7970 } 7971 __netdev_next_lower_dev(struct net_device * dev,struct list_head ** iter,bool * ignore)7972 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7973 struct list_head **iter, 7974 bool *ignore) 7975 { 7976 struct netdev_adjacent *lower; 7977 7978 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7979 7980 if (&lower->list == &dev->adj_list.lower) 7981 return NULL; 7982 7983 *iter = &lower->list; 7984 *ignore = lower->ignore; 7985 7986 return lower->dev; 7987 } 7988 netdev_walk_all_lower_dev(struct net_device * dev,int (* fn)(struct net_device * dev,struct netdev_nested_priv * priv),struct netdev_nested_priv * priv)7989 int netdev_walk_all_lower_dev(struct net_device *dev, 7990 int (*fn)(struct net_device *dev, 7991 struct netdev_nested_priv *priv), 7992 struct netdev_nested_priv *priv) 7993 { 7994 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7995 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7996 int ret, cur = 0; 7997 7998 now = dev; 7999 iter = &dev->adj_list.lower; 8000 8001 while (1) { 8002 if (now != dev) { 8003 ret = fn(now, priv); 8004 if (ret) 8005 return ret; 8006 } 8007 8008 next = NULL; 8009 while (1) { 8010 ldev = netdev_next_lower_dev(now, &iter); 8011 if (!ldev) 8012 break; 8013 8014 next = ldev; 8015 niter = &ldev->adj_list.lower; 8016 dev_stack[cur] = now; 8017 iter_stack[cur++] = iter; 8018 break; 8019 } 8020 8021 if (!next) { 8022 if (!cur) 8023 return 0; 8024 next = dev_stack[--cur]; 8025 niter = iter_stack[cur]; 8026 } 8027 8028 now = next; 8029 iter = niter; 8030 } 8031 8032 return 0; 8033 } 8034 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 8035 __netdev_walk_all_lower_dev(struct net_device * dev,int (* fn)(struct net_device * dev,struct netdev_nested_priv * priv),struct netdev_nested_priv * priv)8036 static int __netdev_walk_all_lower_dev(struct net_device *dev, 8037 int (*fn)(struct net_device *dev, 8038 struct netdev_nested_priv *priv), 8039 struct netdev_nested_priv *priv) 8040 { 8041 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 8042 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 8043 int ret, cur = 0; 8044 bool ignore; 8045 8046 now = dev; 8047 iter = &dev->adj_list.lower; 8048 8049 while (1) { 8050 if (now != dev) { 8051 ret = fn(now, priv); 8052 if (ret) 8053 return ret; 8054 } 8055 8056 next = NULL; 8057 while (1) { 8058 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 8059 if (!ldev) 8060 break; 8061 if (ignore) 8062 continue; 8063 8064 next = ldev; 8065 niter = &ldev->adj_list.lower; 8066 dev_stack[cur] = now; 8067 iter_stack[cur++] = iter; 8068 break; 8069 } 
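		/* No unvisited lower device at this level: fall back to the
		 * saved parent and iterator below, or finish once the stack
		 * is empty and we are back at the original @dev.
		 */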
8070 8071 if (!next) { 8072 if (!cur) 8073 return 0; 8074 next = dev_stack[--cur]; 8075 niter = iter_stack[cur]; 8076 } 8077 8078 now = next; 8079 iter = niter; 8080 } 8081 8082 return 0; 8083 } 8084 netdev_next_lower_dev_rcu(struct net_device * dev,struct list_head ** iter)8085 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 8086 struct list_head **iter) 8087 { 8088 struct netdev_adjacent *lower; 8089 8090 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 8091 if (&lower->list == &dev->adj_list.lower) 8092 return NULL; 8093 8094 *iter = &lower->list; 8095 8096 return lower->dev; 8097 } 8098 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 8099 __netdev_upper_depth(struct net_device * dev)8100 static u8 __netdev_upper_depth(struct net_device *dev) 8101 { 8102 struct net_device *udev; 8103 struct list_head *iter; 8104 u8 max_depth = 0; 8105 bool ignore; 8106 8107 for (iter = &dev->adj_list.upper, 8108 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 8109 udev; 8110 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 8111 if (ignore) 8112 continue; 8113 if (max_depth < udev->upper_level) 8114 max_depth = udev->upper_level; 8115 } 8116 8117 return max_depth; 8118 } 8119 __netdev_lower_depth(struct net_device * dev)8120 static u8 __netdev_lower_depth(struct net_device *dev) 8121 { 8122 struct net_device *ldev; 8123 struct list_head *iter; 8124 u8 max_depth = 0; 8125 bool ignore; 8126 8127 for (iter = &dev->adj_list.lower, 8128 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 8129 ldev; 8130 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 8131 if (ignore) 8132 continue; 8133 if (max_depth < ldev->lower_level) 8134 max_depth = ldev->lower_level; 8135 } 8136 8137 return max_depth; 8138 } 8139 __netdev_update_upper_level(struct net_device * dev,struct netdev_nested_priv * __unused)8140 static int __netdev_update_upper_level(struct net_device *dev, 8141 struct netdev_nested_priv *__unused) 8142 { 8143 dev->upper_level = __netdev_upper_depth(dev) + 1; 8144 return 0; 8145 } 8146 8147 #ifdef CONFIG_LOCKDEP 8148 static LIST_HEAD(net_unlink_list); 8149 net_unlink_todo(struct net_device * dev)8150 static void net_unlink_todo(struct net_device *dev) 8151 { 8152 if (list_empty(&dev->unlink_list)) 8153 list_add_tail(&dev->unlink_list, &net_unlink_list); 8154 } 8155 #endif 8156 __netdev_update_lower_level(struct net_device * dev,struct netdev_nested_priv * priv)8157 static int __netdev_update_lower_level(struct net_device *dev, 8158 struct netdev_nested_priv *priv) 8159 { 8160 dev->lower_level = __netdev_lower_depth(dev) + 1; 8161 8162 #ifdef CONFIG_LOCKDEP 8163 if (!priv) 8164 return 0; 8165 8166 if (priv->flags & NESTED_SYNC_IMM) 8167 dev->nested_level = dev->lower_level - 1; 8168 if (priv->flags & NESTED_SYNC_TODO) 8169 net_unlink_todo(dev); 8170 #endif 8171 return 0; 8172 } 8173 netdev_walk_all_lower_dev_rcu(struct net_device * dev,int (* fn)(struct net_device * dev,struct netdev_nested_priv * priv),struct netdev_nested_priv * priv)8174 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 8175 int (*fn)(struct net_device *dev, 8176 struct netdev_nested_priv *priv), 8177 struct netdev_nested_priv *priv) 8178 { 8179 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 8180 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 8181 int ret, cur = 0; 8182 8183 now = dev; 8184 iter = &dev->adj_list.lower; 8185 8186 while (1) { 8187 if (now != dev) { 8188 ret = fn(now, priv); 8189 if (ret) 8190 return ret; 8191 } 8192 8193 next = 
NULL; 8194 while (1) { 8195 ldev = netdev_next_lower_dev_rcu(now, &iter); 8196 if (!ldev) 8197 break; 8198 8199 next = ldev; 8200 niter = &ldev->adj_list.lower; 8201 dev_stack[cur] = now; 8202 iter_stack[cur++] = iter; 8203 break; 8204 } 8205 8206 if (!next) { 8207 if (!cur) 8208 return 0; 8209 next = dev_stack[--cur]; 8210 niter = iter_stack[cur]; 8211 } 8212 8213 now = next; 8214 iter = niter; 8215 } 8216 8217 return 0; 8218 } 8219 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 8220 8221 /** 8222 * netdev_lower_get_first_private_rcu - Get the first ->private from the 8223 * lower neighbour list, RCU 8224 * variant 8225 * @dev: device 8226 * 8227 * Gets the first netdev_adjacent->private from the dev's lower neighbour 8228 * list. The caller must hold RCU read lock. 8229 */ netdev_lower_get_first_private_rcu(struct net_device * dev)8230 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 8231 { 8232 struct netdev_adjacent *lower; 8233 8234 lower = list_first_or_null_rcu(&dev->adj_list.lower, 8235 struct netdev_adjacent, list); 8236 if (lower) 8237 return lower->private; 8238 return NULL; 8239 } 8240 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 8241 8242 /** 8243 * netdev_master_upper_dev_get_rcu - Get master upper device 8244 * @dev: device 8245 * 8246 * Find a master upper device and return pointer to it or NULL in case 8247 * it's not there. The caller must hold the RCU read lock. 8248 */ netdev_master_upper_dev_get_rcu(struct net_device * dev)8249 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 8250 { 8251 struct netdev_adjacent *upper; 8252 8253 upper = list_first_or_null_rcu(&dev->adj_list.upper, 8254 struct netdev_adjacent, list); 8255 if (upper && likely(upper->master)) 8256 return upper->dev; 8257 return NULL; 8258 } 8259 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 8260 netdev_adjacent_sysfs_add(struct net_device * dev,struct net_device * adj_dev,struct list_head * dev_list)8261 static int netdev_adjacent_sysfs_add(struct net_device *dev, 8262 struct net_device *adj_dev, 8263 struct list_head *dev_list) 8264 { 8265 char linkname[IFNAMSIZ+7]; 8266 8267 sprintf(linkname, dev_list == &dev->adj_list.upper ? 8268 "upper_%s" : "lower_%s", adj_dev->name); 8269 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 8270 linkname); 8271 } netdev_adjacent_sysfs_del(struct net_device * dev,char * name,struct list_head * dev_list)8272 static void netdev_adjacent_sysfs_del(struct net_device *dev, 8273 char *name, 8274 struct list_head *dev_list) 8275 { 8276 char linkname[IFNAMSIZ+7]; 8277 8278 sprintf(linkname, dev_list == &dev->adj_list.upper ? 
8279 "upper_%s" : "lower_%s", name); 8280 sysfs_remove_link(&(dev->dev.kobj), linkname); 8281 } 8282 netdev_adjacent_is_neigh_list(struct net_device * dev,struct net_device * adj_dev,struct list_head * dev_list)8283 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 8284 struct net_device *adj_dev, 8285 struct list_head *dev_list) 8286 { 8287 return (dev_list == &dev->adj_list.upper || 8288 dev_list == &dev->adj_list.lower) && 8289 net_eq(dev_net(dev), dev_net(adj_dev)); 8290 } 8291 __netdev_adjacent_dev_insert(struct net_device * dev,struct net_device * adj_dev,struct list_head * dev_list,void * private,bool master)8292 static int __netdev_adjacent_dev_insert(struct net_device *dev, 8293 struct net_device *adj_dev, 8294 struct list_head *dev_list, 8295 void *private, bool master) 8296 { 8297 struct netdev_adjacent *adj; 8298 int ret; 8299 8300 adj = __netdev_find_adj(adj_dev, dev_list); 8301 8302 if (adj) { 8303 adj->ref_nr += 1; 8304 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 8305 dev->name, adj_dev->name, adj->ref_nr); 8306 8307 return 0; 8308 } 8309 8310 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 8311 if (!adj) 8312 return -ENOMEM; 8313 8314 adj->dev = adj_dev; 8315 adj->master = master; 8316 adj->ref_nr = 1; 8317 adj->private = private; 8318 adj->ignore = false; 8319 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL); 8320 8321 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 8322 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 8323 8324 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 8325 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 8326 if (ret) 8327 goto free_adj; 8328 } 8329 8330 /* Ensure that master link is always the first item in list. 
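	 * netdev_master_upper_dev_get() and netdev_master_upper_dev_get_rcu()
	 * depend on this ordering: they only look at the first entry and its
	 * ->master flag.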
*/ 8331 if (master) { 8332 ret = sysfs_create_link(&(dev->dev.kobj), 8333 &(adj_dev->dev.kobj), "master"); 8334 if (ret) 8335 goto remove_symlinks; 8336 8337 list_add_rcu(&adj->list, dev_list); 8338 } else { 8339 list_add_tail_rcu(&adj->list, dev_list); 8340 } 8341 8342 return 0; 8343 8344 remove_symlinks: 8345 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 8346 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 8347 free_adj: 8348 netdev_put(adj_dev, &adj->dev_tracker); 8349 kfree(adj); 8350 8351 return ret; 8352 } 8353 __netdev_adjacent_dev_remove(struct net_device * dev,struct net_device * adj_dev,u16 ref_nr,struct list_head * dev_list)8354 static void __netdev_adjacent_dev_remove(struct net_device *dev, 8355 struct net_device *adj_dev, 8356 u16 ref_nr, 8357 struct list_head *dev_list) 8358 { 8359 struct netdev_adjacent *adj; 8360 8361 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 8362 dev->name, adj_dev->name, ref_nr); 8363 8364 adj = __netdev_find_adj(adj_dev, dev_list); 8365 8366 if (!adj) { 8367 pr_err("Adjacency does not exist for device %s from %s\n", 8368 dev->name, adj_dev->name); 8369 WARN_ON(1); 8370 return; 8371 } 8372 8373 if (adj->ref_nr > ref_nr) { 8374 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 8375 dev->name, adj_dev->name, ref_nr, 8376 adj->ref_nr - ref_nr); 8377 adj->ref_nr -= ref_nr; 8378 return; 8379 } 8380 8381 if (adj->master) 8382 sysfs_remove_link(&(dev->dev.kobj), "master"); 8383 8384 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 8385 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 8386 8387 list_del_rcu(&adj->list); 8388 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 8389 adj_dev->name, dev->name, adj_dev->name); 8390 netdev_put(adj_dev, &adj->dev_tracker); 8391 kfree_rcu(adj, rcu); 8392 } 8393 __netdev_adjacent_dev_link_lists(struct net_device * dev,struct net_device * upper_dev,struct list_head * up_list,struct list_head * down_list,void * private,bool master)8394 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 8395 struct net_device *upper_dev, 8396 struct list_head *up_list, 8397 struct list_head *down_list, 8398 void *private, bool master) 8399 { 8400 int ret; 8401 8402 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 8403 private, master); 8404 if (ret) 8405 return ret; 8406 8407 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 8408 private, false); 8409 if (ret) { 8410 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 8411 return ret; 8412 } 8413 8414 return 0; 8415 } 8416 __netdev_adjacent_dev_unlink_lists(struct net_device * dev,struct net_device * upper_dev,u16 ref_nr,struct list_head * up_list,struct list_head * down_list)8417 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 8418 struct net_device *upper_dev, 8419 u16 ref_nr, 8420 struct list_head *up_list, 8421 struct list_head *down_list) 8422 { 8423 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 8424 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 8425 } 8426 __netdev_adjacent_dev_link_neighbour(struct net_device * dev,struct net_device * upper_dev,void * private,bool master)8427 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 8428 struct net_device *upper_dev, 8429 void *private, bool master) 8430 { 8431 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 8432 &dev->adj_list.upper, 8433 &upper_dev->adj_list.lower, 8434 private, master); 8435 } 8436 
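/* Typical usage of the upper/lower linking API defined below (a hedged
 * sketch, not lifted from any particular driver): a bonding-style master
 * links itself above each slave with
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev,
 *					   NULL, NULL, extack);
 *	if (err)
 *		return err;
 *
 * and tears the relationship down again with
 *
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 *
 * A non-master relationship (e.g. a VLAN stacked on a real device) would use
 * netdev_upper_dev_link(real_dev, vlan_dev, extack) instead.  Both paths
 * require the RTNL lock and create the "upper_<name>"/"lower_<name>" (and
 * "master") sysfs links set up by the adjacency helpers above.
 */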
__netdev_adjacent_dev_unlink_neighbour(struct net_device * dev,struct net_device * upper_dev)8437 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 8438 struct net_device *upper_dev) 8439 { 8440 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 8441 &dev->adj_list.upper, 8442 &upper_dev->adj_list.lower); 8443 } 8444 __netdev_upper_dev_link(struct net_device * dev,struct net_device * upper_dev,bool master,void * upper_priv,void * upper_info,struct netdev_nested_priv * priv,struct netlink_ext_ack * extack)8445 static int __netdev_upper_dev_link(struct net_device *dev, 8446 struct net_device *upper_dev, bool master, 8447 void *upper_priv, void *upper_info, 8448 struct netdev_nested_priv *priv, 8449 struct netlink_ext_ack *extack) 8450 { 8451 struct netdev_notifier_changeupper_info changeupper_info = { 8452 .info = { 8453 .dev = dev, 8454 .extack = extack, 8455 }, 8456 .upper_dev = upper_dev, 8457 .master = master, 8458 .linking = true, 8459 .upper_info = upper_info, 8460 }; 8461 struct net_device *master_dev; 8462 int ret = 0; 8463 8464 ASSERT_RTNL(); 8465 8466 if (dev == upper_dev) 8467 return -EBUSY; 8468 8469 /* To prevent loops, check if dev is not upper device to upper_dev. */ 8470 if (__netdev_has_upper_dev(upper_dev, dev)) 8471 return -EBUSY; 8472 8473 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 8474 return -EMLINK; 8475 8476 if (!master) { 8477 if (__netdev_has_upper_dev(dev, upper_dev)) 8478 return -EEXIST; 8479 } else { 8480 master_dev = __netdev_master_upper_dev_get(dev); 8481 if (master_dev) 8482 return master_dev == upper_dev ? -EEXIST : -EBUSY; 8483 } 8484 8485 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8486 &changeupper_info.info); 8487 ret = notifier_to_errno(ret); 8488 if (ret) 8489 return ret; 8490 8491 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 8492 master); 8493 if (ret) 8494 return ret; 8495 8496 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8497 &changeupper_info.info); 8498 ret = notifier_to_errno(ret); 8499 if (ret) 8500 goto rollback; 8501 8502 __netdev_update_upper_level(dev, NULL); 8503 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8504 8505 __netdev_update_lower_level(upper_dev, priv); 8506 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8507 priv); 8508 8509 return 0; 8510 8511 rollback: 8512 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8513 8514 return ret; 8515 } 8516 8517 /** 8518 * netdev_upper_dev_link - Add a link to the upper device 8519 * @dev: device 8520 * @upper_dev: new upper device 8521 * @extack: netlink extended ack 8522 * 8523 * Adds a link to device which is upper to this one. The caller must hold 8524 * the RTNL lock. On a failure a negative errno code is returned. 8525 * On success the reference counts are adjusted and the function 8526 * returns zero. 
8527 */ netdev_upper_dev_link(struct net_device * dev,struct net_device * upper_dev,struct netlink_ext_ack * extack)8528 int netdev_upper_dev_link(struct net_device *dev, 8529 struct net_device *upper_dev, 8530 struct netlink_ext_ack *extack) 8531 { 8532 struct netdev_nested_priv priv = { 8533 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8534 .data = NULL, 8535 }; 8536 8537 return __netdev_upper_dev_link(dev, upper_dev, false, 8538 NULL, NULL, &priv, extack); 8539 } 8540 EXPORT_SYMBOL(netdev_upper_dev_link); 8541 8542 /** 8543 * netdev_master_upper_dev_link - Add a master link to the upper device 8544 * @dev: device 8545 * @upper_dev: new upper device 8546 * @upper_priv: upper device private 8547 * @upper_info: upper info to be passed down via notifier 8548 * @extack: netlink extended ack 8549 * 8550 * Adds a link to device which is upper to this one. In this case, only 8551 * one master upper device can be linked, although other non-master devices 8552 * might be linked as well. The caller must hold the RTNL lock. 8553 * On a failure a negative errno code is returned. On success the reference 8554 * counts are adjusted and the function returns zero. 8555 */ netdev_master_upper_dev_link(struct net_device * dev,struct net_device * upper_dev,void * upper_priv,void * upper_info,struct netlink_ext_ack * extack)8556 int netdev_master_upper_dev_link(struct net_device *dev, 8557 struct net_device *upper_dev, 8558 void *upper_priv, void *upper_info, 8559 struct netlink_ext_ack *extack) 8560 { 8561 struct netdev_nested_priv priv = { 8562 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8563 .data = NULL, 8564 }; 8565 8566 return __netdev_upper_dev_link(dev, upper_dev, true, 8567 upper_priv, upper_info, &priv, extack); 8568 } 8569 EXPORT_SYMBOL(netdev_master_upper_dev_link); 8570 __netdev_upper_dev_unlink(struct net_device * dev,struct net_device * upper_dev,struct netdev_nested_priv * priv)8571 static void __netdev_upper_dev_unlink(struct net_device *dev, 8572 struct net_device *upper_dev, 8573 struct netdev_nested_priv *priv) 8574 { 8575 struct netdev_notifier_changeupper_info changeupper_info = { 8576 .info = { 8577 .dev = dev, 8578 }, 8579 .upper_dev = upper_dev, 8580 .linking = false, 8581 }; 8582 8583 ASSERT_RTNL(); 8584 8585 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 8586 8587 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8588 &changeupper_info.info); 8589 8590 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8591 8592 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8593 &changeupper_info.info); 8594 8595 __netdev_update_upper_level(dev, NULL); 8596 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8597 8598 __netdev_update_lower_level(upper_dev, priv); 8599 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8600 priv); 8601 } 8602 8603 /** 8604 * netdev_upper_dev_unlink - Removes a link to upper device 8605 * @dev: device 8606 * @upper_dev: new upper device 8607 * 8608 * Removes a link to device which is upper to this one. The caller must hold 8609 * the RTNL lock. 
8610 */ netdev_upper_dev_unlink(struct net_device * dev,struct net_device * upper_dev)8611 void netdev_upper_dev_unlink(struct net_device *dev, 8612 struct net_device *upper_dev) 8613 { 8614 struct netdev_nested_priv priv = { 8615 .flags = NESTED_SYNC_TODO, 8616 .data = NULL, 8617 }; 8618 8619 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 8620 } 8621 EXPORT_SYMBOL(netdev_upper_dev_unlink); 8622 __netdev_adjacent_dev_set(struct net_device * upper_dev,struct net_device * lower_dev,bool val)8623 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 8624 struct net_device *lower_dev, 8625 bool val) 8626 { 8627 struct netdev_adjacent *adj; 8628 8629 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 8630 if (adj) 8631 adj->ignore = val; 8632 8633 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 8634 if (adj) 8635 adj->ignore = val; 8636 } 8637 netdev_adjacent_dev_disable(struct net_device * upper_dev,struct net_device * lower_dev)8638 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 8639 struct net_device *lower_dev) 8640 { 8641 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 8642 } 8643 netdev_adjacent_dev_enable(struct net_device * upper_dev,struct net_device * lower_dev)8644 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 8645 struct net_device *lower_dev) 8646 { 8647 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 8648 } 8649 netdev_adjacent_change_prepare(struct net_device * old_dev,struct net_device * new_dev,struct net_device * dev,struct netlink_ext_ack * extack)8650 int netdev_adjacent_change_prepare(struct net_device *old_dev, 8651 struct net_device *new_dev, 8652 struct net_device *dev, 8653 struct netlink_ext_ack *extack) 8654 { 8655 struct netdev_nested_priv priv = { 8656 .flags = 0, 8657 .data = NULL, 8658 }; 8659 int err; 8660 8661 if (!new_dev) 8662 return 0; 8663 8664 if (old_dev && new_dev != old_dev) 8665 netdev_adjacent_dev_disable(dev, old_dev); 8666 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 8667 extack); 8668 if (err) { 8669 if (old_dev && new_dev != old_dev) 8670 netdev_adjacent_dev_enable(dev, old_dev); 8671 return err; 8672 } 8673 8674 return 0; 8675 } 8676 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 8677 netdev_adjacent_change_commit(struct net_device * old_dev,struct net_device * new_dev,struct net_device * dev)8678 void netdev_adjacent_change_commit(struct net_device *old_dev, 8679 struct net_device *new_dev, 8680 struct net_device *dev) 8681 { 8682 struct netdev_nested_priv priv = { 8683 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8684 .data = NULL, 8685 }; 8686 8687 if (!new_dev || !old_dev) 8688 return; 8689 8690 if (new_dev == old_dev) 8691 return; 8692 8693 netdev_adjacent_dev_enable(dev, old_dev); 8694 __netdev_upper_dev_unlink(old_dev, dev, &priv); 8695 } 8696 EXPORT_SYMBOL(netdev_adjacent_change_commit); 8697 netdev_adjacent_change_abort(struct net_device * old_dev,struct net_device * new_dev,struct net_device * dev)8698 void netdev_adjacent_change_abort(struct net_device *old_dev, 8699 struct net_device *new_dev, 8700 struct net_device *dev) 8701 { 8702 struct netdev_nested_priv priv = { 8703 .flags = 0, 8704 .data = NULL, 8705 }; 8706 8707 if (!new_dev) 8708 return; 8709 8710 if (old_dev && new_dev != old_dev) 8711 netdev_adjacent_dev_enable(dev, old_dev); 8712 8713 __netdev_upper_dev_unlink(new_dev, dev, &priv); 8714 } 8715 EXPORT_SYMBOL(netdev_adjacent_change_abort); 8716 8717 /** 8718 * netdev_bonding_info_change - 
Dispatch event about slave change 8719 * @dev: device 8720 * @bonding_info: info to dispatch 8721 * 8722 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 8723 * The caller must hold the RTNL lock. 8724 */ netdev_bonding_info_change(struct net_device * dev,struct netdev_bonding_info * bonding_info)8725 void netdev_bonding_info_change(struct net_device *dev, 8726 struct netdev_bonding_info *bonding_info) 8727 { 8728 struct netdev_notifier_bonding_info info = { 8729 .info.dev = dev, 8730 }; 8731 8732 memcpy(&info.bonding_info, bonding_info, 8733 sizeof(struct netdev_bonding_info)); 8734 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 8735 &info.info); 8736 } 8737 EXPORT_SYMBOL(netdev_bonding_info_change); 8738 netdev_offload_xstats_enable_l3(struct net_device * dev,struct netlink_ext_ack * extack)8739 static int netdev_offload_xstats_enable_l3(struct net_device *dev, 8740 struct netlink_ext_ack *extack) 8741 { 8742 struct netdev_notifier_offload_xstats_info info = { 8743 .info.dev = dev, 8744 .info.extack = extack, 8745 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 8746 }; 8747 int err; 8748 int rc; 8749 8750 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), 8751 GFP_KERNEL); 8752 if (!dev->offload_xstats_l3) 8753 return -ENOMEM; 8754 8755 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, 8756 NETDEV_OFFLOAD_XSTATS_DISABLE, 8757 &info.info); 8758 err = notifier_to_errno(rc); 8759 if (err) 8760 goto free_stats; 8761 8762 return 0; 8763 8764 free_stats: 8765 kfree(dev->offload_xstats_l3); 8766 dev->offload_xstats_l3 = NULL; 8767 return err; 8768 } 8769 netdev_offload_xstats_enable(struct net_device * dev,enum netdev_offload_xstats_type type,struct netlink_ext_ack * extack)8770 int netdev_offload_xstats_enable(struct net_device *dev, 8771 enum netdev_offload_xstats_type type, 8772 struct netlink_ext_ack *extack) 8773 { 8774 ASSERT_RTNL(); 8775 8776 if (netdev_offload_xstats_enabled(dev, type)) 8777 return -EALREADY; 8778 8779 switch (type) { 8780 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8781 return netdev_offload_xstats_enable_l3(dev, extack); 8782 } 8783 8784 WARN_ON(1); 8785 return -EINVAL; 8786 } 8787 EXPORT_SYMBOL(netdev_offload_xstats_enable); 8788 netdev_offload_xstats_disable_l3(struct net_device * dev)8789 static void netdev_offload_xstats_disable_l3(struct net_device *dev) 8790 { 8791 struct netdev_notifier_offload_xstats_info info = { 8792 .info.dev = dev, 8793 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 8794 }; 8795 8796 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, 8797 &info.info); 8798 kfree(dev->offload_xstats_l3); 8799 dev->offload_xstats_l3 = NULL; 8800 } 8801 netdev_offload_xstats_disable(struct net_device * dev,enum netdev_offload_xstats_type type)8802 int netdev_offload_xstats_disable(struct net_device *dev, 8803 enum netdev_offload_xstats_type type) 8804 { 8805 ASSERT_RTNL(); 8806 8807 if (!netdev_offload_xstats_enabled(dev, type)) 8808 return -EALREADY; 8809 8810 switch (type) { 8811 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8812 netdev_offload_xstats_disable_l3(dev); 8813 return 0; 8814 } 8815 8816 WARN_ON(1); 8817 return -EINVAL; 8818 } 8819 EXPORT_SYMBOL(netdev_offload_xstats_disable); 8820 netdev_offload_xstats_disable_all(struct net_device * dev)8821 static void netdev_offload_xstats_disable_all(struct net_device *dev) 8822 { 8823 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); 8824 } 8825 8826 static struct rtnl_hw_stats64 * netdev_offload_xstats_get_ptr(const struct net_device * dev,enum 
netdev_offload_xstats_type type)8827 netdev_offload_xstats_get_ptr(const struct net_device *dev, 8828 enum netdev_offload_xstats_type type) 8829 { 8830 switch (type) { 8831 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8832 return dev->offload_xstats_l3; 8833 } 8834 8835 WARN_ON(1); 8836 return NULL; 8837 } 8838 netdev_offload_xstats_enabled(const struct net_device * dev,enum netdev_offload_xstats_type type)8839 bool netdev_offload_xstats_enabled(const struct net_device *dev, 8840 enum netdev_offload_xstats_type type) 8841 { 8842 ASSERT_RTNL(); 8843 8844 return netdev_offload_xstats_get_ptr(dev, type); 8845 } 8846 EXPORT_SYMBOL(netdev_offload_xstats_enabled); 8847 8848 struct netdev_notifier_offload_xstats_ru { 8849 bool used; 8850 }; 8851 8852 struct netdev_notifier_offload_xstats_rd { 8853 struct rtnl_hw_stats64 stats; 8854 bool used; 8855 }; 8856 netdev_hw_stats64_add(struct rtnl_hw_stats64 * dest,const struct rtnl_hw_stats64 * src)8857 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, 8858 const struct rtnl_hw_stats64 *src) 8859 { 8860 dest->rx_packets += src->rx_packets; 8861 dest->tx_packets += src->tx_packets; 8862 dest->rx_bytes += src->rx_bytes; 8863 dest->tx_bytes += src->tx_bytes; 8864 dest->rx_errors += src->rx_errors; 8865 dest->tx_errors += src->tx_errors; 8866 dest->rx_dropped += src->rx_dropped; 8867 dest->tx_dropped += src->tx_dropped; 8868 dest->multicast += src->multicast; 8869 } 8870 netdev_offload_xstats_get_used(struct net_device * dev,enum netdev_offload_xstats_type type,bool * p_used,struct netlink_ext_ack * extack)8871 static int netdev_offload_xstats_get_used(struct net_device *dev, 8872 enum netdev_offload_xstats_type type, 8873 bool *p_used, 8874 struct netlink_ext_ack *extack) 8875 { 8876 struct netdev_notifier_offload_xstats_ru report_used = {}; 8877 struct netdev_notifier_offload_xstats_info info = { 8878 .info.dev = dev, 8879 .info.extack = extack, 8880 .type = type, 8881 .report_used = &report_used, 8882 }; 8883 int rc; 8884 8885 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); 8886 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, 8887 &info.info); 8888 *p_used = report_used.used; 8889 return notifier_to_errno(rc); 8890 } 8891 netdev_offload_xstats_get_stats(struct net_device * dev,enum netdev_offload_xstats_type type,struct rtnl_hw_stats64 * p_stats,bool * p_used,struct netlink_ext_ack * extack)8892 static int netdev_offload_xstats_get_stats(struct net_device *dev, 8893 enum netdev_offload_xstats_type type, 8894 struct rtnl_hw_stats64 *p_stats, 8895 bool *p_used, 8896 struct netlink_ext_ack *extack) 8897 { 8898 struct netdev_notifier_offload_xstats_rd report_delta = {}; 8899 struct netdev_notifier_offload_xstats_info info = { 8900 .info.dev = dev, 8901 .info.extack = extack, 8902 .type = type, 8903 .report_delta = &report_delta, 8904 }; 8905 struct rtnl_hw_stats64 *stats; 8906 int rc; 8907 8908 stats = netdev_offload_xstats_get_ptr(dev, type); 8909 if (WARN_ON(!stats)) 8910 return -EINVAL; 8911 8912 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 8913 &info.info); 8914 8915 /* Cache whatever we got, even if there was an error, otherwise the 8916 * successful stats retrievals would get lost. 
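	 * Each notifier subscriber that responded has already folded its
	 * contribution into report_delta.stats via
	 * netdev_offload_xstats_report_delta(), so dropping the delta on a
	 * partial failure would lose those already-reported counters.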
8917 */ 8918 netdev_hw_stats64_add(stats, &report_delta.stats); 8919 8920 if (p_stats) 8921 *p_stats = *stats; 8922 *p_used = report_delta.used; 8923 8924 return notifier_to_errno(rc); 8925 } 8926 netdev_offload_xstats_get(struct net_device * dev,enum netdev_offload_xstats_type type,struct rtnl_hw_stats64 * p_stats,bool * p_used,struct netlink_ext_ack * extack)8927 int netdev_offload_xstats_get(struct net_device *dev, 8928 enum netdev_offload_xstats_type type, 8929 struct rtnl_hw_stats64 *p_stats, bool *p_used, 8930 struct netlink_ext_ack *extack) 8931 { 8932 ASSERT_RTNL(); 8933 8934 if (p_stats) 8935 return netdev_offload_xstats_get_stats(dev, type, p_stats, 8936 p_used, extack); 8937 else 8938 return netdev_offload_xstats_get_used(dev, type, p_used, 8939 extack); 8940 } 8941 EXPORT_SYMBOL(netdev_offload_xstats_get); 8942 8943 void netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd * report_delta,const struct rtnl_hw_stats64 * stats)8944 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, 8945 const struct rtnl_hw_stats64 *stats) 8946 { 8947 report_delta->used = true; 8948 netdev_hw_stats64_add(&report_delta->stats, stats); 8949 } 8950 EXPORT_SYMBOL(netdev_offload_xstats_report_delta); 8951 8952 void netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru * report_used)8953 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) 8954 { 8955 report_used->used = true; 8956 } 8957 EXPORT_SYMBOL(netdev_offload_xstats_report_used); 8958 netdev_offload_xstats_push_delta(struct net_device * dev,enum netdev_offload_xstats_type type,const struct rtnl_hw_stats64 * p_stats)8959 void netdev_offload_xstats_push_delta(struct net_device *dev, 8960 enum netdev_offload_xstats_type type, 8961 const struct rtnl_hw_stats64 *p_stats) 8962 { 8963 struct rtnl_hw_stats64 *stats; 8964 8965 ASSERT_RTNL(); 8966 8967 stats = netdev_offload_xstats_get_ptr(dev, type); 8968 if (WARN_ON(!stats)) 8969 return; 8970 8971 netdev_hw_stats64_add(stats, p_stats); 8972 } 8973 EXPORT_SYMBOL(netdev_offload_xstats_push_delta); 8974 8975 /** 8976 * netdev_get_xmit_slave - Get the xmit slave of master device 8977 * @dev: device 8978 * @skb: The packet 8979 * @all_slaves: assume all the slaves are active 8980 * 8981 * The reference counters are not incremented so the caller must be 8982 * careful with locks. The caller must hold RCU lock. 8983 * %NULL is returned if no slave is found. 8984 */ 8985 netdev_get_xmit_slave(struct net_device * dev,struct sk_buff * skb,bool all_slaves)8986 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8987 struct sk_buff *skb, 8988 bool all_slaves) 8989 { 8990 const struct net_device_ops *ops = dev->netdev_ops; 8991 8992 if (!ops->ndo_get_xmit_slave) 8993 return NULL; 8994 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8995 } 8996 EXPORT_SYMBOL(netdev_get_xmit_slave); 8997 netdev_sk_get_lower_dev(struct net_device * dev,struct sock * sk)8998 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8999 struct sock *sk) 9000 { 9001 const struct net_device_ops *ops = dev->netdev_ops; 9002 9003 if (!ops->ndo_sk_get_lower_dev) 9004 return NULL; 9005 return ops->ndo_sk_get_lower_dev(dev, sk); 9006 } 9007 9008 /** 9009 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 9010 * @dev: device 9011 * @sk: the socket 9012 * 9013 * %NULL is returned if no lower device is found. 
9014 */ 9015 netdev_sk_get_lowest_dev(struct net_device * dev,struct sock * sk)9016 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 9017 struct sock *sk) 9018 { 9019 struct net_device *lower; 9020 9021 lower = netdev_sk_get_lower_dev(dev, sk); 9022 while (lower) { 9023 dev = lower; 9024 lower = netdev_sk_get_lower_dev(dev, sk); 9025 } 9026 9027 return dev; 9028 } 9029 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 9030 netdev_adjacent_add_links(struct net_device * dev)9031 static void netdev_adjacent_add_links(struct net_device *dev) 9032 { 9033 struct netdev_adjacent *iter; 9034 9035 struct net *net = dev_net(dev); 9036 9037 list_for_each_entry(iter, &dev->adj_list.upper, list) { 9038 if (!net_eq(net, dev_net(iter->dev))) 9039 continue; 9040 netdev_adjacent_sysfs_add(iter->dev, dev, 9041 &iter->dev->adj_list.lower); 9042 netdev_adjacent_sysfs_add(dev, iter->dev, 9043 &dev->adj_list.upper); 9044 } 9045 9046 list_for_each_entry(iter, &dev->adj_list.lower, list) { 9047 if (!net_eq(net, dev_net(iter->dev))) 9048 continue; 9049 netdev_adjacent_sysfs_add(iter->dev, dev, 9050 &iter->dev->adj_list.upper); 9051 netdev_adjacent_sysfs_add(dev, iter->dev, 9052 &dev->adj_list.lower); 9053 } 9054 } 9055 netdev_adjacent_del_links(struct net_device * dev)9056 static void netdev_adjacent_del_links(struct net_device *dev) 9057 { 9058 struct netdev_adjacent *iter; 9059 9060 struct net *net = dev_net(dev); 9061 9062 list_for_each_entry(iter, &dev->adj_list.upper, list) { 9063 if (!net_eq(net, dev_net(iter->dev))) 9064 continue; 9065 netdev_adjacent_sysfs_del(iter->dev, dev->name, 9066 &iter->dev->adj_list.lower); 9067 netdev_adjacent_sysfs_del(dev, iter->dev->name, 9068 &dev->adj_list.upper); 9069 } 9070 9071 list_for_each_entry(iter, &dev->adj_list.lower, list) { 9072 if (!net_eq(net, dev_net(iter->dev))) 9073 continue; 9074 netdev_adjacent_sysfs_del(iter->dev, dev->name, 9075 &iter->dev->adj_list.upper); 9076 netdev_adjacent_sysfs_del(dev, iter->dev->name, 9077 &dev->adj_list.lower); 9078 } 9079 } 9080 netdev_adjacent_rename_links(struct net_device * dev,char * oldname)9081 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 9082 { 9083 struct netdev_adjacent *iter; 9084 9085 struct net *net = dev_net(dev); 9086 9087 list_for_each_entry(iter, &dev->adj_list.upper, list) { 9088 if (!net_eq(net, dev_net(iter->dev))) 9089 continue; 9090 netdev_adjacent_sysfs_del(iter->dev, oldname, 9091 &iter->dev->adj_list.lower); 9092 netdev_adjacent_sysfs_add(iter->dev, dev, 9093 &iter->dev->adj_list.lower); 9094 } 9095 9096 list_for_each_entry(iter, &dev->adj_list.lower, list) { 9097 if (!net_eq(net, dev_net(iter->dev))) 9098 continue; 9099 netdev_adjacent_sysfs_del(iter->dev, oldname, 9100 &iter->dev->adj_list.upper); 9101 netdev_adjacent_sysfs_add(iter->dev, dev, 9102 &iter->dev->adj_list.upper); 9103 } 9104 } 9105 netdev_lower_dev_get_private(struct net_device * dev,struct net_device * lower_dev)9106 void *netdev_lower_dev_get_private(struct net_device *dev, 9107 struct net_device *lower_dev) 9108 { 9109 struct netdev_adjacent *lower; 9110 9111 if (!lower_dev) 9112 return NULL; 9113 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 9114 if (!lower) 9115 return NULL; 9116 9117 return lower->private; 9118 } 9119 EXPORT_SYMBOL(netdev_lower_dev_get_private); 9120 9121 9122 /** 9123 * netdev_lower_state_changed - Dispatch event about lower device state change 9124 * @lower_dev: device 9125 * @lower_state_info: state to dispatch 9126 * 9127 * Send NETDEV_CHANGELOWERSTATE to netdev 
notifiers with info. 9128 * The caller must hold the RTNL lock. 9129 */ netdev_lower_state_changed(struct net_device * lower_dev,void * lower_state_info)9130 void netdev_lower_state_changed(struct net_device *lower_dev, 9131 void *lower_state_info) 9132 { 9133 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 9134 .info.dev = lower_dev, 9135 }; 9136 9137 ASSERT_RTNL(); 9138 changelowerstate_info.lower_state_info = lower_state_info; 9139 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 9140 &changelowerstate_info.info); 9141 } 9142 EXPORT_SYMBOL(netdev_lower_state_changed); 9143 dev_change_rx_flags(struct net_device * dev,int flags)9144 static void dev_change_rx_flags(struct net_device *dev, int flags) 9145 { 9146 const struct net_device_ops *ops = dev->netdev_ops; 9147 9148 if (ops->ndo_change_rx_flags) 9149 ops->ndo_change_rx_flags(dev, flags); 9150 } 9151 __dev_set_promiscuity(struct net_device * dev,int inc,bool notify)9152 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 9153 { 9154 unsigned int old_flags = dev->flags; 9155 unsigned int promiscuity, flags; 9156 kuid_t uid; 9157 kgid_t gid; 9158 9159 ASSERT_RTNL(); 9160 9161 promiscuity = dev->promiscuity + inc; 9162 if (promiscuity == 0) { 9163 /* 9164 * Avoid overflow. 9165 * If inc causes overflow, untouch promisc and return error. 9166 */ 9167 if (unlikely(inc > 0)) { 9168 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 9169 return -EOVERFLOW; 9170 } 9171 flags = old_flags & ~IFF_PROMISC; 9172 } else { 9173 flags = old_flags | IFF_PROMISC; 9174 } 9175 WRITE_ONCE(dev->promiscuity, promiscuity); 9176 if (flags != old_flags) { 9177 WRITE_ONCE(dev->flags, flags); 9178 netdev_info(dev, "%s promiscuous mode\n", 9179 dev->flags & IFF_PROMISC ? "entered" : "left"); 9180 if (audit_enabled) { 9181 current_uid_gid(&uid, &gid); 9182 audit_log(audit_context(), GFP_ATOMIC, 9183 AUDIT_ANOM_PROMISCUOUS, 9184 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 9185 dev->name, (dev->flags & IFF_PROMISC), 9186 (old_flags & IFF_PROMISC), 9187 from_kuid(&init_user_ns, audit_get_loginuid(current)), 9188 from_kuid(&init_user_ns, uid), 9189 from_kgid(&init_user_ns, gid), 9190 audit_get_sessionid(current)); 9191 } 9192 9193 dev_change_rx_flags(dev, IFF_PROMISC); 9194 } 9195 if (notify) 9196 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); 9197 return 0; 9198 } 9199 9200 /** 9201 * dev_set_promiscuity - update promiscuity count on a device 9202 * @dev: device 9203 * @inc: modifier 9204 * 9205 * Add or remove promiscuity from a device. While the count in the device 9206 * remains above zero the interface remains promiscuous. Once it hits zero 9207 * the device reverts back to normal filtering operation. A negative inc 9208 * value is used to drop promiscuity on the device. 9209 * Return 0 if successful or a negative errno code on error. 
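 *
 * A typical caller (sketch only) holds the RTNL lock, calls
 * dev_set_promiscuity(dev, 1) while it needs to see all traffic and
 * balances it later with dev_set_promiscuity(dev, -1).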
9210 */ dev_set_promiscuity(struct net_device * dev,int inc)9211 int dev_set_promiscuity(struct net_device *dev, int inc) 9212 { 9213 unsigned int old_flags = dev->flags; 9214 int err; 9215 9216 err = __dev_set_promiscuity(dev, inc, true); 9217 if (err < 0) 9218 return err; 9219 if (dev->flags != old_flags) 9220 dev_set_rx_mode(dev); 9221 return err; 9222 } 9223 EXPORT_SYMBOL(dev_set_promiscuity); 9224 netif_set_allmulti(struct net_device * dev,int inc,bool notify)9225 int netif_set_allmulti(struct net_device *dev, int inc, bool notify) 9226 { 9227 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 9228 unsigned int allmulti, flags; 9229 9230 ASSERT_RTNL(); 9231 9232 allmulti = dev->allmulti + inc; 9233 if (allmulti == 0) { 9234 /* 9235 * Avoid overflow. 9236 * If inc causes overflow, untouch allmulti and return error. 9237 */ 9238 if (unlikely(inc > 0)) { 9239 netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n"); 9240 return -EOVERFLOW; 9241 } 9242 flags = old_flags & ~IFF_ALLMULTI; 9243 } else { 9244 flags = old_flags | IFF_ALLMULTI; 9245 } 9246 WRITE_ONCE(dev->allmulti, allmulti); 9247 if (flags != old_flags) { 9248 WRITE_ONCE(dev->flags, flags); 9249 netdev_info(dev, "%s allmulticast mode\n", 9250 dev->flags & IFF_ALLMULTI ? "entered" : "left"); 9251 dev_change_rx_flags(dev, IFF_ALLMULTI); 9252 dev_set_rx_mode(dev); 9253 if (notify) 9254 __dev_notify_flags(dev, old_flags, 9255 dev->gflags ^ old_gflags, 0, NULL); 9256 } 9257 return 0; 9258 } 9259 9260 /* 9261 * Upload unicast and multicast address lists to device and 9262 * configure RX filtering. When the device doesn't support unicast 9263 * filtering it is put in promiscuous mode while unicast addresses 9264 * are present. 9265 */ __dev_set_rx_mode(struct net_device * dev)9266 void __dev_set_rx_mode(struct net_device *dev) 9267 { 9268 const struct net_device_ops *ops = dev->netdev_ops; 9269 9270 /* dev_open will call this function so the list will stay sane. */ 9271 if (!(dev->flags&IFF_UP)) 9272 return; 9273 9274 if (!netif_device_present(dev)) 9275 return; 9276 9277 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 9278 /* Unicast addresses changes may only happen under the rtnl, 9279 * therefore calling __dev_set_promiscuity here is safe. 9280 */ 9281 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 9282 __dev_set_promiscuity(dev, 1, false); 9283 dev->uc_promisc = true; 9284 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 9285 __dev_set_promiscuity(dev, -1, false); 9286 dev->uc_promisc = false; 9287 } 9288 } 9289 9290 if (ops->ndo_set_rx_mode) 9291 ops->ndo_set_rx_mode(dev); 9292 } 9293 dev_set_rx_mode(struct net_device * dev)9294 void dev_set_rx_mode(struct net_device *dev) 9295 { 9296 netif_addr_lock_bh(dev); 9297 __dev_set_rx_mode(dev); 9298 netif_addr_unlock_bh(dev); 9299 } 9300 9301 /** 9302 * dev_get_flags - get flags reported to userspace 9303 * @dev: device 9304 * 9305 * Get the combination of flag bits exported through APIs to userspace. 
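 *
 * Added illustration (not in the original comment): the returned value is
 * roughly
 *
 *	(dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | IFF_RUNNING |
 *			IFF_LOWER_UP | IFF_DORMANT)) |
 *	(dev->gflags & (IFF_PROMISC | IFF_ALLMULTI))
 *
 * with IFF_RUNNING, IFF_LOWER_UP and IFF_DORMANT then filled in from the
 * operational state while the device is running.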
9306 */ dev_get_flags(const struct net_device * dev)9307 unsigned int dev_get_flags(const struct net_device *dev) 9308 { 9309 unsigned int flags; 9310 9311 flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC | 9312 IFF_ALLMULTI | 9313 IFF_RUNNING | 9314 IFF_LOWER_UP | 9315 IFF_DORMANT)) | 9316 (READ_ONCE(dev->gflags) & (IFF_PROMISC | 9317 IFF_ALLMULTI)); 9318 9319 if (netif_running(dev)) { 9320 if (netif_oper_up(dev)) 9321 flags |= IFF_RUNNING; 9322 if (netif_carrier_ok(dev)) 9323 flags |= IFF_LOWER_UP; 9324 if (netif_dormant(dev)) 9325 flags |= IFF_DORMANT; 9326 } 9327 9328 return flags; 9329 } 9330 EXPORT_SYMBOL(dev_get_flags); 9331 __dev_change_flags(struct net_device * dev,unsigned int flags,struct netlink_ext_ack * extack)9332 int __dev_change_flags(struct net_device *dev, unsigned int flags, 9333 struct netlink_ext_ack *extack) 9334 { 9335 unsigned int old_flags = dev->flags; 9336 int ret; 9337 9338 ASSERT_RTNL(); 9339 9340 /* 9341 * Set the flags on our device. 9342 */ 9343 9344 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 9345 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 9346 IFF_AUTOMEDIA)) | 9347 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 9348 IFF_ALLMULTI)); 9349 9350 /* 9351 * Load in the correct multicast list now the flags have changed. 9352 */ 9353 9354 if ((old_flags ^ flags) & IFF_MULTICAST) 9355 dev_change_rx_flags(dev, IFF_MULTICAST); 9356 9357 dev_set_rx_mode(dev); 9358 9359 /* 9360 * Have we downed the interface. We handle IFF_UP ourselves 9361 * according to user attempts to set it, rather than blindly 9362 * setting it. 9363 */ 9364 9365 ret = 0; 9366 if ((old_flags ^ flags) & IFF_UP) { 9367 if (old_flags & IFF_UP) 9368 __dev_close(dev); 9369 else 9370 ret = __dev_open(dev, extack); 9371 } 9372 9373 if ((flags ^ dev->gflags) & IFF_PROMISC) { 9374 int inc = (flags & IFF_PROMISC) ? 1 : -1; 9375 old_flags = dev->flags; 9376 9377 dev->gflags ^= IFF_PROMISC; 9378 9379 if (__dev_set_promiscuity(dev, inc, false) >= 0) 9380 if (dev->flags != old_flags) 9381 dev_set_rx_mode(dev); 9382 } 9383 9384 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 9385 * is important. Some (broken) drivers set IFF_PROMISC, when 9386 * IFF_ALLMULTI is requested not asking us and not reporting. 9387 */ 9388 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 9389 int inc = (flags & IFF_ALLMULTI) ? 
1 : -1; 9390 9391 dev->gflags ^= IFF_ALLMULTI; 9392 netif_set_allmulti(dev, inc, false); 9393 } 9394 9395 return ret; 9396 } 9397 __dev_notify_flags(struct net_device * dev,unsigned int old_flags,unsigned int gchanges,u32 portid,const struct nlmsghdr * nlh)9398 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 9399 unsigned int gchanges, u32 portid, 9400 const struct nlmsghdr *nlh) 9401 { 9402 unsigned int changes = dev->flags ^ old_flags; 9403 9404 if (gchanges) 9405 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); 9406 9407 if (changes & IFF_UP) { 9408 if (dev->flags & IFF_UP) 9409 call_netdevice_notifiers(NETDEV_UP, dev); 9410 else 9411 call_netdevice_notifiers(NETDEV_DOWN, dev); 9412 } 9413 9414 if (dev->flags & IFF_UP && 9415 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 9416 struct netdev_notifier_change_info change_info = { 9417 .info = { 9418 .dev = dev, 9419 }, 9420 .flags_changed = changes, 9421 }; 9422 9423 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 9424 } 9425 } 9426 netif_change_flags(struct net_device * dev,unsigned int flags,struct netlink_ext_ack * extack)9427 int netif_change_flags(struct net_device *dev, unsigned int flags, 9428 struct netlink_ext_ack *extack) 9429 { 9430 int ret; 9431 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 9432 9433 ret = __dev_change_flags(dev, flags, extack); 9434 if (ret < 0) 9435 return ret; 9436 9437 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 9438 __dev_notify_flags(dev, old_flags, changes, 0, NULL); 9439 return ret; 9440 } 9441 __dev_set_mtu(struct net_device * dev,int new_mtu)9442 int __dev_set_mtu(struct net_device *dev, int new_mtu) 9443 { 9444 const struct net_device_ops *ops = dev->netdev_ops; 9445 9446 if (ops->ndo_change_mtu) 9447 return ops->ndo_change_mtu(dev, new_mtu); 9448 9449 /* Pairs with all the lockless reads of dev->mtu in the stack */ 9450 WRITE_ONCE(dev->mtu, new_mtu); 9451 return 0; 9452 } 9453 EXPORT_SYMBOL(__dev_set_mtu); 9454 dev_validate_mtu(struct net_device * dev,int new_mtu,struct netlink_ext_ack * extack)9455 int dev_validate_mtu(struct net_device *dev, int new_mtu, 9456 struct netlink_ext_ack *extack) 9457 { 9458 /* MTU must be positive, and in range */ 9459 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 9460 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 9461 return -EINVAL; 9462 } 9463 9464 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 9465 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 9466 return -EINVAL; 9467 } 9468 return 0; 9469 } 9470 9471 /** 9472 * netif_set_mtu_ext - Change maximum transfer unit 9473 * @dev: device 9474 * @new_mtu: new transfer unit 9475 * @extack: netlink extended ack 9476 * 9477 * Change the maximum transfer size of the network device. 
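 *
 * Illustrative sketch (hypothetical caller, not part of the original
 * comment), typically invoked with RTNL held:
 *
 *	err = netif_set_mtu_ext(dev, 9000, extack);
 *	if (err && extack->_msg)
 *		netdev_err(dev, "MTU change failed: %s\n", extack->_msg);
 *
 * If a notifier rejects the new value after the driver applied it, the
 * old MTU is restored and NETDEV_CHANGEMTU is re-sent so listeners can
 * back out their own changes.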
9478 */ netif_set_mtu_ext(struct net_device * dev,int new_mtu,struct netlink_ext_ack * extack)9479 int netif_set_mtu_ext(struct net_device *dev, int new_mtu, 9480 struct netlink_ext_ack *extack) 9481 { 9482 int err, orig_mtu; 9483 9484 if (new_mtu == dev->mtu) 9485 return 0; 9486 9487 err = dev_validate_mtu(dev, new_mtu, extack); 9488 if (err) 9489 return err; 9490 9491 if (!netif_device_present(dev)) 9492 return -ENODEV; 9493 9494 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 9495 err = notifier_to_errno(err); 9496 if (err) 9497 return err; 9498 9499 orig_mtu = dev->mtu; 9500 err = __dev_set_mtu(dev, new_mtu); 9501 9502 if (!err) { 9503 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 9504 orig_mtu); 9505 err = notifier_to_errno(err); 9506 if (err) { 9507 /* setting mtu back and notifying everyone again, 9508 * so that they have a chance to revert changes. 9509 */ 9510 __dev_set_mtu(dev, orig_mtu); 9511 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 9512 new_mtu); 9513 } 9514 } 9515 return err; 9516 } 9517 netif_set_mtu(struct net_device * dev,int new_mtu)9518 int netif_set_mtu(struct net_device *dev, int new_mtu) 9519 { 9520 struct netlink_ext_ack extack; 9521 int err; 9522 9523 memset(&extack, 0, sizeof(extack)); 9524 err = netif_set_mtu_ext(dev, new_mtu, &extack); 9525 if (err && extack._msg) 9526 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 9527 return err; 9528 } 9529 EXPORT_SYMBOL(netif_set_mtu); 9530 netif_change_tx_queue_len(struct net_device * dev,unsigned long new_len)9531 int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 9532 { 9533 unsigned int orig_len = dev->tx_queue_len; 9534 int res; 9535 9536 if (new_len != (unsigned int)new_len) 9537 return -ERANGE; 9538 9539 if (new_len != orig_len) { 9540 WRITE_ONCE(dev->tx_queue_len, new_len); 9541 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 9542 res = notifier_to_errno(res); 9543 if (res) 9544 goto err_rollback; 9545 res = dev_qdisc_change_tx_queue_len(dev); 9546 if (res) 9547 goto err_rollback; 9548 } 9549 9550 return 0; 9551 9552 err_rollback: 9553 netdev_err(dev, "refused to change device tx_queue_len\n"); 9554 WRITE_ONCE(dev->tx_queue_len, orig_len); 9555 return res; 9556 } 9557 netif_set_group(struct net_device * dev,int new_group)9558 void netif_set_group(struct net_device *dev, int new_group) 9559 { 9560 dev->group = new_group; 9561 } 9562 9563 /** 9564 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 
9565 * @dev: device 9566 * @addr: new address 9567 * @extack: netlink extended ack 9568 */ dev_pre_changeaddr_notify(struct net_device * dev,const char * addr,struct netlink_ext_ack * extack)9569 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 9570 struct netlink_ext_ack *extack) 9571 { 9572 struct netdev_notifier_pre_changeaddr_info info = { 9573 .info.dev = dev, 9574 .info.extack = extack, 9575 .dev_addr = addr, 9576 }; 9577 int rc; 9578 9579 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 9580 return notifier_to_errno(rc); 9581 } 9582 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 9583 netif_set_mac_address(struct net_device * dev,struct sockaddr * sa,struct netlink_ext_ack * extack)9584 int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa, 9585 struct netlink_ext_ack *extack) 9586 { 9587 const struct net_device_ops *ops = dev->netdev_ops; 9588 int err; 9589 9590 if (!ops->ndo_set_mac_address) 9591 return -EOPNOTSUPP; 9592 if (sa->sa_family != dev->type) 9593 return -EINVAL; 9594 if (!netif_device_present(dev)) 9595 return -ENODEV; 9596 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 9597 if (err) 9598 return err; 9599 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) { 9600 err = ops->ndo_set_mac_address(dev, sa); 9601 if (err) 9602 return err; 9603 } 9604 dev->addr_assign_type = NET_ADDR_SET; 9605 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 9606 add_device_randomness(dev->dev_addr, dev->addr_len); 9607 return 0; 9608 } 9609 9610 DECLARE_RWSEM(dev_addr_sem); 9611 dev_get_mac_address(struct sockaddr * sa,struct net * net,char * dev_name)9612 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 9613 { 9614 size_t size = sizeof(sa->sa_data_min); 9615 struct net_device *dev; 9616 int ret = 0; 9617 9618 down_read(&dev_addr_sem); 9619 rcu_read_lock(); 9620 9621 dev = dev_get_by_name_rcu(net, dev_name); 9622 if (!dev) { 9623 ret = -ENODEV; 9624 goto unlock; 9625 } 9626 if (!dev->addr_len) 9627 memset(sa->sa_data, 0, size); 9628 else 9629 memcpy(sa->sa_data, dev->dev_addr, 9630 min_t(size_t, size, dev->addr_len)); 9631 sa->sa_family = dev->type; 9632 9633 unlock: 9634 rcu_read_unlock(); 9635 up_read(&dev_addr_sem); 9636 return ret; 9637 } 9638 EXPORT_SYMBOL(dev_get_mac_address); 9639 netif_change_carrier(struct net_device * dev,bool new_carrier)9640 int netif_change_carrier(struct net_device *dev, bool new_carrier) 9641 { 9642 const struct net_device_ops *ops = dev->netdev_ops; 9643 9644 if (!ops->ndo_change_carrier) 9645 return -EOPNOTSUPP; 9646 if (!netif_device_present(dev)) 9647 return -ENODEV; 9648 return ops->ndo_change_carrier(dev, new_carrier); 9649 } 9650 9651 /** 9652 * dev_get_phys_port_id - Get device physical port ID 9653 * @dev: device 9654 * @ppid: port ID 9655 * 9656 * Get device physical port ID 9657 */ dev_get_phys_port_id(struct net_device * dev,struct netdev_phys_item_id * ppid)9658 int dev_get_phys_port_id(struct net_device *dev, 9659 struct netdev_phys_item_id *ppid) 9660 { 9661 const struct net_device_ops *ops = dev->netdev_ops; 9662 9663 if (!ops->ndo_get_phys_port_id) 9664 return -EOPNOTSUPP; 9665 return ops->ndo_get_phys_port_id(dev, ppid); 9666 } 9667 9668 /** 9669 * dev_get_phys_port_name - Get device physical port name 9670 * @dev: device 9671 * @name: port name 9672 * @len: limit of bytes to copy to name 9673 * 9674 * Get device physical port name 9675 */ dev_get_phys_port_name(struct net_device * dev,char * name,size_t len)9676 int 
dev_get_phys_port_name(struct net_device *dev, 9677 char *name, size_t len) 9678 { 9679 const struct net_device_ops *ops = dev->netdev_ops; 9680 int err; 9681 9682 if (ops->ndo_get_phys_port_name) { 9683 err = ops->ndo_get_phys_port_name(dev, name, len); 9684 if (err != -EOPNOTSUPP) 9685 return err; 9686 } 9687 return devlink_compat_phys_port_name_get(dev, name, len); 9688 } 9689 9690 /** 9691 * dev_get_port_parent_id - Get the device's port parent identifier 9692 * @dev: network device 9693 * @ppid: pointer to a storage for the port's parent identifier 9694 * @recurse: allow/disallow recursion to lower devices 9695 * 9696 * Get the devices's port parent identifier 9697 */ dev_get_port_parent_id(struct net_device * dev,struct netdev_phys_item_id * ppid,bool recurse)9698 int dev_get_port_parent_id(struct net_device *dev, 9699 struct netdev_phys_item_id *ppid, 9700 bool recurse) 9701 { 9702 const struct net_device_ops *ops = dev->netdev_ops; 9703 struct netdev_phys_item_id first = { }; 9704 struct net_device *lower_dev; 9705 struct list_head *iter; 9706 int err; 9707 9708 if (ops->ndo_get_port_parent_id) { 9709 err = ops->ndo_get_port_parent_id(dev, ppid); 9710 if (err != -EOPNOTSUPP) 9711 return err; 9712 } 9713 9714 err = devlink_compat_switch_id_get(dev, ppid); 9715 if (!recurse || err != -EOPNOTSUPP) 9716 return err; 9717 9718 netdev_for_each_lower_dev(dev, lower_dev, iter) { 9719 err = dev_get_port_parent_id(lower_dev, ppid, true); 9720 if (err) 9721 break; 9722 if (!first.id_len) 9723 first = *ppid; 9724 else if (memcmp(&first, ppid, sizeof(*ppid))) 9725 return -EOPNOTSUPP; 9726 } 9727 9728 return err; 9729 } 9730 EXPORT_SYMBOL(dev_get_port_parent_id); 9731 9732 /** 9733 * netdev_port_same_parent_id - Indicate if two network devices have 9734 * the same port parent identifier 9735 * @a: first network device 9736 * @b: second network device 9737 */ netdev_port_same_parent_id(struct net_device * a,struct net_device * b)9738 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 9739 { 9740 struct netdev_phys_item_id a_id = { }; 9741 struct netdev_phys_item_id b_id = { }; 9742 9743 if (dev_get_port_parent_id(a, &a_id, true) || 9744 dev_get_port_parent_id(b, &b_id, true)) 9745 return false; 9746 9747 return netdev_phys_item_id_same(&a_id, &b_id); 9748 } 9749 EXPORT_SYMBOL(netdev_port_same_parent_id); 9750 netif_change_proto_down(struct net_device * dev,bool proto_down)9751 int netif_change_proto_down(struct net_device *dev, bool proto_down) 9752 { 9753 if (!dev->change_proto_down) 9754 return -EOPNOTSUPP; 9755 if (!netif_device_present(dev)) 9756 return -ENODEV; 9757 if (proto_down) 9758 netif_carrier_off(dev); 9759 else 9760 netif_carrier_on(dev); 9761 WRITE_ONCE(dev->proto_down, proto_down); 9762 return 0; 9763 } 9764 9765 /** 9766 * netdev_change_proto_down_reason_locked - proto down reason 9767 * 9768 * @dev: device 9769 * @mask: proto down mask 9770 * @value: proto down value 9771 */ netdev_change_proto_down_reason_locked(struct net_device * dev,unsigned long mask,u32 value)9772 void netdev_change_proto_down_reason_locked(struct net_device *dev, 9773 unsigned long mask, u32 value) 9774 { 9775 u32 proto_down_reason; 9776 int b; 9777 9778 if (!mask) { 9779 proto_down_reason = value; 9780 } else { 9781 proto_down_reason = dev->proto_down_reason; 9782 for_each_set_bit(b, &mask, 32) { 9783 if (value & (1 << b)) 9784 proto_down_reason |= BIT(b); 9785 else 9786 proto_down_reason &= ~BIT(b); 9787 } 9788 } 9789 WRITE_ONCE(dev->proto_down_reason, proto_down_reason); 9790 } 
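/* Illustrative sketch (added, not part of the original file): with @mask
 * selecting which reason bits to touch and @value giving their new state,
 * a hypothetical caller clearing bit 0 and setting bit 2 would do
 *
 *	netdev_change_proto_down_reason_locked(dev, BIT(0) | BIT(2), BIT(2));
 *
 * while passing mask == 0 replaces the whole reason word with @value.
 */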
9791 9792 struct bpf_xdp_link { 9793 struct bpf_link link; 9794 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9795 int flags; 9796 }; 9797 dev_xdp_mode(struct net_device * dev,u32 flags)9798 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9799 { 9800 if (flags & XDP_FLAGS_HW_MODE) 9801 return XDP_MODE_HW; 9802 if (flags & XDP_FLAGS_DRV_MODE) 9803 return XDP_MODE_DRV; 9804 if (flags & XDP_FLAGS_SKB_MODE) 9805 return XDP_MODE_SKB; 9806 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; 9807 } 9808 dev_xdp_bpf_op(struct net_device * dev,enum bpf_xdp_mode mode)9809 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9810 { 9811 switch (mode) { 9812 case XDP_MODE_SKB: 9813 return generic_xdp_install; 9814 case XDP_MODE_DRV: 9815 case XDP_MODE_HW: 9816 return dev->netdev_ops->ndo_bpf; 9817 default: 9818 return NULL; 9819 } 9820 } 9821 dev_xdp_link(struct net_device * dev,enum bpf_xdp_mode mode)9822 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9823 enum bpf_xdp_mode mode) 9824 { 9825 return dev->xdp_state[mode].link; 9826 } 9827 dev_xdp_prog(struct net_device * dev,enum bpf_xdp_mode mode)9828 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9829 enum bpf_xdp_mode mode) 9830 { 9831 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9832 9833 if (link) 9834 return link->link.prog; 9835 return dev->xdp_state[mode].prog; 9836 } 9837 dev_xdp_prog_count(struct net_device * dev)9838 u8 dev_xdp_prog_count(struct net_device *dev) 9839 { 9840 u8 count = 0; 9841 int i; 9842 9843 for (i = 0; i < __MAX_XDP_MODE; i++) 9844 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9845 count++; 9846 return count; 9847 } 9848 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9849 dev_xdp_sb_prog_count(struct net_device * dev)9850 u8 dev_xdp_sb_prog_count(struct net_device *dev) 9851 { 9852 u8 count = 0; 9853 int i; 9854 9855 for (i = 0; i < __MAX_XDP_MODE; i++) 9856 if (dev->xdp_state[i].prog && 9857 !dev->xdp_state[i].prog->aux->xdp_has_frags) 9858 count++; 9859 return count; 9860 } 9861 netif_xdp_propagate(struct net_device * dev,struct netdev_bpf * bpf)9862 int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) 9863 { 9864 if (!dev->netdev_ops->ndo_bpf) 9865 return -EOPNOTSUPP; 9866 9867 if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED && 9868 bpf->command == XDP_SETUP_PROG && 9869 bpf->prog && !bpf->prog->aux->xdp_has_frags) { 9870 NL_SET_ERR_MSG(bpf->extack, 9871 "unable to propagate XDP to device using tcp-data-split"); 9872 return -EBUSY; 9873 } 9874 9875 if (dev_get_min_mp_channel_count(dev)) { 9876 NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider"); 9877 return -EBUSY; 9878 } 9879 9880 return dev->netdev_ops->ndo_bpf(dev, bpf); 9881 } 9882 dev_xdp_prog_id(struct net_device * dev,enum bpf_xdp_mode mode)9883 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9884 { 9885 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9886 9887 return prog ? 
prog->aux->id : 0; 9888 } 9889 9890 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9891 struct bpf_xdp_link *link) 9892 { 9893 dev->xdp_state[mode].link = link; 9894 dev->xdp_state[mode].prog = NULL; 9895 } 9896 9897 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9898 struct bpf_prog *prog) 9899 { 9900 dev->xdp_state[mode].link = NULL; 9901 dev->xdp_state[mode].prog = prog; 9902 } 9903 9904 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9905 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9906 u32 flags, struct bpf_prog *prog) 9907 { 9908 struct netdev_bpf xdp; 9909 int err; 9910 9911 netdev_ops_assert_locked(dev); 9912 9913 if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED && 9914 prog && !prog->aux->xdp_has_frags) { 9915 NL_SET_ERR_MSG(extack, "unable to install XDP to device using tcp-data-split"); 9916 return -EBUSY; 9917 } 9918 9919 if (dev_get_min_mp_channel_count(dev)) { 9920 NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider"); 9921 return -EBUSY; 9922 } 9923 9924 memset(&xdp, 0, sizeof(xdp)); 9925 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9926 xdp.extack = extack; 9927 xdp.flags = flags; 9928 xdp.prog = prog; 9929 9930 /* Drivers assume refcnt is already incremented (i.e., prog pointer is 9931 * "moved" into driver), so they don't increment it on their own, but 9932 * they do decrement refcnt when program is detached or replaced. 9933 * Given net_device also owns link/prog, we need to bump refcnt here 9934 * to prevent drivers from underflowing it.
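 * Put differently (added note): on success the driver ends up holding
 * the reference taken below, while dev->xdp_state keeps the caller's
 * original one; the driver's bpf_prog_put() on detach/replace then pairs
 * with this bpf_prog_inc().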
9935 */ 9936 if (prog) 9937 bpf_prog_inc(prog); 9938 err = bpf_op(dev, &xdp); 9939 if (err) { 9940 if (prog) 9941 bpf_prog_put(prog); 9942 return err; 9943 } 9944 9945 if (mode != XDP_MODE_HW) 9946 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9947 9948 return 0; 9949 } 9950 dev_xdp_uninstall(struct net_device * dev)9951 static void dev_xdp_uninstall(struct net_device *dev) 9952 { 9953 struct bpf_xdp_link *link; 9954 struct bpf_prog *prog; 9955 enum bpf_xdp_mode mode; 9956 bpf_op_t bpf_op; 9957 9958 ASSERT_RTNL(); 9959 9960 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9961 prog = dev_xdp_prog(dev, mode); 9962 if (!prog) 9963 continue; 9964 9965 bpf_op = dev_xdp_bpf_op(dev, mode); 9966 if (!bpf_op) 9967 continue; 9968 9969 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9970 9971 /* auto-detach link from net device */ 9972 link = dev_xdp_link(dev, mode); 9973 if (link) 9974 link->dev = NULL; 9975 else 9976 bpf_prog_put(prog); 9977 9978 dev_xdp_set_link(dev, mode, NULL); 9979 } 9980 } 9981 dev_xdp_attach(struct net_device * dev,struct netlink_ext_ack * extack,struct bpf_xdp_link * link,struct bpf_prog * new_prog,struct bpf_prog * old_prog,u32 flags)9982 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9983 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9984 struct bpf_prog *old_prog, u32 flags) 9985 { 9986 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9987 struct bpf_prog *cur_prog; 9988 struct net_device *upper; 9989 struct list_head *iter; 9990 enum bpf_xdp_mode mode; 9991 bpf_op_t bpf_op; 9992 int err; 9993 9994 ASSERT_RTNL(); 9995 9996 /* either link or prog attachment, never both */ 9997 if (link && (new_prog || old_prog)) 9998 return -EINVAL; 9999 /* link supports only XDP mode flags */ 10000 if (link && (flags & ~XDP_FLAGS_MODES)) { 10001 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 10002 return -EINVAL; 10003 } 10004 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 10005 if (num_modes > 1) { 10006 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 10007 return -EINVAL; 10008 } 10009 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 10010 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 10011 NL_SET_ERR_MSG(extack, 10012 "More than one program loaded, unset mode is ambiguous"); 10013 return -EINVAL; 10014 } 10015 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 10016 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 10017 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); 10018 return -EINVAL; 10019 } 10020 10021 mode = dev_xdp_mode(dev, flags); 10022 /* can't replace attached link */ 10023 if (dev_xdp_link(dev, mode)) { 10024 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 10025 return -EBUSY; 10026 } 10027 10028 /* don't allow if an upper device already has a program */ 10029 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 10030 if (dev_xdp_prog_count(upper) > 0) { 10031 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 10032 return -EEXIST; 10033 } 10034 } 10035 10036 cur_prog = dev_xdp_prog(dev, mode); 10037 /* can't replace attached prog with link */ 10038 if (link && cur_prog) { 10039 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 10040 return -EBUSY; 10041 } 10042 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 10043 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 10044 return -EEXIST; 10045 } 
10046 10047 /* put effective new program into new_prog */ 10048 if (link) 10049 new_prog = link->link.prog; 10050 10051 if (new_prog) { 10052 bool offload = mode == XDP_MODE_HW; 10053 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 10054 ? XDP_MODE_DRV : XDP_MODE_SKB; 10055 10056 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 10057 NL_SET_ERR_MSG(extack, "XDP program already attached"); 10058 return -EBUSY; 10059 } 10060 if (!offload && dev_xdp_prog(dev, other_mode)) { 10061 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 10062 return -EEXIST; 10063 } 10064 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { 10065 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); 10066 return -EINVAL; 10067 } 10068 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { 10069 NL_SET_ERR_MSG(extack, "Program bound to different device"); 10070 return -EINVAL; 10071 } 10072 if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) { 10073 NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode"); 10074 return -EINVAL; 10075 } 10076 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 10077 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 10078 return -EINVAL; 10079 } 10080 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 10081 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 10082 return -EINVAL; 10083 } 10084 } 10085 10086 /* don't call drivers if the effective program didn't change */ 10087 if (new_prog != cur_prog) { 10088 bpf_op = dev_xdp_bpf_op(dev, mode); 10089 if (!bpf_op) { 10090 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 10091 return -EOPNOTSUPP; 10092 } 10093 10094 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 10095 if (err) 10096 return err; 10097 } 10098 10099 if (link) 10100 dev_xdp_set_link(dev, mode, link); 10101 else 10102 dev_xdp_set_prog(dev, mode, new_prog); 10103 if (cur_prog) 10104 bpf_prog_put(cur_prog); 10105 10106 return 0; 10107 } 10108 dev_xdp_attach_link(struct net_device * dev,struct netlink_ext_ack * extack,struct bpf_xdp_link * link)10109 static int dev_xdp_attach_link(struct net_device *dev, 10110 struct netlink_ext_ack *extack, 10111 struct bpf_xdp_link *link) 10112 { 10113 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 10114 } 10115 dev_xdp_detach_link(struct net_device * dev,struct netlink_ext_ack * extack,struct bpf_xdp_link * link)10116 static int dev_xdp_detach_link(struct net_device *dev, 10117 struct netlink_ext_ack *extack, 10118 struct bpf_xdp_link *link) 10119 { 10120 enum bpf_xdp_mode mode; 10121 bpf_op_t bpf_op; 10122 10123 ASSERT_RTNL(); 10124 10125 mode = dev_xdp_mode(dev, link->flags); 10126 if (dev_xdp_link(dev, mode) != link) 10127 return -EINVAL; 10128 10129 bpf_op = dev_xdp_bpf_op(dev, mode); 10130 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 10131 dev_xdp_set_link(dev, mode, NULL); 10132 return 0; 10133 } 10134 bpf_xdp_link_release(struct bpf_link * link)10135 static void bpf_xdp_link_release(struct bpf_link *link) 10136 { 10137 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 10138 10139 rtnl_lock(); 10140 10141 /* if racing with net_device's tear down, xdp_link->dev might be 10142 * already NULL, in which case link was already auto-detached 10143 */ 10144 if (xdp_link->dev) { 10145 netdev_lock_ops(xdp_link->dev); 
10146 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 10147 netdev_unlock_ops(xdp_link->dev); 10148 xdp_link->dev = NULL; 10149 } 10150 10151 rtnl_unlock(); 10152 } 10153 bpf_xdp_link_detach(struct bpf_link * link)10154 static int bpf_xdp_link_detach(struct bpf_link *link) 10155 { 10156 bpf_xdp_link_release(link); 10157 return 0; 10158 } 10159 bpf_xdp_link_dealloc(struct bpf_link * link)10160 static void bpf_xdp_link_dealloc(struct bpf_link *link) 10161 { 10162 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 10163 10164 kfree(xdp_link); 10165 } 10166 bpf_xdp_link_show_fdinfo(const struct bpf_link * link,struct seq_file * seq)10167 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 10168 struct seq_file *seq) 10169 { 10170 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 10171 u32 ifindex = 0; 10172 10173 rtnl_lock(); 10174 if (xdp_link->dev) 10175 ifindex = xdp_link->dev->ifindex; 10176 rtnl_unlock(); 10177 10178 seq_printf(seq, "ifindex:\t%u\n", ifindex); 10179 } 10180 bpf_xdp_link_fill_link_info(const struct bpf_link * link,struct bpf_link_info * info)10181 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 10182 struct bpf_link_info *info) 10183 { 10184 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 10185 u32 ifindex = 0; 10186 10187 rtnl_lock(); 10188 if (xdp_link->dev) 10189 ifindex = xdp_link->dev->ifindex; 10190 rtnl_unlock(); 10191 10192 info->xdp.ifindex = ifindex; 10193 return 0; 10194 } 10195 bpf_xdp_link_update(struct bpf_link * link,struct bpf_prog * new_prog,struct bpf_prog * old_prog)10196 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 10197 struct bpf_prog *old_prog) 10198 { 10199 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 10200 enum bpf_xdp_mode mode; 10201 bpf_op_t bpf_op; 10202 int err = 0; 10203 10204 rtnl_lock(); 10205 10206 /* link might have been auto-released already, so fail */ 10207 if (!xdp_link->dev) { 10208 err = -ENOLINK; 10209 goto out_unlock; 10210 } 10211 10212 if (old_prog && link->prog != old_prog) { 10213 err = -EPERM; 10214 goto out_unlock; 10215 } 10216 old_prog = link->prog; 10217 if (old_prog->type != new_prog->type || 10218 old_prog->expected_attach_type != new_prog->expected_attach_type) { 10219 err = -EINVAL; 10220 goto out_unlock; 10221 } 10222 10223 if (old_prog == new_prog) { 10224 /* no-op, don't disturb drivers */ 10225 bpf_prog_put(new_prog); 10226 goto out_unlock; 10227 } 10228 10229 netdev_lock_ops(xdp_link->dev); 10230 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 10231 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 10232 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 10233 xdp_link->flags, new_prog); 10234 netdev_unlock_ops(xdp_link->dev); 10235 if (err) 10236 goto out_unlock; 10237 10238 old_prog = xchg(&link->prog, new_prog); 10239 bpf_prog_put(old_prog); 10240 10241 out_unlock: 10242 rtnl_unlock(); 10243 return err; 10244 } 10245 10246 static const struct bpf_link_ops bpf_xdp_link_lops = { 10247 .release = bpf_xdp_link_release, 10248 .dealloc = bpf_xdp_link_dealloc, 10249 .detach = bpf_xdp_link_detach, 10250 .show_fdinfo = bpf_xdp_link_show_fdinfo, 10251 .fill_link_info = bpf_xdp_link_fill_link_info, 10252 .update_prog = bpf_xdp_link_update, 10253 }; 10254 bpf_xdp_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)10255 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 10256 { 
10257 struct net *net = current->nsproxy->net_ns; 10258 struct bpf_link_primer link_primer; 10259 struct netlink_ext_ack extack = {}; 10260 struct bpf_xdp_link *link; 10261 struct net_device *dev; 10262 int err, fd; 10263 10264 rtnl_lock(); 10265 dev = dev_get_by_index(net, attr->link_create.target_ifindex); 10266 if (!dev) { 10267 rtnl_unlock(); 10268 return -EINVAL; 10269 } 10270 10271 link = kzalloc(sizeof(*link), GFP_USER); 10272 if (!link) { 10273 err = -ENOMEM; 10274 goto unlock; 10275 } 10276 10277 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 10278 link->dev = dev; 10279 link->flags = attr->link_create.flags; 10280 10281 err = bpf_link_prime(&link->link, &link_primer); 10282 if (err) { 10283 kfree(link); 10284 goto unlock; 10285 } 10286 10287 err = dev_xdp_attach_link(dev, &extack, link); 10288 rtnl_unlock(); 10289 10290 if (err) { 10291 link->dev = NULL; 10292 bpf_link_cleanup(&link_primer); 10293 trace_bpf_xdp_link_attach_failed(extack._msg); 10294 goto out_put_dev; 10295 } 10296 10297 fd = bpf_link_settle(&link_primer); 10298 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 10299 dev_put(dev); 10300 return fd; 10301 10302 unlock: 10303 rtnl_unlock(); 10304 10305 out_put_dev: 10306 dev_put(dev); 10307 return err; 10308 } 10309 10310 /** 10311 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 10312 * @dev: device 10313 * @extack: netlink extended ack 10314 * @fd: new program fd or negative value to clear 10315 * @expected_fd: old program fd that userspace expects to replace or clear 10316 * @flags: xdp-related flags 10317 * 10318 * Set or clear a bpf program for a device 10319 */ dev_change_xdp_fd(struct net_device * dev,struct netlink_ext_ack * extack,int fd,int expected_fd,u32 flags)10320 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 10321 int fd, int expected_fd, u32 flags) 10322 { 10323 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 10324 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 10325 int err; 10326 10327 ASSERT_RTNL(); 10328 10329 if (fd >= 0) { 10330 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 10331 mode != XDP_MODE_SKB); 10332 if (IS_ERR(new_prog)) 10333 return PTR_ERR(new_prog); 10334 } 10335 10336 if (expected_fd >= 0) { 10337 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 10338 mode != XDP_MODE_SKB); 10339 if (IS_ERR(old_prog)) { 10340 err = PTR_ERR(old_prog); 10341 old_prog = NULL; 10342 goto err_out; 10343 } 10344 } 10345 10346 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 10347 10348 err_out: 10349 if (err && new_prog) 10350 bpf_prog_put(new_prog); 10351 if (old_prog) 10352 bpf_prog_put(old_prog); 10353 return err; 10354 } 10355 dev_get_min_mp_channel_count(const struct net_device * dev)10356 u32 dev_get_min_mp_channel_count(const struct net_device *dev) 10357 { 10358 int i; 10359 10360 netdev_ops_assert_locked(dev); 10361 10362 for (i = dev->real_num_rx_queues - 1; i >= 0; i--) 10363 if (dev->_rx[i].mp_params.mp_priv) 10364 /* The channel count is the idx plus 1. */ 10365 return i + 1; 10366 10367 return 0; 10368 } 10369 10370 /** 10371 * dev_index_reserve() - allocate an ifindex in a namespace 10372 * @net: the applicable net namespace 10373 * @ifindex: requested ifindex, pass %0 to get one allocated 10374 * 10375 * Allocate a ifindex for a new device. Caller must either use the ifindex 10376 * to store the device (via list_netdevice()) or call dev_index_release() 10377 * to give the index up. 
10378 * 10379 * Return: a suitable unique value for a new device interface number or -errno. 10380 */ dev_index_reserve(struct net * net,u32 ifindex)10381 static int dev_index_reserve(struct net *net, u32 ifindex) 10382 { 10383 int err; 10384 10385 if (ifindex > INT_MAX) { 10386 DEBUG_NET_WARN_ON_ONCE(1); 10387 return -EINVAL; 10388 } 10389 10390 if (!ifindex) 10391 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL, 10392 xa_limit_31b, &net->ifindex, GFP_KERNEL); 10393 else 10394 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL); 10395 if (err < 0) 10396 return err; 10397 10398 return ifindex; 10399 } 10400 dev_index_release(struct net * net,int ifindex)10401 static void dev_index_release(struct net *net, int ifindex) 10402 { 10403 /* Expect only unused indexes, unlist_netdevice() removes the used */ 10404 WARN_ON(xa_erase(&net->dev_by_index, ifindex)); 10405 } 10406 from_cleanup_net(void)10407 static bool from_cleanup_net(void) 10408 { 10409 #ifdef CONFIG_NET_NS 10410 return current == cleanup_net_task; 10411 #else 10412 return false; 10413 #endif 10414 } 10415 10416 /* Delayed registration/unregisteration */ 10417 LIST_HEAD(net_todo_list); 10418 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 10419 atomic_t dev_unreg_count = ATOMIC_INIT(0); 10420 net_set_todo(struct net_device * dev)10421 static void net_set_todo(struct net_device *dev) 10422 { 10423 list_add_tail(&dev->todo_list, &net_todo_list); 10424 } 10425 netdev_sync_upper_features(struct net_device * lower,struct net_device * upper,netdev_features_t features)10426 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 10427 struct net_device *upper, netdev_features_t features) 10428 { 10429 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 10430 netdev_features_t feature; 10431 int feature_bit; 10432 10433 for_each_netdev_feature(upper_disables, feature_bit) { 10434 feature = __NETIF_F_BIT(feature_bit); 10435 if (!(upper->wanted_features & feature) 10436 && (features & feature)) { 10437 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 10438 &feature, upper->name); 10439 features &= ~feature; 10440 } 10441 } 10442 10443 return features; 10444 } 10445 netdev_sync_lower_features(struct net_device * upper,struct net_device * lower,netdev_features_t features)10446 static void netdev_sync_lower_features(struct net_device *upper, 10447 struct net_device *lower, netdev_features_t features) 10448 { 10449 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 10450 netdev_features_t feature; 10451 int feature_bit; 10452 10453 for_each_netdev_feature(upper_disables, feature_bit) { 10454 feature = __NETIF_F_BIT(feature_bit); 10455 if (!(features & feature) && (lower->features & feature)) { 10456 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 10457 &feature, lower->name); 10458 lower->wanted_features &= ~feature; 10459 __netdev_update_features(lower); 10460 10461 if (unlikely(lower->features & feature)) 10462 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 10463 &feature, lower->name); 10464 else 10465 netdev_features_change(lower); 10466 } 10467 } 10468 } 10469 netdev_has_ip_or_hw_csum(netdev_features_t features)10470 static bool netdev_has_ip_or_hw_csum(netdev_features_t features) 10471 { 10472 netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 10473 bool ip_csum = (features & ip_csum_mask) == ip_csum_mask; 10474 bool hw_csum = features & NETIF_F_HW_CSUM; 10475 10476 return ip_csum || hw_csum; 10477 } 10478 
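/* Illustrative examples (added, not part of the original file) for the
 * helper above, which only reports true when a usable checksum offload
 * combination is present:
 *
 *	NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM	-> true
 *	NETIF_F_HW_CSUM				-> true
 *	NETIF_F_IP_CSUM alone (IPv4 only)	-> false
 *
 * netdev_fix_features() below uses this to drop NETIF_F_HW_TLS_TX and
 * NETIF_F_GSO_UDP_L4 when no such combination remains.
 */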
netdev_fix_features(struct net_device * dev,netdev_features_t features)10479 static netdev_features_t netdev_fix_features(struct net_device *dev, 10480 netdev_features_t features) 10481 { 10482 /* Fix illegal checksum combinations */ 10483 if ((features & NETIF_F_HW_CSUM) && 10484 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 10485 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 10486 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 10487 } 10488 10489 /* TSO requires that SG is present as well. */ 10490 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 10491 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 10492 features &= ~NETIF_F_ALL_TSO; 10493 } 10494 10495 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 10496 !(features & NETIF_F_IP_CSUM)) { 10497 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 10498 features &= ~NETIF_F_TSO; 10499 features &= ~NETIF_F_TSO_ECN; 10500 } 10501 10502 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 10503 !(features & NETIF_F_IPV6_CSUM)) { 10504 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 10505 features &= ~NETIF_F_TSO6; 10506 } 10507 10508 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 10509 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 10510 features &= ~NETIF_F_TSO_MANGLEID; 10511 10512 /* TSO ECN requires that TSO is present as well. */ 10513 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 10514 features &= ~NETIF_F_TSO_ECN; 10515 10516 /* Software GSO depends on SG. */ 10517 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 10518 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 10519 features &= ~NETIF_F_GSO; 10520 } 10521 10522 /* GSO partial features require GSO partial be set */ 10523 if ((features & dev->gso_partial_features) && 10524 !(features & NETIF_F_GSO_PARTIAL)) { 10525 netdev_dbg(dev, 10526 "Dropping partially supported GSO features since no GSO partial.\n"); 10527 features &= ~dev->gso_partial_features; 10528 } 10529 10530 if (!(features & NETIF_F_RXCSUM)) { 10531 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 10532 * successfully merged by hardware must also have the 10533 * checksum verified by hardware. If the user does not 10534 * want to enable RXCSUM, logically, we should disable GRO_HW. 
10535 */ 10536 if (features & NETIF_F_GRO_HW) { 10537 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 10538 features &= ~NETIF_F_GRO_HW; 10539 } 10540 } 10541 10542 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 10543 if (features & NETIF_F_RXFCS) { 10544 if (features & NETIF_F_LRO) { 10545 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 10546 features &= ~NETIF_F_LRO; 10547 } 10548 10549 if (features & NETIF_F_GRO_HW) { 10550 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 10551 features &= ~NETIF_F_GRO_HW; 10552 } 10553 } 10554 10555 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 10556 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 10557 features &= ~NETIF_F_LRO; 10558 } 10559 10560 if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) { 10561 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 10562 features &= ~NETIF_F_HW_TLS_TX; 10563 } 10564 10565 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 10566 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 10567 features &= ~NETIF_F_HW_TLS_RX; 10568 } 10569 10570 if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) { 10571 netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n"); 10572 features &= ~NETIF_F_GSO_UDP_L4; 10573 } 10574 10575 return features; 10576 } 10577 __netdev_update_features(struct net_device * dev)10578 int __netdev_update_features(struct net_device *dev) 10579 { 10580 struct net_device *upper, *lower; 10581 netdev_features_t features; 10582 struct list_head *iter; 10583 int err = -1; 10584 10585 ASSERT_RTNL(); 10586 netdev_ops_assert_locked(dev); 10587 10588 features = netdev_get_wanted_features(dev); 10589 10590 if (dev->netdev_ops->ndo_fix_features) 10591 features = dev->netdev_ops->ndo_fix_features(dev, features); 10592 10593 /* driver might be less strict about feature dependencies */ 10594 features = netdev_fix_features(dev, features); 10595 10596 /* some features can't be enabled if they're off on an upper device */ 10597 netdev_for_each_upper_dev_rcu(dev, upper, iter) 10598 features = netdev_sync_upper_features(dev, upper, features); 10599 10600 if (dev->features == features) 10601 goto sync_lower; 10602 10603 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 10604 &dev->features, &features); 10605 10606 if (dev->netdev_ops->ndo_set_features) 10607 err = dev->netdev_ops->ndo_set_features(dev, features); 10608 else 10609 err = 0; 10610 10611 if (unlikely(err < 0)) { 10612 netdev_err(dev, 10613 "set_features() failed (%d); wanted %pNF, left %pNF\n", 10614 err, &features, &dev->features); 10615 /* return non-0 since some features might have changed and 10616 * it's better to fire a spurious notification than miss it 10617 */ 10618 return -1; 10619 } 10620 10621 sync_lower: 10622 /* some features must be disabled on lower devices when disabled 10623 * on an upper device (think: bonding master or bridge) 10624 */ 10625 netdev_for_each_lower_dev(dev, lower, iter) 10626 netdev_sync_lower_features(dev, lower, features); 10627 10628 if (!err) { 10629 netdev_features_t diff = features ^ dev->features; 10630 10631 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 10632 /* udp_tunnel_{get,drop}_rx_info both need 10633 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 10634 * device, or they won't do anything. 
10635 * Thus we need to update dev->features 10636 * *before* calling udp_tunnel_get_rx_info, 10637 * but *after* calling udp_tunnel_drop_rx_info. 10638 */ 10639 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 10640 dev->features = features; 10641 udp_tunnel_get_rx_info(dev); 10642 } else { 10643 udp_tunnel_drop_rx_info(dev); 10644 } 10645 } 10646 10647 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 10648 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 10649 dev->features = features; 10650 err |= vlan_get_rx_ctag_filter_info(dev); 10651 } else { 10652 vlan_drop_rx_ctag_filter_info(dev); 10653 } 10654 } 10655 10656 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 10657 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 10658 dev->features = features; 10659 err |= vlan_get_rx_stag_filter_info(dev); 10660 } else { 10661 vlan_drop_rx_stag_filter_info(dev); 10662 } 10663 } 10664 10665 dev->features = features; 10666 } 10667 10668 return err < 0 ? 0 : 1; 10669 } 10670 10671 /** 10672 * netdev_update_features - recalculate device features 10673 * @dev: the device to check 10674 * 10675 * Recalculate dev->features set and send notifications if it 10676 * has changed. Should be called after driver or hardware dependent 10677 * conditions might have changed that influence the features. 10678 */ netdev_update_features(struct net_device * dev)10679 void netdev_update_features(struct net_device *dev) 10680 { 10681 if (__netdev_update_features(dev)) 10682 netdev_features_change(dev); 10683 } 10684 EXPORT_SYMBOL(netdev_update_features); 10685 10686 /** 10687 * netdev_change_features - recalculate device features 10688 * @dev: the device to check 10689 * 10690 * Recalculate dev->features set and send notifications even 10691 * if they have not changed. Should be called instead of 10692 * netdev_update_features() if also dev->vlan_features might 10693 * have changed to allow the changes to be propagated to stacked 10694 * VLAN devices. 10695 */ netdev_change_features(struct net_device * dev)10696 void netdev_change_features(struct net_device *dev) 10697 { 10698 __netdev_update_features(dev); 10699 netdev_features_change(dev); 10700 } 10701 EXPORT_SYMBOL(netdev_change_features); 10702 10703 /** 10704 * netif_stacked_transfer_operstate - transfer operstate 10705 * @rootdev: the root or lower level device to transfer state from 10706 * @dev: the device to transfer operstate to 10707 * 10708 * Transfer operational state from root to device. This is normally 10709 * called when a stacking relationship exists between the root 10710 * device and the device(a leaf device). 
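 *
 * Illustrative sketch (hypothetical caller, not part of the original
 * comment): an upper driver such as a VLAN would typically call
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);
 *
 * from its notifier handler so that carrier, dormant and testing state
 * of the stacked device follows the underlying one.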
10711 */ netif_stacked_transfer_operstate(const struct net_device * rootdev,struct net_device * dev)10712 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 10713 struct net_device *dev) 10714 { 10715 if (rootdev->operstate == IF_OPER_DORMANT) 10716 netif_dormant_on(dev); 10717 else 10718 netif_dormant_off(dev); 10719 10720 if (rootdev->operstate == IF_OPER_TESTING) 10721 netif_testing_on(dev); 10722 else 10723 netif_testing_off(dev); 10724 10725 if (netif_carrier_ok(rootdev)) 10726 netif_carrier_on(dev); 10727 else 10728 netif_carrier_off(dev); 10729 } 10730 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 10731 netif_alloc_rx_queues(struct net_device * dev)10732 static int netif_alloc_rx_queues(struct net_device *dev) 10733 { 10734 unsigned int i, count = dev->num_rx_queues; 10735 struct netdev_rx_queue *rx; 10736 size_t sz = count * sizeof(*rx); 10737 int err = 0; 10738 10739 BUG_ON(count < 1); 10740 10741 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10742 if (!rx) 10743 return -ENOMEM; 10744 10745 dev->_rx = rx; 10746 10747 for (i = 0; i < count; i++) { 10748 rx[i].dev = dev; 10749 10750 /* XDP RX-queue setup */ 10751 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 10752 if (err < 0) 10753 goto err_rxq_info; 10754 } 10755 return 0; 10756 10757 err_rxq_info: 10758 /* Rollback successful reg's and free other resources */ 10759 while (i--) 10760 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 10761 kvfree(dev->_rx); 10762 dev->_rx = NULL; 10763 return err; 10764 } 10765 netif_free_rx_queues(struct net_device * dev)10766 static void netif_free_rx_queues(struct net_device *dev) 10767 { 10768 unsigned int i, count = dev->num_rx_queues; 10769 10770 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 10771 if (!dev->_rx) 10772 return; 10773 10774 for (i = 0; i < count; i++) 10775 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 10776 10777 kvfree(dev->_rx); 10778 } 10779 netdev_init_one_queue(struct net_device * dev,struct netdev_queue * queue,void * _unused)10780 static void netdev_init_one_queue(struct net_device *dev, 10781 struct netdev_queue *queue, void *_unused) 10782 { 10783 /* Initialize queue lock */ 10784 spin_lock_init(&queue->_xmit_lock); 10785 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 10786 queue->xmit_lock_owner = -1; 10787 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 10788 queue->dev = dev; 10789 #ifdef CONFIG_BQL 10790 dql_init(&queue->dql, HZ); 10791 #endif 10792 } 10793 netif_free_tx_queues(struct net_device * dev)10794 static void netif_free_tx_queues(struct net_device *dev) 10795 { 10796 kvfree(dev->_tx); 10797 } 10798 netif_alloc_netdev_queues(struct net_device * dev)10799 static int netif_alloc_netdev_queues(struct net_device *dev) 10800 { 10801 unsigned int count = dev->num_tx_queues; 10802 struct netdev_queue *tx; 10803 size_t sz = count * sizeof(*tx); 10804 10805 if (count < 1 || count > 0xffff) 10806 return -EINVAL; 10807 10808 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10809 if (!tx) 10810 return -ENOMEM; 10811 10812 dev->_tx = tx; 10813 10814 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 10815 spin_lock_init(&dev->tx_global_lock); 10816 10817 return 0; 10818 } 10819 netif_tx_stop_all_queues(struct net_device * dev)10820 void netif_tx_stop_all_queues(struct net_device *dev) 10821 { 10822 unsigned int i; 10823 10824 for (i = 0; i < dev->num_tx_queues; i++) { 10825 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 10826 10827 netif_tx_stop_queue(txq); 10828 } 10829 
} 10830 EXPORT_SYMBOL(netif_tx_stop_all_queues); 10831 netdev_do_alloc_pcpu_stats(struct net_device * dev)10832 static int netdev_do_alloc_pcpu_stats(struct net_device *dev) 10833 { 10834 void __percpu *v; 10835 10836 /* Drivers implementing ndo_get_peer_dev must support tstat 10837 * accounting, so that skb_do_redirect() can bump the dev's 10838 * RX stats upon network namespace switch. 10839 */ 10840 if (dev->netdev_ops->ndo_get_peer_dev && 10841 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS) 10842 return -EOPNOTSUPP; 10843 10844 switch (dev->pcpu_stat_type) { 10845 case NETDEV_PCPU_STAT_NONE: 10846 return 0; 10847 case NETDEV_PCPU_STAT_LSTATS: 10848 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); 10849 break; 10850 case NETDEV_PCPU_STAT_TSTATS: 10851 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 10852 break; 10853 case NETDEV_PCPU_STAT_DSTATS: 10854 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); 10855 break; 10856 default: 10857 return -EINVAL; 10858 } 10859 10860 return v ? 0 : -ENOMEM; 10861 } 10862 netdev_do_free_pcpu_stats(struct net_device * dev)10863 static void netdev_do_free_pcpu_stats(struct net_device *dev) 10864 { 10865 switch (dev->pcpu_stat_type) { 10866 case NETDEV_PCPU_STAT_NONE: 10867 return; 10868 case NETDEV_PCPU_STAT_LSTATS: 10869 free_percpu(dev->lstats); 10870 break; 10871 case NETDEV_PCPU_STAT_TSTATS: 10872 free_percpu(dev->tstats); 10873 break; 10874 case NETDEV_PCPU_STAT_DSTATS: 10875 free_percpu(dev->dstats); 10876 break; 10877 } 10878 } 10879 netdev_free_phy_link_topology(struct net_device * dev)10880 static void netdev_free_phy_link_topology(struct net_device *dev) 10881 { 10882 struct phy_link_topology *topo = dev->link_topo; 10883 10884 if (IS_ENABLED(CONFIG_PHYLIB) && topo) { 10885 xa_destroy(&topo->phys); 10886 kfree(topo); 10887 dev->link_topo = NULL; 10888 } 10889 } 10890 10891 /** 10892 * register_netdevice() - register a network device 10893 * @dev: device to register 10894 * 10895 * Take a prepared network device structure and make it externally accessible. 10896 * A %NETDEV_REGISTER message is sent to the netdev notifier chain. 10897 * Callers must hold the rtnl lock - you may want register_netdev() 10898 * instead of this. 10899 */ register_netdevice(struct net_device * dev)10900 int register_netdevice(struct net_device *dev) 10901 { 10902 int ret; 10903 struct net *net = dev_net(dev); 10904 10905 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 10906 NETDEV_FEATURE_COUNT); 10907 BUG_ON(dev_boot_phase); 10908 ASSERT_RTNL(); 10909 10910 might_sleep(); 10911 10912 /* When net_device's are persistent, this will be fatal. 
*/ 10913 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 10914 BUG_ON(!net); 10915 10916 ret = ethtool_check_ops(dev->ethtool_ops); 10917 if (ret) 10918 return ret; 10919 10920 /* rss ctx ID 0 is reserved for the default context, start from 1 */ 10921 xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1); 10922 mutex_init(&dev->ethtool->rss_lock); 10923 10924 spin_lock_init(&dev->addr_list_lock); 10925 netdev_set_addr_lockdep_class(dev); 10926 10927 ret = dev_get_valid_name(net, dev, dev->name); 10928 if (ret < 0) 10929 goto out; 10930 10931 ret = -ENOMEM; 10932 dev->name_node = netdev_name_node_head_alloc(dev); 10933 if (!dev->name_node) 10934 goto out; 10935 10936 /* Init, if this function is available */ 10937 if (dev->netdev_ops->ndo_init) { 10938 ret = dev->netdev_ops->ndo_init(dev); 10939 if (ret) { 10940 if (ret > 0) 10941 ret = -EIO; 10942 goto err_free_name; 10943 } 10944 } 10945 10946 if (((dev->hw_features | dev->features) & 10947 NETIF_F_HW_VLAN_CTAG_FILTER) && 10948 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10949 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10950 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10951 ret = -EINVAL; 10952 goto err_uninit; 10953 } 10954 10955 ret = netdev_do_alloc_pcpu_stats(dev); 10956 if (ret) 10957 goto err_uninit; 10958 10959 ret = dev_index_reserve(net, dev->ifindex); 10960 if (ret < 0) 10961 goto err_free_pcpu; 10962 dev->ifindex = ret; 10963 10964 /* Transfer changeable features to wanted_features and enable 10965 * software offloads (GSO and GRO). 10966 */ 10967 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10968 dev->features |= NETIF_F_SOFT_FEATURES; 10969 10970 if (dev->udp_tunnel_nic_info) { 10971 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10972 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10973 } 10974 10975 dev->wanted_features = dev->features & dev->hw_features; 10976 10977 if (!(dev->flags & IFF_LOOPBACK)) 10978 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10979 10980 /* If IPv4 TCP segmentation offload is supported we should also 10981 * allow the device to enable segmenting the frame with the option 10982 * of ignoring a static IP ID value. This doesn't enable the 10983 * feature itself but allows the user to enable it later. 10984 */ 10985 if (dev->hw_features & NETIF_F_TSO) 10986 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10987 if (dev->vlan_features & NETIF_F_TSO) 10988 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10989 if (dev->mpls_features & NETIF_F_TSO) 10990 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10991 if (dev->hw_enc_features & NETIF_F_TSO) 10992 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10993 10994 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10995 */ 10996 dev->vlan_features |= NETIF_F_HIGHDMA; 10997 10998 /* Make NETIF_F_SG inheritable to tunnel devices. 10999 */ 11000 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 11001 11002 /* Make NETIF_F_SG inheritable to MPLS. 11003 */ 11004 dev->mpls_features |= NETIF_F_SG; 11005 11006 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 11007 ret = notifier_to_errno(ret); 11008 if (ret) 11009 goto err_ifindex_release; 11010 11011 ret = netdev_register_kobject(dev); 11012 11013 netdev_lock(dev); 11014 WRITE_ONCE(dev->reg_state, ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED); 11015 netdev_unlock(dev); 11016 11017 if (ret) 11018 goto err_uninit_notify; 11019 11020 netdev_lock_ops(dev); 11021 __netdev_update_features(dev); 11022 netdev_unlock_ops(dev); 11023 11024 /* 11025 * Default initial state at registry is that the 11026 * device is present. 11027 */ 11028 11029 set_bit(__LINK_STATE_PRESENT, &dev->state); 11030 11031 linkwatch_init_dev(dev); 11032 11033 dev_init_scheduler(dev); 11034 11035 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL); 11036 list_netdevice(dev); 11037 11038 add_device_randomness(dev->dev_addr, dev->addr_len); 11039 11040 /* If the device has permanent device address, driver should 11041 * set dev_addr and also addr_assign_type should be set to 11042 * NET_ADDR_PERM (default value). 11043 */ 11044 if (dev->addr_assign_type == NET_ADDR_PERM) 11045 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 11046 11047 /* Notify protocols, that a new device appeared. */ 11048 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 11049 ret = notifier_to_errno(ret); 11050 if (ret) { 11051 /* Expect explicit free_netdev() on failure */ 11052 dev->needs_free_netdev = false; 11053 unregister_netdevice_queue(dev, NULL); 11054 goto out; 11055 } 11056 /* 11057 * Prevent userspace races by waiting until the network 11058 * device is fully setup before sending notifications. 11059 */ 11060 if (!dev->rtnl_link_ops || 11061 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 11062 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 11063 11064 out: 11065 return ret; 11066 11067 err_uninit_notify: 11068 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 11069 err_ifindex_release: 11070 dev_index_release(net, dev->ifindex); 11071 err_free_pcpu: 11072 netdev_do_free_pcpu_stats(dev); 11073 err_uninit: 11074 if (dev->netdev_ops->ndo_uninit) 11075 dev->netdev_ops->ndo_uninit(dev); 11076 if (dev->priv_destructor) 11077 dev->priv_destructor(dev); 11078 err_free_name: 11079 netdev_name_node_free(dev->name_node); 11080 goto out; 11081 } 11082 EXPORT_SYMBOL(register_netdevice); 11083 11084 /* Initialize the core of a dummy net device. 11085 * The setup steps dummy netdevs need which normal netdevs get by going 11086 * through register_netdevice(). 11087 */ init_dummy_netdev(struct net_device * dev)11088 static void init_dummy_netdev(struct net_device *dev) 11089 { 11090 /* make sure we BUG if trying to hit standard 11091 * register/unregister code path 11092 */ 11093 dev->reg_state = NETREG_DUMMY; 11094 11095 /* a dummy interface is started by default */ 11096 set_bit(__LINK_STATE_PRESENT, &dev->state); 11097 set_bit(__LINK_STATE_START, &dev->state); 11098 11099 /* Note : We dont allocate pcpu_refcnt for dummy devices, 11100 * because users of this 'device' dont need to change 11101 * its refcount. 11102 */ 11103 } 11104 11105 /** 11106 * register_netdev - register a network device 11107 * @dev: device to register 11108 * 11109 * Take a completed network device structure and add it to the kernel 11110 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 11111 * chain. 0 is returned on success. A negative errno code is returned 11112 * on a failure to set up the device, or if the name is a duplicate. 11113 * 11114 * This is a wrapper around register_netdevice that takes the rtnl semaphore 11115 * and expands the device name if you passed a format string to 11116 * alloc_netdev. 
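 *
 * A minimal usage sketch (editor's illustration, not part of the original
 * source; struct foo_priv is a hypothetical driver-private type):
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d",
 *			   NET_NAME_UNKNOWN, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * Unlike register_netdevice(), register_netdev() handles the rtnl locking
 * itself.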
11117 */ register_netdev(struct net_device * dev)11118 int register_netdev(struct net_device *dev) 11119 { 11120 struct net *net = dev_net(dev); 11121 int err; 11122 11123 if (rtnl_net_lock_killable(net)) 11124 return -EINTR; 11125 11126 err = register_netdevice(dev); 11127 11128 rtnl_net_unlock(net); 11129 11130 return err; 11131 } 11132 EXPORT_SYMBOL(register_netdev); 11133 netdev_refcnt_read(const struct net_device * dev)11134 int netdev_refcnt_read(const struct net_device *dev) 11135 { 11136 #ifdef CONFIG_PCPU_DEV_REFCNT 11137 int i, refcnt = 0; 11138 11139 for_each_possible_cpu(i) 11140 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 11141 return refcnt; 11142 #else 11143 return refcount_read(&dev->dev_refcnt); 11144 #endif 11145 } 11146 EXPORT_SYMBOL(netdev_refcnt_read); 11147 11148 int netdev_unregister_timeout_secs __read_mostly = 10; 11149 11150 #define WAIT_REFS_MIN_MSECS 1 11151 #define WAIT_REFS_MAX_MSECS 250 11152 /** 11153 * netdev_wait_allrefs_any - wait until all references are gone. 11154 * @list: list of net_devices to wait on 11155 * 11156 * This is called when unregistering network devices. 11157 * 11158 * Any protocol or device that holds a reference should register 11159 * for netdevice notification, and cleanup and put back the 11160 * reference if they receive an UNREGISTER event. 11161 * We can get stuck here if buggy protocols don't correctly 11162 * call dev_put. 11163 */ netdev_wait_allrefs_any(struct list_head * list)11164 static struct net_device *netdev_wait_allrefs_any(struct list_head *list) 11165 { 11166 unsigned long rebroadcast_time, warning_time; 11167 struct net_device *dev; 11168 int wait = 0; 11169 11170 rebroadcast_time = warning_time = jiffies; 11171 11172 list_for_each_entry(dev, list, todo_list) 11173 if (netdev_refcnt_read(dev) == 1) 11174 return dev; 11175 11176 while (true) { 11177 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 11178 rtnl_lock(); 11179 11180 /* Rebroadcast unregister notification */ 11181 list_for_each_entry(dev, list, todo_list) 11182 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11183 11184 __rtnl_unlock(); 11185 rcu_barrier(); 11186 rtnl_lock(); 11187 11188 list_for_each_entry(dev, list, todo_list) 11189 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 11190 &dev->state)) { 11191 /* We must not have linkwatch events 11192 * pending on unregister. If this 11193 * happens, we simply run the queue 11194 * unscheduled, resulting in a noop 11195 * for this device. 11196 */ 11197 linkwatch_run_queue(); 11198 break; 11199 } 11200 11201 __rtnl_unlock(); 11202 11203 rebroadcast_time = jiffies; 11204 } 11205 11206 rcu_barrier(); 11207 11208 if (!wait) { 11209 wait = WAIT_REFS_MIN_MSECS; 11210 } else { 11211 msleep(wait); 11212 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 11213 } 11214 11215 list_for_each_entry(dev, list, todo_list) 11216 if (netdev_refcnt_read(dev) == 1) 11217 return dev; 11218 11219 if (time_after(jiffies, warning_time + 11220 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { 11221 list_for_each_entry(dev, list, todo_list) { 11222 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 11223 dev->name, netdev_refcnt_read(dev)); 11224 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 11225 } 11226 11227 warning_time = jiffies; 11228 } 11229 } 11230 } 11231 11232 /* The sequence is: 11233 * 11234 * rtnl_lock(); 11235 * ... 11236 * register_netdevice(x1); 11237 * register_netdevice(x2); 11238 * ... 11239 * unregister_netdevice(y1); 11240 * unregister_netdevice(y2); 11241 * ... 
11242 * rtnl_unlock(); 11243 * free_netdev(y1); 11244 * free_netdev(y2); 11245 * 11246 * We are invoked by rtnl_unlock(). 11247 * This allows us to deal with problems: 11248 * 1) We can delete sysfs objects which invoke hotplug 11249 * without deadlocking with linkwatch via keventd. 11250 * 2) Since we run with the RTNL semaphore not held, we can sleep 11251 * safely in order to wait for the netdev refcnt to drop to zero. 11252 * 11253 * We must not return until all unregister events added during 11254 * the interval the lock was held have been completed. 11255 */ netdev_run_todo(void)11256 void netdev_run_todo(void) 11257 { 11258 struct net_device *dev, *tmp; 11259 struct list_head list; 11260 int cnt; 11261 #ifdef CONFIG_LOCKDEP 11262 struct list_head unlink_list; 11263 11264 list_replace_init(&net_unlink_list, &unlink_list); 11265 11266 while (!list_empty(&unlink_list)) { 11267 dev = list_first_entry(&unlink_list, struct net_device, 11268 unlink_list); 11269 list_del_init(&dev->unlink_list); 11270 dev->nested_level = dev->lower_level - 1; 11271 } 11272 #endif 11273 11274 /* Snapshot list, allow later requests */ 11275 list_replace_init(&net_todo_list, &list); 11276 11277 __rtnl_unlock(); 11278 11279 /* Wait for rcu callbacks to finish before next phase */ 11280 if (!list_empty(&list)) 11281 rcu_barrier(); 11282 11283 list_for_each_entry_safe(dev, tmp, &list, todo_list) { 11284 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 11285 netdev_WARN(dev, "run_todo but not unregistering\n"); 11286 list_del(&dev->todo_list); 11287 continue; 11288 } 11289 11290 netdev_lock(dev); 11291 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED); 11292 netdev_unlock(dev); 11293 linkwatch_sync_dev(dev); 11294 } 11295 11296 cnt = 0; 11297 while (!list_empty(&list)) { 11298 dev = netdev_wait_allrefs_any(&list); 11299 list_del(&dev->todo_list); 11300 11301 /* paranoia */ 11302 BUG_ON(netdev_refcnt_read(dev) != 1); 11303 BUG_ON(!list_empty(&dev->ptype_all)); 11304 BUG_ON(!list_empty(&dev->ptype_specific)); 11305 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 11306 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 11307 11308 netdev_do_free_pcpu_stats(dev); 11309 if (dev->priv_destructor) 11310 dev->priv_destructor(dev); 11311 if (dev->needs_free_netdev) 11312 free_netdev(dev); 11313 11314 cnt++; 11315 11316 /* Free network device */ 11317 kobject_put(&dev->dev.kobj); 11318 } 11319 if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count)) 11320 wake_up(&netdev_unregistering_wq); 11321 } 11322 11323 /* Collate per-cpu network dstats statistics 11324 * 11325 * Read per-cpu network statistics from dev->dstats and populate the related 11326 * fields in @s. 
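 *
 * For reference, the writer side feeding these counters looks roughly like
 * the sketch below (editor's illustration only; a driver may instead use
 * inline dstats helpers from netdevice.h where available):
 *
 *	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
 *
 *	u64_stats_update_begin(&dstats->syncp);
 *	u64_stats_inc(&dstats->rx_packets);
 *	u64_stats_add(&dstats->rx_bytes, skb->len);
 *	u64_stats_update_end(&dstats->syncp);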
11327 */ 11328 static void dev_fetch_dstats(struct rtnl_link_stats64 *s, 11329 const struct pcpu_dstats __percpu *dstats) 11330 { 11331 int cpu; 11332 11333 for_each_possible_cpu(cpu) { 11334 u64 rx_packets, rx_bytes, rx_drops; 11335 u64 tx_packets, tx_bytes, tx_drops; 11336 const struct pcpu_dstats *stats; 11337 unsigned int start; 11338 11339 stats = per_cpu_ptr(dstats, cpu); 11340 do { 11341 start = u64_stats_fetch_begin(&stats->syncp); 11342 rx_packets = u64_stats_read(&stats->rx_packets); 11343 rx_bytes = u64_stats_read(&stats->rx_bytes); 11344 rx_drops = u64_stats_read(&stats->rx_drops); 11345 tx_packets = u64_stats_read(&stats->tx_packets); 11346 tx_bytes = u64_stats_read(&stats->tx_bytes); 11347 tx_drops = u64_stats_read(&stats->tx_drops); 11348 } while (u64_stats_fetch_retry(&stats->syncp, start)); 11349 11350 s->rx_packets += rx_packets; 11351 s->rx_bytes += rx_bytes; 11352 s->rx_dropped += rx_drops; 11353 s->tx_packets += tx_packets; 11354 s->tx_bytes += tx_bytes; 11355 s->tx_dropped += tx_drops; 11356 } 11357 } 11358 11359 /* ndo_get_stats64 implementation for dstats-based accounting. 11360 * 11361 * Populate @s from dev->stats and dev->dstats. This is used internally by the 11362 * core for NETDEV_PCPU_STAT_DSTATS-type stats collection. 11363 */ 11364 static void dev_get_dstats64(const struct net_device *dev, 11365 struct rtnl_link_stats64 *s) 11366 { 11367 netdev_stats_to_stats64(s, &dev->stats); 11368 dev_fetch_dstats(s, dev->dstats); 11369 } 11370 11371 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 11372 * all the same fields in the same order as net_device_stats, with only 11373 * the type differing, but rtnl_link_stats64 may have additional fields 11374 * at the end for newer counters.
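 *
 * Editor's illustration of that layout assumption (field names abbreviated):
 *
 *	struct net_device_stats      struct rtnl_link_stats64
 *	  rx_packets                   rx_packets
 *	  tx_packets                   tx_packets
 *	  rx_bytes                     rx_bytes
 *	  ...                          ...
 *	  (n counters total)           (same n counters, then newer fields,
 *	                                which are zeroed below)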
11375 */ netdev_stats_to_stats64(struct rtnl_link_stats64 * stats64,const struct net_device_stats * netdev_stats)11376 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 11377 const struct net_device_stats *netdev_stats) 11378 { 11379 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); 11380 const atomic_long_t *src = (atomic_long_t *)netdev_stats; 11381 u64 *dst = (u64 *)stats64; 11382 11383 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 11384 for (i = 0; i < n; i++) 11385 dst[i] = (unsigned long)atomic_long_read(&src[i]); 11386 /* zero out counters that only exist in rtnl_link_stats64 */ 11387 memset((char *)stats64 + n * sizeof(u64), 0, 11388 sizeof(*stats64) - n * sizeof(u64)); 11389 } 11390 EXPORT_SYMBOL(netdev_stats_to_stats64); 11391 netdev_core_stats_alloc(struct net_device * dev)11392 static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc( 11393 struct net_device *dev) 11394 { 11395 struct net_device_core_stats __percpu *p; 11396 11397 p = alloc_percpu_gfp(struct net_device_core_stats, 11398 GFP_ATOMIC | __GFP_NOWARN); 11399 11400 if (p && cmpxchg(&dev->core_stats, NULL, p)) 11401 free_percpu(p); 11402 11403 /* This READ_ONCE() pairs with the cmpxchg() above */ 11404 return READ_ONCE(dev->core_stats); 11405 } 11406 netdev_core_stats_inc(struct net_device * dev,u32 offset)11407 noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset) 11408 { 11409 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 11410 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); 11411 unsigned long __percpu *field; 11412 11413 if (unlikely(!p)) { 11414 p = netdev_core_stats_alloc(dev); 11415 if (!p) 11416 return; 11417 } 11418 11419 field = (unsigned long __percpu *)((void __percpu *)p + offset); 11420 this_cpu_inc(*field); 11421 } 11422 EXPORT_SYMBOL_GPL(netdev_core_stats_inc); 11423 11424 /** 11425 * dev_get_stats - get network device statistics 11426 * @dev: device to get statistics from 11427 * @storage: place to store stats 11428 * 11429 * Get network statistics from device. Return @storage. 11430 * The device driver may provide its own method by setting 11431 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 11432 * otherwise the internal statistics structure is used. 11433 */ dev_get_stats(struct net_device * dev,struct rtnl_link_stats64 * storage)11434 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 11435 struct rtnl_link_stats64 *storage) 11436 { 11437 const struct net_device_ops *ops = dev->netdev_ops; 11438 const struct net_device_core_stats __percpu *p; 11439 11440 /* 11441 * IPv{4,6} and udp tunnels share common stat helpers and use 11442 * different stat type (NETDEV_PCPU_STAT_TSTATS vs 11443 * NETDEV_PCPU_STAT_DSTATS). Ensure the accounting is consistent. 
11444 */ 11445 BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_bytes) != 11446 offsetof(struct pcpu_dstats, rx_bytes)); 11447 BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_packets) != 11448 offsetof(struct pcpu_dstats, rx_packets)); 11449 BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_bytes) != 11450 offsetof(struct pcpu_dstats, tx_bytes)); 11451 BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_packets) != 11452 offsetof(struct pcpu_dstats, tx_packets)); 11453 11454 if (ops->ndo_get_stats64) { 11455 memset(storage, 0, sizeof(*storage)); 11456 ops->ndo_get_stats64(dev, storage); 11457 } else if (ops->ndo_get_stats) { 11458 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 11459 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) { 11460 dev_get_tstats64(dev, storage); 11461 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) { 11462 dev_get_dstats64(dev, storage); 11463 } else { 11464 netdev_stats_to_stats64(storage, &dev->stats); 11465 } 11466 11467 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 11468 p = READ_ONCE(dev->core_stats); 11469 if (p) { 11470 const struct net_device_core_stats *core_stats; 11471 int i; 11472 11473 for_each_possible_cpu(i) { 11474 core_stats = per_cpu_ptr(p, i); 11475 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); 11476 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); 11477 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); 11478 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); 11479 } 11480 } 11481 return storage; 11482 } 11483 EXPORT_SYMBOL(dev_get_stats); 11484 11485 /** 11486 * dev_fetch_sw_netstats - get per-cpu network device statistics 11487 * @s: place to store stats 11488 * @netstats: per-cpu network stats to read from 11489 * 11490 * Read per-cpu network statistics and populate the related fields in @s. 11491 */ dev_fetch_sw_netstats(struct rtnl_link_stats64 * s,const struct pcpu_sw_netstats __percpu * netstats)11492 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 11493 const struct pcpu_sw_netstats __percpu *netstats) 11494 { 11495 int cpu; 11496 11497 for_each_possible_cpu(cpu) { 11498 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 11499 const struct pcpu_sw_netstats *stats; 11500 unsigned int start; 11501 11502 stats = per_cpu_ptr(netstats, cpu); 11503 do { 11504 start = u64_stats_fetch_begin(&stats->syncp); 11505 rx_packets = u64_stats_read(&stats->rx_packets); 11506 rx_bytes = u64_stats_read(&stats->rx_bytes); 11507 tx_packets = u64_stats_read(&stats->tx_packets); 11508 tx_bytes = u64_stats_read(&stats->tx_bytes); 11509 } while (u64_stats_fetch_retry(&stats->syncp, start)); 11510 11511 s->rx_packets += rx_packets; 11512 s->rx_bytes += rx_bytes; 11513 s->tx_packets += tx_packets; 11514 s->tx_bytes += tx_bytes; 11515 } 11516 } 11517 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 11518 11519 /** 11520 * dev_get_tstats64 - ndo_get_stats64 implementation 11521 * @dev: device to get statistics from 11522 * @s: place to store stats 11523 * 11524 * Populate @s from dev->stats and dev->tstats. Can be used as 11525 * ndo_get_stats64() callback. 
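 *
 * Editor's sketch of typical use (the foo_* names are hypothetical): a
 * driver selects core-allocated tstats at setup time and points its ops at
 * this helper:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_get_stats64	= dev_get_tstats64,
 *	};
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 *		dev->netdev_ops = &foo_netdev_ops;
 *	}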
11526 */ dev_get_tstats64(struct net_device * dev,struct rtnl_link_stats64 * s)11527 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 11528 { 11529 netdev_stats_to_stats64(s, &dev->stats); 11530 dev_fetch_sw_netstats(s, dev->tstats); 11531 } 11532 EXPORT_SYMBOL_GPL(dev_get_tstats64); 11533 dev_ingress_queue_create(struct net_device * dev)11534 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 11535 { 11536 struct netdev_queue *queue = dev_ingress_queue(dev); 11537 11538 #ifdef CONFIG_NET_CLS_ACT 11539 if (queue) 11540 return queue; 11541 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 11542 if (!queue) 11543 return NULL; 11544 netdev_init_one_queue(dev, queue, NULL); 11545 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 11546 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); 11547 rcu_assign_pointer(dev->ingress_queue, queue); 11548 #endif 11549 return queue; 11550 } 11551 11552 static const struct ethtool_ops default_ethtool_ops; 11553 netdev_set_default_ethtool_ops(struct net_device * dev,const struct ethtool_ops * ops)11554 void netdev_set_default_ethtool_ops(struct net_device *dev, 11555 const struct ethtool_ops *ops) 11556 { 11557 if (dev->ethtool_ops == &default_ethtool_ops) 11558 dev->ethtool_ops = ops; 11559 } 11560 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 11561 11562 /** 11563 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default 11564 * @dev: netdev to enable the IRQ coalescing on 11565 * 11566 * Sets a conservative default for SW IRQ coalescing. Users can use 11567 * sysfs attributes to override the default values. 11568 */ netdev_sw_irq_coalesce_default_on(struct net_device * dev)11569 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) 11570 { 11571 WARN_ON(dev->reg_state == NETREG_REGISTERED); 11572 11573 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 11574 netdev_set_gro_flush_timeout(dev, 20000); 11575 netdev_set_defer_hard_irqs(dev, 1); 11576 } 11577 } 11578 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); 11579 11580 /** 11581 * alloc_netdev_mqs - allocate network device 11582 * @sizeof_priv: size of private data to allocate space for 11583 * @name: device name format string 11584 * @name_assign_type: origin of device name 11585 * @setup: callback to initialize device 11586 * @txqs: the number of TX subqueues to allocate 11587 * @rxqs: the number of RX subqueues to allocate 11588 * 11589 * Allocates a struct net_device with private data area for driver use 11590 * and performs basic initialization. Also allocates subqueue structs 11591 * for each queue on the device. 
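 *
 * Editor's sketch of a typical call (the foo_* names and the queue counts
 * are illustrative only); netdev_priv() then returns the driver-private
 * area allocated after the struct:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, foo_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);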
11592 */ alloc_netdev_mqs(int sizeof_priv,const char * name,unsigned char name_assign_type,void (* setup)(struct net_device *),unsigned int txqs,unsigned int rxqs)11593 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 11594 unsigned char name_assign_type, 11595 void (*setup)(struct net_device *), 11596 unsigned int txqs, unsigned int rxqs) 11597 { 11598 struct net_device *dev; 11599 size_t napi_config_sz; 11600 unsigned int maxqs; 11601 11602 BUG_ON(strlen(name) >= sizeof(dev->name)); 11603 11604 if (txqs < 1) { 11605 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 11606 return NULL; 11607 } 11608 11609 if (rxqs < 1) { 11610 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 11611 return NULL; 11612 } 11613 11614 maxqs = max(txqs, rxqs); 11615 11616 dev = kvzalloc(struct_size(dev, priv, sizeof_priv), 11617 GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 11618 if (!dev) 11619 return NULL; 11620 11621 dev->priv_len = sizeof_priv; 11622 11623 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); 11624 #ifdef CONFIG_PCPU_DEV_REFCNT 11625 dev->pcpu_refcnt = alloc_percpu(int); 11626 if (!dev->pcpu_refcnt) 11627 goto free_dev; 11628 __dev_hold(dev); 11629 #else 11630 refcount_set(&dev->dev_refcnt, 1); 11631 #endif 11632 11633 if (dev_addr_init(dev)) 11634 goto free_pcpu; 11635 11636 dev_mc_init(dev); 11637 dev_uc_init(dev); 11638 11639 dev_net_set(dev, &init_net); 11640 11641 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; 11642 dev->xdp_zc_max_segs = 1; 11643 dev->gso_max_segs = GSO_MAX_SEGS; 11644 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; 11645 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; 11646 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; 11647 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; 11648 dev->tso_max_segs = TSO_MAX_SEGS; 11649 dev->upper_level = 1; 11650 dev->lower_level = 1; 11651 #ifdef CONFIG_LOCKDEP 11652 dev->nested_level = 0; 11653 INIT_LIST_HEAD(&dev->unlink_list); 11654 #endif 11655 11656 INIT_LIST_HEAD(&dev->napi_list); 11657 INIT_LIST_HEAD(&dev->unreg_list); 11658 INIT_LIST_HEAD(&dev->close_list); 11659 INIT_LIST_HEAD(&dev->link_watch_list); 11660 INIT_LIST_HEAD(&dev->adj_list.upper); 11661 INIT_LIST_HEAD(&dev->adj_list.lower); 11662 INIT_LIST_HEAD(&dev->ptype_all); 11663 INIT_LIST_HEAD(&dev->ptype_specific); 11664 INIT_LIST_HEAD(&dev->net_notifier_list); 11665 #ifdef CONFIG_NET_SCHED 11666 hash_init(dev->qdisc_hash); 11667 #endif 11668 11669 mutex_init(&dev->lock); 11670 11671 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 11672 setup(dev); 11673 11674 if (!dev->tx_queue_len) { 11675 dev->priv_flags |= IFF_NO_QUEUE; 11676 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 11677 } 11678 11679 dev->num_tx_queues = txqs; 11680 dev->real_num_tx_queues = txqs; 11681 if (netif_alloc_netdev_queues(dev)) 11682 goto free_all; 11683 11684 dev->num_rx_queues = rxqs; 11685 dev->real_num_rx_queues = rxqs; 11686 if (netif_alloc_rx_queues(dev)) 11687 goto free_all; 11688 dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT); 11689 if (!dev->ethtool) 11690 goto free_all; 11691 11692 dev->cfg = kzalloc(sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT); 11693 if (!dev->cfg) 11694 goto free_all; 11695 dev->cfg_pending = dev->cfg; 11696 11697 napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config)); 11698 dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT); 11699 if (!dev->napi_config) 11700 goto free_all; 11701 11702 strscpy(dev->name, name); 11703 dev->name_assign_type = name_assign_type; 11704 dev->group = 
INIT_NETDEV_GROUP; 11705 if (!dev->ethtool_ops) 11706 dev->ethtool_ops = &default_ethtool_ops; 11707 11708 nf_hook_netdev_init(dev); 11709 11710 return dev; 11711 11712 free_all: 11713 free_netdev(dev); 11714 return NULL; 11715 11716 free_pcpu: 11717 #ifdef CONFIG_PCPU_DEV_REFCNT 11718 free_percpu(dev->pcpu_refcnt); 11719 free_dev: 11720 #endif 11721 kvfree(dev); 11722 return NULL; 11723 } 11724 EXPORT_SYMBOL(alloc_netdev_mqs); 11725 netdev_napi_exit(struct net_device * dev)11726 static void netdev_napi_exit(struct net_device *dev) 11727 { 11728 if (!list_empty(&dev->napi_list)) { 11729 struct napi_struct *p, *n; 11730 11731 netdev_lock(dev); 11732 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 11733 __netif_napi_del_locked(p); 11734 netdev_unlock(dev); 11735 11736 synchronize_net(); 11737 } 11738 11739 kvfree(dev->napi_config); 11740 } 11741 11742 /** 11743 * free_netdev - free network device 11744 * @dev: device 11745 * 11746 * This function does the last stage of destroying an allocated device 11747 * interface. The reference to the device object is released. If this 11748 * is the last reference then it will be freed.Must be called in process 11749 * context. 11750 */ free_netdev(struct net_device * dev)11751 void free_netdev(struct net_device *dev) 11752 { 11753 might_sleep(); 11754 11755 /* When called immediately after register_netdevice() failed the unwind 11756 * handling may still be dismantling the device. Handle that case by 11757 * deferring the free. 11758 */ 11759 if (dev->reg_state == NETREG_UNREGISTERING) { 11760 ASSERT_RTNL(); 11761 dev->needs_free_netdev = true; 11762 return; 11763 } 11764 11765 WARN_ON(dev->cfg != dev->cfg_pending); 11766 kfree(dev->cfg); 11767 kfree(dev->ethtool); 11768 netif_free_tx_queues(dev); 11769 netif_free_rx_queues(dev); 11770 11771 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 11772 11773 /* Flush device addresses */ 11774 dev_addr_flush(dev); 11775 11776 netdev_napi_exit(dev); 11777 11778 netif_del_cpu_rmap(dev); 11779 11780 ref_tracker_dir_exit(&dev->refcnt_tracker); 11781 #ifdef CONFIG_PCPU_DEV_REFCNT 11782 free_percpu(dev->pcpu_refcnt); 11783 dev->pcpu_refcnt = NULL; 11784 #endif 11785 free_percpu(dev->core_stats); 11786 dev->core_stats = NULL; 11787 free_percpu(dev->xdp_bulkq); 11788 dev->xdp_bulkq = NULL; 11789 11790 netdev_free_phy_link_topology(dev); 11791 11792 mutex_destroy(&dev->lock); 11793 11794 /* Compatibility with error handling in drivers */ 11795 if (dev->reg_state == NETREG_UNINITIALIZED || 11796 dev->reg_state == NETREG_DUMMY) { 11797 kvfree(dev); 11798 return; 11799 } 11800 11801 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 11802 WRITE_ONCE(dev->reg_state, NETREG_RELEASED); 11803 11804 /* will free via device release */ 11805 put_device(&dev->dev); 11806 } 11807 EXPORT_SYMBOL(free_netdev); 11808 11809 /** 11810 * alloc_netdev_dummy - Allocate and initialize a dummy net device. 11811 * @sizeof_priv: size of private data to allocate space for 11812 * 11813 * Return: the allocated net_device on success, NULL otherwise 11814 */ alloc_netdev_dummy(int sizeof_priv)11815 struct net_device *alloc_netdev_dummy(int sizeof_priv) 11816 { 11817 return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN, 11818 init_dummy_netdev); 11819 } 11820 EXPORT_SYMBOL_GPL(alloc_netdev_dummy); 11821 11822 /** 11823 * synchronize_net - Synchronize with packet receive processing 11824 * 11825 * Wait for packets currently being received to be done. 11826 * Does not block later packets from starting. 
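 *
 * Typical pattern (editor's sketch; some_ptr and old_data are placeholders):
 * unpublish an RCU-protected pointer, wait out in-flight receive paths,
 * then free:
 *
 *	old_data = rtnl_dereference(some_ptr);
 *	RCU_INIT_POINTER(some_ptr, NULL);
 *	synchronize_net();
 *	kfree(old_data);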
11827 */ synchronize_net(void)11828 void synchronize_net(void) 11829 { 11830 might_sleep(); 11831 if (from_cleanup_net() || rtnl_is_locked()) 11832 synchronize_rcu_expedited(); 11833 else 11834 synchronize_rcu(); 11835 } 11836 EXPORT_SYMBOL(synchronize_net); 11837 netdev_rss_contexts_free(struct net_device * dev)11838 static void netdev_rss_contexts_free(struct net_device *dev) 11839 { 11840 struct ethtool_rxfh_context *ctx; 11841 unsigned long context; 11842 11843 mutex_lock(&dev->ethtool->rss_lock); 11844 xa_for_each(&dev->ethtool->rss_ctx, context, ctx) { 11845 struct ethtool_rxfh_param rxfh; 11846 11847 rxfh.indir = ethtool_rxfh_context_indir(ctx); 11848 rxfh.key = ethtool_rxfh_context_key(ctx); 11849 rxfh.hfunc = ctx->hfunc; 11850 rxfh.input_xfrm = ctx->input_xfrm; 11851 rxfh.rss_context = context; 11852 rxfh.rss_delete = true; 11853 11854 xa_erase(&dev->ethtool->rss_ctx, context); 11855 if (dev->ethtool_ops->create_rxfh_context) 11856 dev->ethtool_ops->remove_rxfh_context(dev, ctx, 11857 context, NULL); 11858 else 11859 dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL); 11860 kfree(ctx); 11861 } 11862 xa_destroy(&dev->ethtool->rss_ctx); 11863 mutex_unlock(&dev->ethtool->rss_lock); 11864 } 11865 11866 /** 11867 * unregister_netdevice_queue - remove device from the kernel 11868 * @dev: device 11869 * @head: list 11870 * 11871 * This function shuts down a device interface and removes it 11872 * from the kernel tables. 11873 * If head not NULL, device is queued to be unregistered later. 11874 * 11875 * Callers must hold the rtnl semaphore. You may want 11876 * unregister_netdev() instead of this. 11877 */ 11878 unregister_netdevice_queue(struct net_device * dev,struct list_head * head)11879 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 11880 { 11881 ASSERT_RTNL(); 11882 11883 if (head) { 11884 list_move_tail(&dev->unreg_list, head); 11885 } else { 11886 LIST_HEAD(single); 11887 11888 list_add(&dev->unreg_list, &single); 11889 unregister_netdevice_many(&single); 11890 } 11891 } 11892 EXPORT_SYMBOL(unregister_netdevice_queue); 11893 dev_memory_provider_uninstall(struct net_device * dev)11894 static void dev_memory_provider_uninstall(struct net_device *dev) 11895 { 11896 unsigned int i; 11897 11898 for (i = 0; i < dev->real_num_rx_queues; i++) { 11899 struct netdev_rx_queue *rxq = &dev->_rx[i]; 11900 struct pp_memory_provider_params *p = &rxq->mp_params; 11901 11902 if (p->mp_ops && p->mp_ops->uninstall) 11903 p->mp_ops->uninstall(rxq->mp_params.mp_priv, rxq); 11904 } 11905 } 11906 unregister_netdevice_many_notify(struct list_head * head,u32 portid,const struct nlmsghdr * nlh)11907 void unregister_netdevice_many_notify(struct list_head *head, 11908 u32 portid, const struct nlmsghdr *nlh) 11909 { 11910 struct net_device *dev, *tmp; 11911 LIST_HEAD(close_head); 11912 int cnt = 0; 11913 11914 BUG_ON(dev_boot_phase); 11915 ASSERT_RTNL(); 11916 11917 if (list_empty(head)) 11918 return; 11919 11920 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 11921 /* Some devices call without registering 11922 * for initialization unwind. Remove those 11923 * devices and proceed with the remaining. 11924 */ 11925 if (dev->reg_state == NETREG_UNINITIALIZED) { 11926 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 11927 dev->name, dev); 11928 11929 WARN_ON(1); 11930 list_del(&dev->unreg_list); 11931 continue; 11932 } 11933 dev->dismantle = true; 11934 BUG_ON(dev->reg_state != NETREG_REGISTERED); 11935 } 11936 11937 /* If device is running, close it first. 
*/ 11938 list_for_each_entry(dev, head, unreg_list) { 11939 list_add_tail(&dev->close_list, &close_head); 11940 netdev_lock_ops(dev); 11941 } 11942 dev_close_many(&close_head, true); 11943 11944 list_for_each_entry(dev, head, unreg_list) { 11945 netdev_unlock_ops(dev); 11946 /* And unlink it from device chain. */ 11947 unlist_netdevice(dev); 11948 netdev_lock(dev); 11949 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING); 11950 netdev_unlock(dev); 11951 } 11952 flush_all_backlogs(); 11953 11954 synchronize_net(); 11955 11956 list_for_each_entry(dev, head, unreg_list) { 11957 struct sk_buff *skb = NULL; 11958 11959 /* Shutdown queueing discipline. */ 11960 dev_shutdown(dev); 11961 dev_tcx_uninstall(dev); 11962 netdev_lock_ops(dev); 11963 dev_xdp_uninstall(dev); 11964 dev_memory_provider_uninstall(dev); 11965 netdev_unlock_ops(dev); 11966 bpf_dev_bound_netdev_unregister(dev); 11967 11968 netdev_offload_xstats_disable_all(dev); 11969 11970 /* Notify protocols, that we are about to destroy 11971 * this device. They should clean all the things. 11972 */ 11973 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11974 11975 if (!dev->rtnl_link_ops || 11976 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 11977 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 11978 GFP_KERNEL, NULL, 0, 11979 portid, nlh); 11980 11981 /* 11982 * Flush the unicast and multicast chains 11983 */ 11984 dev_uc_flush(dev); 11985 dev_mc_flush(dev); 11986 11987 netdev_name_node_alt_flush(dev); 11988 netdev_name_node_free(dev->name_node); 11989 11990 netdev_rss_contexts_free(dev); 11991 11992 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 11993 11994 if (dev->netdev_ops->ndo_uninit) 11995 dev->netdev_ops->ndo_uninit(dev); 11996 11997 mutex_destroy(&dev->ethtool->rss_lock); 11998 11999 net_shaper_flush_netdev(dev); 12000 12001 if (skb) 12002 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); 12003 12004 /* Notifier chain MUST detach us all upper devices. */ 12005 WARN_ON(netdev_has_any_upper_dev(dev)); 12006 WARN_ON(netdev_has_any_lower_dev(dev)); 12007 12008 /* Remove entries from kobject tree */ 12009 netdev_unregister_kobject(dev); 12010 #ifdef CONFIG_XPS 12011 /* Remove XPS queueing entries */ 12012 netif_reset_xps_queues_gt(dev, 0); 12013 #endif 12014 } 12015 12016 synchronize_net(); 12017 12018 list_for_each_entry(dev, head, unreg_list) { 12019 netdev_put(dev, &dev->dev_registered_tracker); 12020 net_set_todo(dev); 12021 cnt++; 12022 } 12023 atomic_add(cnt, &dev_unreg_count); 12024 12025 list_del(head); 12026 } 12027 12028 /** 12029 * unregister_netdevice_many - unregister many devices 12030 * @head: list of devices 12031 * 12032 * Note: As most callers use a stack allocated list_head, 12033 * we force a list_del() to make sure stack won't be corrupted later. 12034 */ unregister_netdevice_many(struct list_head * head)12035 void unregister_netdevice_many(struct list_head *head) 12036 { 12037 unregister_netdevice_many_notify(head, 0, NULL); 12038 } 12039 EXPORT_SYMBOL(unregister_netdevice_many); 12040 12041 /** 12042 * unregister_netdev - remove device from the kernel 12043 * @dev: device 12044 * 12045 * This function shuts down a device interface and removes it 12046 * from the kernel tables. 12047 * 12048 * This is just a wrapper for unregister_netdevice that takes 12049 * the rtnl semaphore. In general you want to use this and not 12050 * unregister_netdevice. 
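 *
 * Editor's sketch of the usual module-exit pairing (foo_dev is a
 * placeholder for the device allocated at probe/init time):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_netdev(foo_dev);
 *		free_netdev(foo_dev);
 *	}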
12051 */ unregister_netdev(struct net_device * dev)12052 void unregister_netdev(struct net_device *dev) 12053 { 12054 rtnl_net_dev_lock(dev); 12055 unregister_netdevice(dev); 12056 rtnl_net_dev_unlock(dev); 12057 } 12058 EXPORT_SYMBOL(unregister_netdev); 12059 netif_change_net_namespace(struct net_device * dev,struct net * net,const char * pat,int new_ifindex,struct netlink_ext_ack * extack)12060 int netif_change_net_namespace(struct net_device *dev, struct net *net, 12061 const char *pat, int new_ifindex, 12062 struct netlink_ext_ack *extack) 12063 { 12064 struct netdev_name_node *name_node; 12065 struct net *net_old = dev_net(dev); 12066 char new_name[IFNAMSIZ] = {}; 12067 int err, new_nsid; 12068 12069 ASSERT_RTNL(); 12070 12071 /* Don't allow namespace local devices to be moved. */ 12072 err = -EINVAL; 12073 if (dev->netns_immutable) { 12074 NL_SET_ERR_MSG(extack, "The interface netns is immutable"); 12075 goto out; 12076 } 12077 12078 /* Ensure the device has been registered */ 12079 if (dev->reg_state != NETREG_REGISTERED) { 12080 NL_SET_ERR_MSG(extack, "The interface isn't registered"); 12081 goto out; 12082 } 12083 12084 /* Get out if there is nothing todo */ 12085 err = 0; 12086 if (net_eq(net_old, net)) 12087 goto out; 12088 12089 /* Pick the destination device name, and ensure 12090 * we can use it in the destination network namespace. 12091 */ 12092 err = -EEXIST; 12093 if (netdev_name_in_use(net, dev->name)) { 12094 /* We get here if we can't use the current device name */ 12095 if (!pat) { 12096 NL_SET_ERR_MSG(extack, 12097 "An interface with the same name exists in the target netns"); 12098 goto out; 12099 } 12100 err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST); 12101 if (err < 0) { 12102 NL_SET_ERR_MSG_FMT(extack, 12103 "Unable to use '%s' for the new interface name in the target netns", 12104 pat); 12105 goto out; 12106 } 12107 } 12108 /* Check that none of the altnames conflicts. */ 12109 err = -EEXIST; 12110 netdev_for_each_altname(dev, name_node) { 12111 if (netdev_name_in_use(net, name_node->name)) { 12112 NL_SET_ERR_MSG_FMT(extack, 12113 "An interface with the altname %s exists in the target netns", 12114 name_node->name); 12115 goto out; 12116 } 12117 } 12118 12119 /* Check that new_ifindex isn't used yet. */ 12120 if (new_ifindex) { 12121 err = dev_index_reserve(net, new_ifindex); 12122 if (err < 0) { 12123 NL_SET_ERR_MSG_FMT(extack, 12124 "The ifindex %d is not available in the target netns", 12125 new_ifindex); 12126 goto out; 12127 } 12128 } else { 12129 /* If there is an ifindex conflict assign a new one */ 12130 err = dev_index_reserve(net, dev->ifindex); 12131 if (err == -EBUSY) 12132 err = dev_index_reserve(net, 0); 12133 if (err < 0) { 12134 NL_SET_ERR_MSG(extack, 12135 "Unable to allocate a new ifindex in the target netns"); 12136 goto out; 12137 } 12138 new_ifindex = err; 12139 } 12140 12141 /* 12142 * And now a mini version of register_netdevice unregister_netdevice. 12143 */ 12144 12145 /* If device is running close it first. */ 12146 netif_close(dev); 12147 12148 /* And unlink it from device chain */ 12149 unlist_netdevice(dev); 12150 12151 synchronize_net(); 12152 12153 /* Shutdown queueing discipline. */ 12154 dev_shutdown(dev); 12155 12156 /* Notify protocols, that we are about to destroy 12157 * this device. They should clean all the things. 12158 * 12159 * Note that dev->reg_state stays at NETREG_REGISTERED. 12160 * This is wanted because this way 8021q and macvlan know 12161 * the device is just moving and can keep their slaves up. 
12162 */ 12163 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 12164 rcu_barrier(); 12165 12166 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 12167 12168 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 12169 new_ifindex); 12170 12171 /* 12172 * Flush the unicast and multicast chains 12173 */ 12174 dev_uc_flush(dev); 12175 dev_mc_flush(dev); 12176 12177 /* Send a netdev-removed uevent to the old namespace */ 12178 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 12179 netdev_adjacent_del_links(dev); 12180 12181 /* Move per-net netdevice notifiers that are following the netdevice */ 12182 move_netdevice_notifiers_dev_net(dev, net); 12183 12184 /* Actually switch the network namespace */ 12185 dev_net_set(dev, net); 12186 dev->ifindex = new_ifindex; 12187 12188 if (new_name[0]) { 12189 /* Rename the netdev to prepared name */ 12190 write_seqlock_bh(&netdev_rename_lock); 12191 strscpy(dev->name, new_name, IFNAMSIZ); 12192 write_sequnlock_bh(&netdev_rename_lock); 12193 } 12194 12195 /* Fixup kobjects */ 12196 dev_set_uevent_suppress(&dev->dev, 1); 12197 err = device_rename(&dev->dev, dev->name); 12198 dev_set_uevent_suppress(&dev->dev, 0); 12199 WARN_ON(err); 12200 12201 /* Send a netdev-add uevent to the new namespace */ 12202 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 12203 netdev_adjacent_add_links(dev); 12204 12205 /* Adapt owner in case owning user namespace of target network 12206 * namespace is different from the original one. 12207 */ 12208 err = netdev_change_owner(dev, net_old, net); 12209 WARN_ON(err); 12210 12211 /* Add the device back in the hashes */ 12212 list_netdevice(dev); 12213 12214 /* Notify protocols, that a new device appeared. */ 12215 call_netdevice_notifiers(NETDEV_REGISTER, dev); 12216 12217 /* 12218 * Prevent userspace races by waiting until the network 12219 * device is fully setup before sending notifications. 12220 */ 12221 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 12222 12223 synchronize_net(); 12224 err = 0; 12225 out: 12226 return err; 12227 } 12228 dev_cpu_dead(unsigned int oldcpu)12229 static int dev_cpu_dead(unsigned int oldcpu) 12230 { 12231 struct sk_buff **list_skb; 12232 struct sk_buff *skb; 12233 unsigned int cpu; 12234 struct softnet_data *sd, *oldsd, *remsd = NULL; 12235 12236 local_irq_disable(); 12237 cpu = smp_processor_id(); 12238 sd = &per_cpu(softnet_data, cpu); 12239 oldsd = &per_cpu(softnet_data, oldcpu); 12240 12241 /* Find end of our completion_queue. */ 12242 list_skb = &sd->completion_queue; 12243 while (*list_skb) 12244 list_skb = &(*list_skb)->next; 12245 /* Append completion queue from offline CPU. */ 12246 *list_skb = oldsd->completion_queue; 12247 oldsd->completion_queue = NULL; 12248 12249 /* Append output queue from offline CPU. */ 12250 if (oldsd->output_queue) { 12251 *sd->output_queue_tailp = oldsd->output_queue; 12252 sd->output_queue_tailp = oldsd->output_queue_tailp; 12253 oldsd->output_queue = NULL; 12254 oldsd->output_queue_tailp = &oldsd->output_queue; 12255 } 12256 /* Append NAPI poll list from offline CPU, with one exception : 12257 * process_backlog() must be called by cpu owning percpu backlog. 12258 * We properly handle process_queue & input_pkt_queue later. 
12259 */ 12260 while (!list_empty(&oldsd->poll_list)) { 12261 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 12262 struct napi_struct, 12263 poll_list); 12264 12265 list_del_init(&napi->poll_list); 12266 if (napi->poll == process_backlog) 12267 napi->state &= NAPIF_STATE_THREADED; 12268 else 12269 ____napi_schedule(sd, napi); 12270 } 12271 12272 raise_softirq_irqoff(NET_TX_SOFTIRQ); 12273 local_irq_enable(); 12274 12275 if (!use_backlog_threads()) { 12276 #ifdef CONFIG_RPS 12277 remsd = oldsd->rps_ipi_list; 12278 oldsd->rps_ipi_list = NULL; 12279 #endif 12280 /* send out pending IPI's on offline CPU */ 12281 net_rps_send_ipi(remsd); 12282 } 12283 12284 /* Process offline CPU's input_pkt_queue */ 12285 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 12286 netif_rx(skb); 12287 rps_input_queue_head_incr(oldsd); 12288 } 12289 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 12290 netif_rx(skb); 12291 rps_input_queue_head_incr(oldsd); 12292 } 12293 12294 return 0; 12295 } 12296 12297 /** 12298 * netdev_increment_features - increment feature set by one 12299 * @all: current feature set 12300 * @one: new feature set 12301 * @mask: mask feature set 12302 * 12303 * Computes a new feature set after adding a device with feature set 12304 * @one to the master device with current feature set @all. Will not 12305 * enable anything that is off in @mask. Returns the new feature set. 12306 */ netdev_increment_features(netdev_features_t all,netdev_features_t one,netdev_features_t mask)12307 netdev_features_t netdev_increment_features(netdev_features_t all, 12308 netdev_features_t one, netdev_features_t mask) 12309 { 12310 if (mask & NETIF_F_HW_CSUM) 12311 mask |= NETIF_F_CSUM_MASK; 12312 mask |= NETIF_F_VLAN_CHALLENGED; 12313 12314 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 12315 all &= one | ~NETIF_F_ALL_FOR_ALL; 12316 12317 /* If one device supports hw checksumming, set for all. */ 12318 if (all & NETIF_F_HW_CSUM) 12319 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 12320 12321 return all; 12322 } 12323 EXPORT_SYMBOL(netdev_increment_features); 12324 netdev_create_hash(void)12325 static struct hlist_head * __net_init netdev_create_hash(void) 12326 { 12327 int i; 12328 struct hlist_head *hash; 12329 12330 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 12331 if (hash != NULL) 12332 for (i = 0; i < NETDEV_HASHENTRIES; i++) 12333 INIT_HLIST_HEAD(&hash[i]); 12334 12335 return hash; 12336 } 12337 12338 /* Initialize per network namespace state */ netdev_init(struct net * net)12339 static int __net_init netdev_init(struct net *net) 12340 { 12341 BUILD_BUG_ON(GRO_HASH_BUCKETS > 12342 BITS_PER_BYTE * sizeof_field(struct gro_node, bitmask)); 12343 12344 INIT_LIST_HEAD(&net->dev_base_head); 12345 12346 net->dev_name_head = netdev_create_hash(); 12347 if (net->dev_name_head == NULL) 12348 goto err_name; 12349 12350 net->dev_index_head = netdev_create_hash(); 12351 if (net->dev_index_head == NULL) 12352 goto err_idx; 12353 12354 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1); 12355 12356 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 12357 12358 return 0; 12359 12360 err_idx: 12361 kfree(net->dev_name_head); 12362 err_name: 12363 return -ENOMEM; 12364 } 12365 12366 /** 12367 * netdev_drivername - network driver for the device 12368 * @dev: network device 12369 * 12370 * Determine network driver for device. 
12371 */ netdev_drivername(const struct net_device * dev)12372 const char *netdev_drivername(const struct net_device *dev) 12373 { 12374 const struct device_driver *driver; 12375 const struct device *parent; 12376 const char *empty = ""; 12377 12378 parent = dev->dev.parent; 12379 if (!parent) 12380 return empty; 12381 12382 driver = parent->driver; 12383 if (driver && driver->name) 12384 return driver->name; 12385 return empty; 12386 } 12387 __netdev_printk(const char * level,const struct net_device * dev,struct va_format * vaf)12388 static void __netdev_printk(const char *level, const struct net_device *dev, 12389 struct va_format *vaf) 12390 { 12391 if (dev && dev->dev.parent) { 12392 dev_printk_emit(level[1] - '0', 12393 dev->dev.parent, 12394 "%s %s %s%s: %pV", 12395 dev_driver_string(dev->dev.parent), 12396 dev_name(dev->dev.parent), 12397 netdev_name(dev), netdev_reg_state(dev), 12398 vaf); 12399 } else if (dev) { 12400 printk("%s%s%s: %pV", 12401 level, netdev_name(dev), netdev_reg_state(dev), vaf); 12402 } else { 12403 printk("%s(NULL net_device): %pV", level, vaf); 12404 } 12405 } 12406 netdev_printk(const char * level,const struct net_device * dev,const char * format,...)12407 void netdev_printk(const char *level, const struct net_device *dev, 12408 const char *format, ...) 12409 { 12410 struct va_format vaf; 12411 va_list args; 12412 12413 va_start(args, format); 12414 12415 vaf.fmt = format; 12416 vaf.va = &args; 12417 12418 __netdev_printk(level, dev, &vaf); 12419 12420 va_end(args); 12421 } 12422 EXPORT_SYMBOL(netdev_printk); 12423 12424 #define define_netdev_printk_level(func, level) \ 12425 void func(const struct net_device *dev, const char *fmt, ...) \ 12426 { \ 12427 struct va_format vaf; \ 12428 va_list args; \ 12429 \ 12430 va_start(args, fmt); \ 12431 \ 12432 vaf.fmt = fmt; \ 12433 vaf.va = &args; \ 12434 \ 12435 __netdev_printk(level, dev, &vaf); \ 12436 \ 12437 va_end(args); \ 12438 } \ 12439 EXPORT_SYMBOL(func); 12440 12441 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 12442 define_netdev_printk_level(netdev_alert, KERN_ALERT); 12443 define_netdev_printk_level(netdev_crit, KERN_CRIT); 12444 define_netdev_printk_level(netdev_err, KERN_ERR); 12445 define_netdev_printk_level(netdev_warn, KERN_WARNING); 12446 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 12447 define_netdev_printk_level(netdev_info, KERN_INFO); 12448 netdev_exit(struct net * net)12449 static void __net_exit netdev_exit(struct net *net) 12450 { 12451 kfree(net->dev_name_head); 12452 kfree(net->dev_index_head); 12453 xa_destroy(&net->dev_by_index); 12454 if (net != &init_net) 12455 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 12456 } 12457 12458 static struct pernet_operations __net_initdata netdev_net_ops = { 12459 .init = netdev_init, 12460 .exit = netdev_exit, 12461 }; 12462 default_device_exit_net(struct net * net)12463 static void __net_exit default_device_exit_net(struct net *net) 12464 { 12465 struct netdev_name_node *name_node, *tmp; 12466 struct net_device *dev, *aux; 12467 /* 12468 * Push all migratable network devices back to the 12469 * initial network namespace 12470 */ 12471 ASSERT_RTNL(); 12472 for_each_netdev_safe(net, dev, aux) { 12473 int err; 12474 char fb_name[IFNAMSIZ]; 12475 12476 /* Ignore unmoveable devices (i.e. 
loopback) */ 12477 if (dev->netns_immutable) 12478 continue; 12479 12480 /* Leave virtual devices for the generic cleanup */ 12481 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 12482 continue; 12483 12484 /* Push remaining network devices to init_net */ 12485 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 12486 if (netdev_name_in_use(&init_net, fb_name)) 12487 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 12488 12489 netdev_for_each_altname_safe(dev, name_node, tmp) 12490 if (netdev_name_in_use(&init_net, name_node->name)) 12491 __netdev_name_node_alt_destroy(name_node); 12492 12493 err = dev_change_net_namespace(dev, &init_net, fb_name); 12494 if (err) { 12495 pr_emerg("%s: failed to move %s to init_net: %d\n", 12496 __func__, dev->name, err); 12497 BUG(); 12498 } 12499 } 12500 } 12501 default_device_exit_batch(struct list_head * net_list)12502 static void __net_exit default_device_exit_batch(struct list_head *net_list) 12503 { 12504 /* At exit all network devices most be removed from a network 12505 * namespace. Do this in the reverse order of registration. 12506 * Do this across as many network namespaces as possible to 12507 * improve batching efficiency. 12508 */ 12509 struct net_device *dev; 12510 struct net *net; 12511 LIST_HEAD(dev_kill_list); 12512 12513 rtnl_lock(); 12514 list_for_each_entry(net, net_list, exit_list) { 12515 default_device_exit_net(net); 12516 cond_resched(); 12517 } 12518 12519 list_for_each_entry(net, net_list, exit_list) { 12520 for_each_netdev_reverse(net, dev) { 12521 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 12522 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 12523 else 12524 unregister_netdevice_queue(dev, &dev_kill_list); 12525 } 12526 } 12527 unregister_netdevice_many(&dev_kill_list); 12528 rtnl_unlock(); 12529 } 12530 12531 static struct pernet_operations __net_initdata default_device_ops = { 12532 .exit_batch = default_device_exit_batch, 12533 }; 12534 net_dev_struct_check(void)12535 static void __init net_dev_struct_check(void) 12536 { 12537 /* TX read-mostly hotpath */ 12538 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast); 12539 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops); 12540 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops); 12541 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx); 12542 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues); 12543 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size); 12544 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size); 12545 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs); 12546 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features); 12547 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc); 12548 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu); 12549 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom); 12550 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq); 12551 #ifdef CONFIG_XPS 12552 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps); 12553 #endif 12554 #ifdef CONFIG_NETFILTER_EGRESS 12555 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress); 12556 #endif 12557 #ifdef CONFIG_NET_XGRESS 12558 
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress); 12559 #endif 12560 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160); 12561 12562 /* TXRX read-mostly hotpath */ 12563 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats); 12564 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state); 12565 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags); 12566 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len); 12567 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features); 12568 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr); 12569 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46); 12570 12571 /* RX read-mostly hotpath */ 12572 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific); 12573 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex); 12574 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues); 12575 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx); 12576 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size); 12577 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size); 12578 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler); 12579 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data); 12580 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net); 12581 #ifdef CONFIG_NETPOLL 12582 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo); 12583 #endif 12584 #ifdef CONFIG_NET_XGRESS 12585 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress); 12586 #endif 12587 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 92); 12588 } 12589 12590 /* 12591 * Initialize the DEV module. At boot time this walks the device list and 12592 * unhooks any devices that fail to initialise (normally hardware not 12593 * present) and leaves us with a valid list of present and active devices. 
12594 * 12595 */ 12596 12597 /* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */ 12598 #define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE) 12599 net_page_pool_create(int cpuid)12600 static int net_page_pool_create(int cpuid) 12601 { 12602 #if IS_ENABLED(CONFIG_PAGE_POOL) 12603 struct page_pool_params page_pool_params = { 12604 .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE, 12605 .flags = PP_FLAG_SYSTEM_POOL, 12606 .nid = cpu_to_mem(cpuid), 12607 }; 12608 struct page_pool *pp_ptr; 12609 int err; 12610 12611 pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid); 12612 if (IS_ERR(pp_ptr)) 12613 return -ENOMEM; 12614 12615 err = xdp_reg_page_pool(pp_ptr); 12616 if (err) { 12617 page_pool_destroy(pp_ptr); 12618 return err; 12619 } 12620 12621 per_cpu(system_page_pool, cpuid) = pp_ptr; 12622 #endif 12623 return 0; 12624 } 12625 backlog_napi_should_run(unsigned int cpu)12626 static int backlog_napi_should_run(unsigned int cpu) 12627 { 12628 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); 12629 struct napi_struct *napi = &sd->backlog; 12630 12631 return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 12632 } 12633 run_backlog_napi(unsigned int cpu)12634 static void run_backlog_napi(unsigned int cpu) 12635 { 12636 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); 12637 12638 napi_threaded_poll_loop(&sd->backlog); 12639 } 12640 backlog_napi_setup(unsigned int cpu)12641 static void backlog_napi_setup(unsigned int cpu) 12642 { 12643 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); 12644 struct napi_struct *napi = &sd->backlog; 12645 12646 napi->thread = this_cpu_read(backlog_napi); 12647 set_bit(NAPI_STATE_THREADED, &napi->state); 12648 } 12649 12650 static struct smp_hotplug_thread backlog_threads = { 12651 .store = &backlog_napi, 12652 .thread_should_run = backlog_napi_should_run, 12653 .thread_fn = run_backlog_napi, 12654 .thread_comm = "backlog_napi/%u", 12655 .setup = backlog_napi_setup, 12656 }; 12657 12658 /* 12659 * This is called single threaded during boot, so no need 12660 * to take the rtnl semaphore. 12661 */ net_dev_init(void)12662 static int __init net_dev_init(void) 12663 { 12664 int i, rc = -ENOMEM; 12665 12666 BUG_ON(!dev_boot_phase); 12667 12668 net_dev_struct_check(); 12669 12670 if (dev_proc_init()) 12671 goto out; 12672 12673 if (netdev_kobject_init()) 12674 goto out; 12675 12676 for (i = 0; i < PTYPE_HASH_SIZE; i++) 12677 INIT_LIST_HEAD(&ptype_base[i]); 12678 12679 if (register_pernet_subsys(&netdev_net_ops)) 12680 goto out; 12681 12682 /* 12683 * Initialise the packet receive queues. 
12684 */ 12685 12686 flush_backlogs_fallback = flush_backlogs_alloc(); 12687 if (!flush_backlogs_fallback) 12688 goto out; 12689 12690 for_each_possible_cpu(i) { 12691 struct softnet_data *sd = &per_cpu(softnet_data, i); 12692 12693 skb_queue_head_init(&sd->input_pkt_queue); 12694 skb_queue_head_init(&sd->process_queue); 12695 #ifdef CONFIG_XFRM_OFFLOAD 12696 skb_queue_head_init(&sd->xfrm_backlog); 12697 #endif 12698 INIT_LIST_HEAD(&sd->poll_list); 12699 sd->output_queue_tailp = &sd->output_queue; 12700 #ifdef CONFIG_RPS 12701 INIT_CSD(&sd->csd, rps_trigger_softirq, sd); 12702 sd->cpu = i; 12703 #endif 12704 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); 12705 spin_lock_init(&sd->defer_lock); 12706 12707 gro_init(&sd->backlog.gro); 12708 sd->backlog.poll = process_backlog; 12709 sd->backlog.weight = weight_p; 12710 INIT_LIST_HEAD(&sd->backlog.poll_list); 12711 12712 if (net_page_pool_create(i)) 12713 goto out; 12714 } 12715 if (use_backlog_threads()) 12716 smpboot_register_percpu_thread(&backlog_threads); 12717 12718 dev_boot_phase = 0; 12719 12720 /* The loopback device is special: if any other network device 12721 * is present in a network namespace, the loopback device must 12722 * be present too. Since we now dynamically allocate and free the 12723 * loopback device, ensure this invariant is maintained by 12724 * keeping the loopback device as the first device on the 12725 * list of network devices, so that the loopback device 12726 * is the first device that appears and the last network device 12727 * that disappears. 12728 */ 12729 if (register_pernet_device(&loopback_net_ops)) 12730 goto out; 12731 12732 if (register_pernet_device(&default_device_ops)) 12733 goto out; 12734 12735 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 12736 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 12737 12738 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 12739 NULL, dev_cpu_dead); 12740 WARN_ON(rc < 0); 12741 rc = 0; 12742 12743 /* avoid static key IPIs to isolated CPUs */ 12744 if (housekeeping_enabled(HK_TYPE_MISC)) 12745 net_enable_timestamp(); 12746 out: 12747 if (rc < 0) { 12748 for_each_possible_cpu(i) { 12749 struct page_pool *pp_ptr; 12750 12751 pp_ptr = per_cpu(system_page_pool, i); 12752 if (!pp_ptr) 12753 continue; 12754 12755 xdp_unreg_page_pool(pp_ptr); 12756 page_pool_destroy(pp_ptr); 12757 per_cpu(system_page_pool, i) = NULL; 12758 } 12759 } 12760 12761 return rc; 12762 } 12763 12764 subsys_initcall(net_dev_init); 12765