// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *              Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *				        - netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "dev.h"
#include "net-sysfs.h"


static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example
 * usage; both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock_irqsave(struct softnet_data *sd,
				    unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
					  unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
						       const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							    const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as the head of the per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a packet-mangling protocol handler is
 *	first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write, so it will change the packet in
 *	place and subsequent readers will get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
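
/*
 * A minimal tap sketch; my_proto_rcv, my_ptype and the ETH_P_ALL choice
 * are illustrative assumptions.  The handler runs from softirq context
 * with its own reference to the skb, which it must consume or free.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	...
 *	dev_remove_pack(&my_ptype);	(may sleep)
 */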

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
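
/*
 * A hedged sketch: stacked devices (tunnels, VLANs) report the underlay's
 * ifindex here, so comparing iflink with ifindex hints at stacking.
 *
 *	if (dev_get_iflink(dev) != dev->ifindex)
 *		pr_info("%s is stacked on ifindex %d\n",
 *			dev->name, dev_get_iflink(dev));
 */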

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic, OVS needs to retrieve
 *	egress tunnel information for a packet. This API allows the
 *	caller to get that info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
		.daddr	= daddr,
	};
	struct net_device_path *path;
	int ret = 0;

	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
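
/*
 * A hedged lookup sketch; "eth0" and init_net are illustrative.  The
 * _rcu variant takes no reference, so the pointer may only be used
 * inside the RCU section, while dev_get_by_name() pairs with dev_put().
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "eth0");
 *	if (dev)
 *		pr_info("mtu %u\n", dev->mtu);
 *	rcu_read_unlock();
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 */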

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold either the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or NULL if it is not found. The device returned has had a reference
 *	added and the pointer is safe until the user calls dev_put to
 *	indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold the RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or NULL if it is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
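
/*
 * A hedged sketch; ARPHRD_LOOPBACK and init_net are illustrative choices.
 * The returned device carries a reference that must be dropped.
 *
 *	struct net_device *lo;
 *
 *	lo = dev_getfirstbyhwtype(&init_net, ARPHRD_LOOPBACK);
 *	if (lo) {
 *		...
 *		dev_put(lo);
 *	}
 */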

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or NULL if none is found. Must be called
 *	inside rtnl_lock(), and the result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
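
/*
 * A hedged illustration of the rules above:
 *
 *	dev_valid_name("eth0")	-> true
 *	dev_valid_name("eth/0")	-> false  (slash)
 *	dev_valid_name("eth 0")	-> false  (whitespace)
 *	dev_valid_name(".")	-> false
 *	dev_valid_name("")	-> false
 */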

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;
			list_for_each_entry(name_node, &d->name_node->list, list) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/*  avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					__set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!netdev_name_in_use(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
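
/*
 * A hedged sketch (caller holds rtnl): with "veth0" and "veth1" already
 * taken, a "veth%d" template would typically return unit 2 and set
 * dev->name to "veth2"; negative values report errors such as -EINVAL
 * for a malformed template or -ENFILE when no slot is free.
 *
 *	int unit = dev_alloc_name(dev, "veth%d");
 *
 *	if (unit < 0)
 *		return unit;
 */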

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (netdev_name_in_use(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	may be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock(&dev_base_lock);

	synchronize_rcu();

	write_lock(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
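
/*
 * A hedged round-trip sketch; the alias text is an illustrative choice:
 *
 *	static const char alias[] = "uplink to core switch";
 *	char buf[IFALIASZ];
 *
 *	dev_set_alias(dev, alias, strlen(alias));
 *	dev_get_alias(dev, buf, sizeof(buf));
 */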

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	Get the ifalias for a device. The caller must make sure dev cannot
 *	go away, e.g. by holding the RCU read lock or a reference count on
 *	the device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
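
/*
 * A hedged example: a virtualization or failover driver might announce a
 * migrated interface so that switches relearn its MAC address:
 *
 *	netdev_notify_peers(dev);
 *
 * or __netdev_notify_peers(dev) if the caller already holds rtnl_lock().
 */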

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and to keep loadavg accurate.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a no-op. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
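
/*
 * A hedged sketch of cycling an interface from code that takes the RTNL
 * lock itself; the NULL extack is an illustrative choice.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);
 *	if (!err)
 *		dev_close(dev);
 *	rtnl_unlock();
 */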

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list; it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail and
		 *	is only done if the device is UP.
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier to give it a race-free view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
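
/*
 * A minimal notifier sketch; my_netdev_event and my_nb are illustrative
 * names.  The callback runs under RTNL and can translate the event code
 * with netdev_cmd_to_name() for logging.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		pr_debug("%s: %s\n", dev->name, netdev_cmd_to_name(event));
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);
 */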
1750 
1751 /**
1752  * unregister_netdevice_notifier - unregister a network notifier block
1753  * @nb: notifier
1754  *
1755  * Unregister a notifier previously registered by
1756  * register_netdevice_notifier(). The notifier is unlinked into the
1757  * kernel structures and may then be reused. A negative errno code
1758  * is returned on a failure.
1759  *
1760  * After unregistering unregister and down device events are synthesized
1761  * for all devices on the device list to the removed notifier to remove
1762  * the need for special case cleanup code.
1763  */
1764 
1765 int unregister_netdevice_notifier(struct notifier_block *nb)
1766 {
1767 	struct net *net;
1768 	int err;
1769 
1770 	/* Close race with setup_net() and cleanup_net() */
1771 	down_write(&pernet_ops_rwsem);
1772 	rtnl_lock();
1773 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1774 	if (err)
1775 		goto unlock;
1776 
1777 	for_each_net(net)
1778 		call_netdevice_unregister_net_notifiers(nb, net);
1779 
1780 unlock:
1781 	rtnl_unlock();
1782 	up_write(&pernet_ops_rwsem);
1783 	return err;
1784 }
1785 EXPORT_SYMBOL(unregister_netdevice_notifier);
1786 
1787 static int __register_netdevice_notifier_net(struct net *net,
1788 					     struct notifier_block *nb,
1789 					     bool ignore_call_fail)
1790 {
1791 	int err;
1792 
1793 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1794 	if (err)
1795 		return err;
1796 	if (dev_boot_phase)
1797 		return 0;
1798 
1799 	err = call_netdevice_register_net_notifiers(nb, net);
1800 	if (err && !ignore_call_fail)
1801 		goto chain_unregister;
1802 
1803 	return 0;
1804 
1805 chain_unregister:
1806 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1807 	return err;
1808 }
1809 
1810 static int __unregister_netdevice_notifier_net(struct net *net,
1811 					       struct notifier_block *nb)
1812 {
1813 	int err;
1814 
1815 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1816 	if (err)
1817 		return err;
1818 
1819 	call_netdevice_unregister_net_notifiers(nb, net);
1820 	return 0;
1821 }
1822 
1823 /**
1824  * register_netdevice_notifier_net - register a per-netns network notifier block
1825  * @net: network namespace
1826  * @nb: notifier
1827  *
1828  * Register a notifier to be called when network device events occur.
1829  * The notifier passed is linked into the kernel structures and must
1830  * not be reused until it has been unregistered. A negative errno code
1831  * is returned on a failure.
1832  *
1833  * When registered, all registration and up events are replayed
1834  * to the new notifier so that it has a race-free view of the
1835  * network device list.
1836  */
1837 
1838 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1839 {
1840 	int err;
1841 
1842 	rtnl_lock();
1843 	err = __register_netdevice_notifier_net(net, nb, false);
1844 	rtnl_unlock();
1845 	return err;
1846 }
1847 EXPORT_SYMBOL(register_netdevice_notifier_net);
1848 
1849 /**
1850  * unregister_netdevice_notifier_net - unregister a per-netns
1851  *                                     network notifier block
1852  * @net: network namespace
1853  * @nb: notifier
1854  *
1855  * Unregister a notifier previously registered by
1856  * register_netdevice_notifier_net(). The notifier is unlinked from the
1857  * kernel structures and may then be reused. A negative errno code
1858  * is returned on a failure.
1859  *
1860  * After unregistering, unregister and down device events are synthesized
1861  * for all devices on the device list to the removed notifier to remove
1862  * the need for special case cleanup code.
1863  */
1864 
1865 int unregister_netdevice_notifier_net(struct net *net,
1866 				      struct notifier_block *nb)
1867 {
1868 	int err;
1869 
1870 	rtnl_lock();
1871 	err = __unregister_netdevice_notifier_net(net, nb);
1872 	rtnl_unlock();
1873 	return err;
1874 }
1875 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1876 
1877 int register_netdevice_notifier_dev_net(struct net_device *dev,
1878 					struct notifier_block *nb,
1879 					struct netdev_net_notifier *nn)
1880 {
1881 	int err;
1882 
1883 	rtnl_lock();
1884 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1885 	if (!err) {
1886 		nn->nb = nb;
1887 		list_add(&nn->list, &dev->net_notifier_list);
1888 	}
1889 	rtnl_unlock();
1890 	return err;
1891 }
1892 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1893 
1894 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1895 					  struct notifier_block *nb,
1896 					  struct netdev_net_notifier *nn)
1897 {
1898 	int err;
1899 
1900 	rtnl_lock();
1901 	list_del(&nn->list);
1902 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1903 	rtnl_unlock();
1904 	return err;
1905 }
1906 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
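/* Usage sketch for the _dev_net variants above (hypothetical names): the
 * caller supplies storage for a struct netdev_net_notifier so the
 * registration can be moved along with the device when it changes
 * network namespace (see move_netdevice_notifiers_dev_net() below):
 *
 *	static struct netdev_net_notifier my_nn;
 *
 *	err = register_netdevice_notifier_dev_net(dev, &my_nb, &my_nn);
 *	...
 *	unregister_netdevice_notifier_dev_net(dev, &my_nb, &my_nn);
 */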
1907 
1908 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1909 					     struct net *net)
1910 {
1911 	struct netdev_net_notifier *nn;
1912 
1913 	list_for_each_entry(nn, &dev->net_notifier_list, list) {
1914 		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
1915 		__register_netdevice_notifier_net(net, nn->nb, true);
1916 	}
1917 }
1918 
1919 /**
1920  *	call_netdevice_notifiers_info - call all network notifier blocks
1921  *	@val: value passed unmodified to notifier function
1922  *	@info: notifier information data
1923  *
1924  *	Call all network notifier blocks.  Parameters and return value
1925  *	are as for raw_notifier_call_chain().
1926  */
1927 
1928 static int call_netdevice_notifiers_info(unsigned long val,
1929 					 struct netdev_notifier_info *info)
1930 {
1931 	struct net *net = dev_net(info->dev);
1932 	int ret;
1933 
1934 	ASSERT_RTNL();
1935 
1936 	/* Run per-netns notifier block chain first, then run the global one.
1937 	 * Hopefully, one day, the global one is going to be removed after
1938 	 * all notifier block registrators get converted to be per-netns.
1939 	 * all notifier block registrants have been converted to per-netns.
1940 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1941 	if (ret & NOTIFY_STOP_MASK)
1942 		return ret;
1943 	return raw_notifier_call_chain(&netdev_chain, val, info);
1944 }
1945 
1946 /**
1947  *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
1948  *	                                       and roll back on error
1949  *	@val_up: value passed unmodified to notifier function
1950  *	@val_down: value passed unmodified to the notifier function when
1951  *	           recovering from an error on @val_up
1952  *	@info: notifier information data
1953  *
1954  *	Call all per-netns network notifier blocks, but not notifier blocks on
1955  *	the global notifier chain. Parameters and return value are as for
1956  *	raw_notifier_call_chain_robust().
1957  */
1958 
1959 static int
1960 call_netdevice_notifiers_info_robust(unsigned long val_up,
1961 				     unsigned long val_down,
1962 				     struct netdev_notifier_info *info)
1963 {
1964 	struct net *net = dev_net(info->dev);
1965 
1966 	ASSERT_RTNL();
1967 
1968 	return raw_notifier_call_chain_robust(&net->netdev_chain,
1969 					      val_up, val_down, info);
1970 }
1971 
1972 static int call_netdevice_notifiers_extack(unsigned long val,
1973 					   struct net_device *dev,
1974 					   struct netlink_ext_ack *extack)
1975 {
1976 	struct netdev_notifier_info info = {
1977 		.dev = dev,
1978 		.extack = extack,
1979 	};
1980 
1981 	return call_netdevice_notifiers_info(val, &info);
1982 }
1983 
1984 /**
1985  *	call_netdevice_notifiers - call all network notifier blocks
1986  *      @val: value passed unmodified to notifier function
1987  *      @dev: net_device pointer passed unmodified to notifier function
1988  *
1989  *	Call all network notifier blocks.  Parameters and return value
1990  *	are as for raw_notifier_call_chain().
1991  */
1992 
1993 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1994 {
1995 	return call_netdevice_notifiers_extack(val, dev, NULL);
1996 }
1997 EXPORT_SYMBOL(call_netdevice_notifiers);
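/* Hypothetical call site as an example: under RTNL, an event is emitted
 * with one of the NETDEV_* codes, and notifier_to_errno() converts the
 * chain's verdict when the event may veto an operation:
 *
 *	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 *	ret = notifier_to_errno(ret);
 *	if (ret)
 *		return ret;
 */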
1998 
1999 /**
2000  *	call_netdevice_notifiers_mtu - call all network notifier blocks
2001  *	@val: value passed unmodified to notifier function
2002  *	@dev: net_device pointer passed unmodified to notifier function
2003  *	@arg: additional u32 argument passed to the notifier function
2004  *
2005  *	Call all network notifier blocks.  Parameters and return value
2006  *	are as for raw_notifier_call_chain().
2007  */
2008 static int call_netdevice_notifiers_mtu(unsigned long val,
2009 					struct net_device *dev, u32 arg)
2010 {
2011 	struct netdev_notifier_info_ext info = {
2012 		.info.dev = dev,
2013 		.ext.mtu = arg,
2014 	};
2015 
2016 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2017 
2018 	return call_netdevice_notifiers_info(val, &info.info);
2019 }
2020 
2021 #ifdef CONFIG_NET_INGRESS
2022 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2023 
2024 void net_inc_ingress_queue(void)
2025 {
2026 	static_branch_inc(&ingress_needed_key);
2027 }
2028 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2029 
2030 void net_dec_ingress_queue(void)
2031 {
2032 	static_branch_dec(&ingress_needed_key);
2033 }
2034 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2035 #endif
2036 
2037 #ifdef CONFIG_NET_EGRESS
2038 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2039 
2040 void net_inc_egress_queue(void)
2041 {
2042 	static_branch_inc(&egress_needed_key);
2043 }
2044 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2045 
2046 void net_dec_egress_queue(void)
2047 {
2048 	static_branch_dec(&egress_needed_key);
2049 }
2050 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2051 #endif
2052 
2053 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2054 EXPORT_SYMBOL(netstamp_needed_key);
2055 #ifdef CONFIG_JUMP_LABEL
2056 static atomic_t netstamp_needed_deferred;
2057 static atomic_t netstamp_wanted;
2058 static void netstamp_clear(struct work_struct *work)
2059 {
2060 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2061 	int wanted;
2062 
2063 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2064 	if (wanted > 0)
2065 		static_branch_enable(&netstamp_needed_key);
2066 	else
2067 		static_branch_disable(&netstamp_needed_key);
2068 }
2069 static DECLARE_WORK(netstamp_work, netstamp_clear);
2070 #endif
2071 
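/* net_{enable,disable}_timestamp() may be called from atomic context,
 * while toggling a static key can sleep. Under CONFIG_JUMP_LABEL the
 * functions below therefore only touch the atomic counters on the fast
 * and contended paths, and defer the actual key flip to netstamp_work
 * running in process context.
 */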
2072 void net_enable_timestamp(void)
2073 {
2074 #ifdef CONFIG_JUMP_LABEL
2075 	int wanted;
2076 
2077 	while (1) {
2078 		wanted = atomic_read(&netstamp_wanted);
2079 		if (wanted <= 0)
2080 			break;
2081 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2082 			return;
2083 	}
2084 	atomic_inc(&netstamp_needed_deferred);
2085 	schedule_work(&netstamp_work);
2086 #else
2087 	static_branch_inc(&netstamp_needed_key);
2088 #endif
2089 }
2090 EXPORT_SYMBOL(net_enable_timestamp);
2091 
2092 void net_disable_timestamp(void)
2093 {
2094 #ifdef CONFIG_JUMP_LABEL
2095 	int wanted;
2096 
2097 	while (1) {
2098 		wanted = atomic_read(&netstamp_wanted);
2099 		if (wanted <= 1)
2100 			break;
2101 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2102 			return;
2103 	}
2104 	atomic_dec(&netstamp_needed_deferred);
2105 	schedule_work(&netstamp_work);
2106 #else
2107 	static_branch_dec(&netstamp_needed_key);
2108 #endif
2109 }
2110 EXPORT_SYMBOL(net_disable_timestamp);
2111 
2112 static inline void net_timestamp_set(struct sk_buff *skb)
2113 {
2114 	skb->tstamp = 0;
2115 	skb->mono_delivery_time = 0;
2116 	if (static_branch_unlikely(&netstamp_needed_key))
2117 		skb->tstamp = ktime_get_real();
2118 }
2119 
2120 #define net_timestamp_check(COND, SKB)				\
2121 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2122 		if ((COND) && !(SKB)->tstamp)			\
2123 			(SKB)->tstamp = ktime_get_real();	\
2124 	}							\
2125 
2126 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2127 {
2128 	return __is_skb_forwardable(dev, skb, true);
2129 }
2130 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2131 
2132 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2133 			      bool check_mtu)
2134 {
2135 	int ret = ____dev_forward_skb(dev, skb, check_mtu);
2136 
2137 	if (likely(!ret)) {
2138 		skb->protocol = eth_type_trans(skb, dev);
2139 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2140 	}
2141 
2142 	return ret;
2143 }
2144 
2145 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2146 {
2147 	return __dev_forward_skb2(dev, skb, true);
2148 }
2149 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2150 
2151 /**
2152  * dev_forward_skb - loopback an skb to another netif
2153  *
2154  * @dev: destination network device
2155  * @skb: buffer to forward
2156  *
2157  * return values:
2158  *	NET_RX_SUCCESS	(no congestion)
2159  *	NET_RX_DROP     (packet was dropped, but freed)
2160  *
2161  * dev_forward_skb can be used for injecting an skb from the
2162  * start_xmit function of one device into the receive queue
2163  * of another device.
2164  *
2165  * The receiving device may be in another namespace, so
2166  * we have to clear all information in the skb that could
2167  * impact namespace isolation.
2168  */
2169 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2170 {
2171 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2172 }
2173 EXPORT_SYMBOL_GPL(dev_forward_skb);
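/* Usage sketch (hypothetical driver; my_get_peer() is a placeholder):
 * a virtual device pair can loop transmitted packets into the peer's
 * receive path from its ndo_start_xmit, much like the veth driver does:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 *
 * Note the skb is consumed either way: on NET_RX_DROP it has already
 * been freed.
 */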
2174 
2175 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2176 {
2177 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2178 }
2179 
2180 static inline int deliver_skb(struct sk_buff *skb,
2181 			      struct packet_type *pt_prev,
2182 			      struct net_device *orig_dev)
2183 {
2184 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2185 		return -ENOMEM;
2186 	refcount_inc(&skb->users);
2187 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2188 }
2189 
2190 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2191 					  struct packet_type **pt,
2192 					  struct net_device *orig_dev,
2193 					  __be16 type,
2194 					  struct list_head *ptype_list)
2195 {
2196 	struct packet_type *ptype, *pt_prev = *pt;
2197 
2198 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2199 		if (ptype->type != type)
2200 			continue;
2201 		if (pt_prev)
2202 			deliver_skb(skb, pt_prev, orig_dev);
2203 		pt_prev = ptype;
2204 	}
2205 	*pt = pt_prev;
2206 }
2207 
2208 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2209 {
2210 	if (!ptype->af_packet_priv || !skb->sk)
2211 		return false;
2212 
2213 	if (ptype->id_match)
2214 		return ptype->id_match(ptype, skb->sk);
2215 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2216 		return true;
2217 
2218 	return false;
2219 }
2220 
2221 /**
2222  * dev_nit_active - return true if any network interface taps are in use
2223  *
2224  * @dev: network device to check for the presence of taps
2225  */
2226 bool dev_nit_active(struct net_device *dev)
2227 {
2228 	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2229 }
2230 EXPORT_SYMBOL_GPL(dev_nit_active);
2231 
2232 /*
2233  *	Support routine. Sends outgoing frames to any network
2234  *	taps currently in use.
2235  */
2236 
2237 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2238 {
2239 	struct packet_type *ptype;
2240 	struct sk_buff *skb2 = NULL;
2241 	struct packet_type *pt_prev = NULL;
2242 	struct list_head *ptype_list = &ptype_all;
2243 
2244 	rcu_read_lock();
2245 again:
2246 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2247 		if (ptype->ignore_outgoing)
2248 			continue;
2249 
2250 		/* Never send packets back to the socket
2251 		 * they originated from - MvS (miquels@drinkel.ow.org)
2252 		 */
2253 		if (skb_loop_sk(ptype, skb))
2254 			continue;
2255 
2256 		if (pt_prev) {
2257 			deliver_skb(skb2, pt_prev, skb->dev);
2258 			pt_prev = ptype;
2259 			continue;
2260 		}
2261 
2262 		/* need to clone skb, done only once */
2263 		skb2 = skb_clone(skb, GFP_ATOMIC);
2264 		if (!skb2)
2265 			goto out_unlock;
2266 
2267 		net_timestamp_set(skb2);
2268 
2269 		/* The network header should already be set
2270 		 * correctly by the sender; the bounds check below is
2271 		 * just protection against buggy protocols.
2272 		 */
2273 		skb_reset_mac_header(skb2);
2274 
2275 		if (skb_network_header(skb2) < skb2->data ||
2276 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2277 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2278 					     ntohs(skb2->protocol),
2279 					     dev->name);
2280 			skb_reset_network_header(skb2);
2281 		}
2282 
2283 		skb2->transport_header = skb2->network_header;
2284 		skb2->pkt_type = PACKET_OUTGOING;
2285 		pt_prev = ptype;
2286 	}
2287 
2288 	if (ptype_list == &ptype_all) {
2289 		ptype_list = &dev->ptype_all;
2290 		goto again;
2291 	}
2292 out_unlock:
2293 	if (pt_prev) {
2294 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2295 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2296 		else
2297 			kfree_skb(skb2);
2298 	}
2299 	rcu_read_unlock();
2300 }
2301 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
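/* The taps fed above are packet_type handlers on the ptype_all lists,
 * e.g. an AF_PACKET socket opened by tcpdump. Note a single clone is
 * shared by all taps: each delivery via deliver_skb() only takes an
 * extra reference, and the clone is marked PACKET_OUTGOING and
 * timestamped once at clone time.
 */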
2302 
2303 /**
2304  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2305  * @dev: Network device
2306  * @txq: number of queues available
2307  *
2308  * If real_num_tx_queues is changed the tc mappings may no longer be
2309  * valid. To resolve this verify the tc mapping remains valid and if
2310  * not NULL the mapping. With no priorities mapping to this
2311  * offset/count pair it will no longer be used. In the worst case TC0
2312  * is invalid nothing can be done so disable priority mappings. If is
2313  * expected that drivers will fix this mapping if they can before
2314  * calling netif_set_real_num_tx_queues.
2315  */
2316 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2317 {
2318 	int i;
2319 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2320 
2321 	/* If TC0 is invalidated disable TC mapping */
2322 	if (tc->offset + tc->count > txq) {
2323 		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2324 		dev->num_tc = 0;
2325 		return;
2326 	}
2327 
2328 	/* Invalidated prio to tc mappings set to TC0 */
2329 	for (i = 1; i < TC_BITMASK + 1; i++) {
2330 		int q = netdev_get_prio_tc_map(dev, i);
2331 
2332 		tc = &dev->tc_to_txq[q];
2333 		if (tc->offset + tc->count > txq) {
2334 			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2335 				    i, q);
2336 			netdev_set_prio_tc_map(dev, i, 0);
2337 		}
2338 	}
2339 }
2340 
2341 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2342 {
2343 	if (dev->num_tc) {
2344 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2345 		int i;
2346 
2347 		/* walk through the TCs and see if it falls into any of them */
2348 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2349 			if ((txq - tc->offset) < tc->count)
2350 				return i;
2351 		}
2352 
2353 		/* didn't find it, just return -1 to indicate no match */
2354 		return -1;
2355 	}
2356 
2357 	return 0;
2358 }
2359 EXPORT_SYMBOL(netdev_txq_to_tc);
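/* Worked example: with tc_to_txq[0] = { .offset = 0, .count = 4 } and
 * tc_to_txq[1] = { .offset = 4, .count = 4 }, looking up txq 5 fails the
 * first test (5 - 0 >= 4) but passes the second (5 - 4 < 4), so
 * netdev_txq_to_tc() returns 1.
 */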
2360 
2361 #ifdef CONFIG_XPS
2362 static struct static_key xps_needed __read_mostly;
2363 static struct static_key xps_rxqs_needed __read_mostly;
2364 static DEFINE_MUTEX(xps_map_mutex);
2365 #define xmap_dereference(P)		\
2366 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2367 
2368 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2369 			     struct xps_dev_maps *old_maps, int tci, u16 index)
2370 {
2371 	struct xps_map *map = NULL;
2372 	int pos;
2373 
2374 	if (dev_maps)
2375 		map = xmap_dereference(dev_maps->attr_map[tci]);
2376 	if (!map)
2377 		return false;
2378 
2379 	for (pos = map->len; pos--;) {
2380 		if (map->queues[pos] != index)
2381 			continue;
2382 
2383 		if (map->len > 1) {
2384 			map->queues[pos] = map->queues[--map->len];
2385 			break;
2386 		}
2387 
2388 		if (old_maps)
2389 			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2390 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2391 		kfree_rcu(map, rcu);
2392 		return false;
2393 	}
2394 
2395 	return true;
2396 }
2397 
2398 static bool remove_xps_queue_cpu(struct net_device *dev,
2399 				 struct xps_dev_maps *dev_maps,
2400 				 int cpu, u16 offset, u16 count)
2401 {
2402 	int num_tc = dev_maps->num_tc;
2403 	bool active = false;
2404 	int tci;
2405 
2406 	for (tci = cpu * num_tc; num_tc--; tci++) {
2407 		int i, j;
2408 
2409 		for (i = count, j = offset; i--; j++) {
2410 			if (!remove_xps_queue(dev_maps, NULL, tci, j))
2411 				break;
2412 		}
2413 
2414 		active |= i < 0;
2415 	}
2416 
2417 	return active;
2418 }
2419 
2420 static void reset_xps_maps(struct net_device *dev,
2421 			   struct xps_dev_maps *dev_maps,
2422 			   enum xps_map_type type)
2423 {
2424 	static_key_slow_dec_cpuslocked(&xps_needed);
2425 	if (type == XPS_RXQS)
2426 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2427 
2428 	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2429 
2430 	kfree_rcu(dev_maps, rcu);
2431 }
2432 
2433 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2434 			   u16 offset, u16 count)
2435 {
2436 	struct xps_dev_maps *dev_maps;
2437 	bool active = false;
2438 	int i, j;
2439 
2440 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2441 	if (!dev_maps)
2442 		return;
2443 
2444 	for (j = 0; j < dev_maps->nr_ids; j++)
2445 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2446 	if (!active)
2447 		reset_xps_maps(dev, dev_maps, type);
2448 
2449 	if (type == XPS_CPUS) {
2450 		for (i = offset + (count - 1); count--; i--)
2451 			netdev_queue_numa_node_write(
2452 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2453 	}
2454 }
2455 
2456 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2457 				   u16 count)
2458 {
2459 	if (!static_key_false(&xps_needed))
2460 		return;
2461 
2462 	cpus_read_lock();
2463 	mutex_lock(&xps_map_mutex);
2464 
2465 	if (static_key_false(&xps_rxqs_needed))
2466 		clean_xps_maps(dev, XPS_RXQS, offset, count);
2467 
2468 	clean_xps_maps(dev, XPS_CPUS, offset, count);
2469 
2470 	mutex_unlock(&xps_map_mutex);
2471 	cpus_read_unlock();
2472 }
2473 
2474 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2475 {
2476 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2477 }
2478 
2479 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2480 				      u16 index, bool is_rxqs_map)
2481 {
2482 	struct xps_map *new_map;
2483 	int alloc_len = XPS_MIN_MAP_ALLOC;
2484 	int i, pos;
2485 
2486 	for (pos = 0; map && pos < map->len; pos++) {
2487 		if (map->queues[pos] != index)
2488 			continue;
2489 		return map;
2490 	}
2491 
2492 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2493 	if (map) {
2494 		if (pos < map->alloc_len)
2495 			return map;
2496 
2497 		alloc_len = map->alloc_len * 2;
2498 	}
2499 
2500 	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2501 	 *  map
2502 	 */
2503 	if (is_rxqs_map)
2504 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2505 	else
2506 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2507 				       cpu_to_node(attr_index));
2508 	if (!new_map)
2509 		return NULL;
2510 
2511 	for (i = 0; i < pos; i++)
2512 		new_map->queues[i] = map->queues[i];
2513 	new_map->alloc_len = alloc_len;
2514 	new_map->len = pos;
2515 
2516 	return new_map;
2517 }
2518 
2519 /* Copy xps maps at a given index */
2520 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2521 			      struct xps_dev_maps *new_dev_maps, int index,
2522 			      int tc, bool skip_tc)
2523 {
2524 	int i, tci = index * dev_maps->num_tc;
2525 	struct xps_map *map;
2526 
2527 	/* copy maps belonging to foreign traffic classes */
2528 	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2529 		if (i == tc && skip_tc)
2530 			continue;
2531 
2532 		/* fill in the new device map from the old device map */
2533 		map = xmap_dereference(dev_maps->attr_map[tci]);
2534 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2535 	}
2536 }
2537 
2538 /* Must be called under cpus_read_lock */
2539 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2540 			  u16 index, enum xps_map_type type)
2541 {
2542 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2543 	const unsigned long *online_mask = NULL;
2544 	bool active = false, copy = false;
2545 	int i, j, tci, numa_node_id = -2;
2546 	int maps_sz, num_tc = 1, tc = 0;
2547 	struct xps_map *map, *new_map;
2548 	unsigned int nr_ids;
2549 
2550 	if (dev->num_tc) {
2551 		/* Do not allow XPS on subordinate device directly */
2552 		num_tc = dev->num_tc;
2553 		if (num_tc < 0)
2554 			return -EINVAL;
2555 
2556 		/* If queue belongs to subordinate dev use its map */
2557 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2558 
2559 		tc = netdev_txq_to_tc(dev, index);
2560 		if (tc < 0)
2561 			return -EINVAL;
2562 	}
2563 
2564 	mutex_lock(&xps_map_mutex);
2565 
2566 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2567 	if (type == XPS_RXQS) {
2568 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2569 		nr_ids = dev->num_rx_queues;
2570 	} else {
2571 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2572 		if (num_possible_cpus() > 1)
2573 			online_mask = cpumask_bits(cpu_online_mask);
2574 		nr_ids = nr_cpu_ids;
2575 	}
2576 
2577 	if (maps_sz < L1_CACHE_BYTES)
2578 		maps_sz = L1_CACHE_BYTES;
2579 
2580 	/* The old dev_maps could be larger or smaller than the one we're
2581 	 * setting up now, as dev->num_tc or nr_ids could have been updated in
2582 	 * between. We could try to be smart, but let's be safe instead and only
2583 	 * copy foreign traffic classes if the two map sizes match.
2584 	 */
2585 	if (dev_maps &&
2586 	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2587 		copy = true;
2588 
2589 	/* allocate memory for queue storage */
2590 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2591 	     j < nr_ids;) {
2592 		if (!new_dev_maps) {
2593 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2594 			if (!new_dev_maps) {
2595 				mutex_unlock(&xps_map_mutex);
2596 				return -ENOMEM;
2597 			}
2598 
2599 			new_dev_maps->nr_ids = nr_ids;
2600 			new_dev_maps->num_tc = num_tc;
2601 		}
2602 
2603 		tci = j * num_tc + tc;
2604 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2605 
2606 		map = expand_xps_map(map, j, index, type == XPS_RXQS);
2607 		if (!map)
2608 			goto error;
2609 
2610 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2611 	}
2612 
2613 	if (!new_dev_maps)
2614 		goto out_no_new_maps;
2615 
2616 	if (!dev_maps) {
2617 		/* Increment static keys at most once per type */
2618 		static_key_slow_inc_cpuslocked(&xps_needed);
2619 		if (type == XPS_RXQS)
2620 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2621 	}
2622 
2623 	for (j = 0; j < nr_ids; j++) {
2624 		bool skip_tc = false;
2625 
2626 		tci = j * num_tc + tc;
2627 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2628 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2629 			/* add tx-queue to CPU/rx-queue maps */
2630 			int pos = 0;
2631 
2632 			skip_tc = true;
2633 
2634 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2635 			while ((pos < map->len) && (map->queues[pos] != index))
2636 				pos++;
2637 
2638 			if (pos == map->len)
2639 				map->queues[map->len++] = index;
2640 #ifdef CONFIG_NUMA
2641 			if (type == XPS_CPUS) {
2642 				if (numa_node_id == -2)
2643 					numa_node_id = cpu_to_node(j);
2644 				else if (numa_node_id != cpu_to_node(j))
2645 					numa_node_id = -1;
2646 			}
2647 #endif
2648 		}
2649 
2650 		if (copy)
2651 			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2652 					  skip_tc);
2653 	}
2654 
2655 	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2656 
2657 	/* Cleanup old maps */
2658 	if (!dev_maps)
2659 		goto out_no_old_maps;
2660 
2661 	for (j = 0; j < dev_maps->nr_ids; j++) {
2662 		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2663 			map = xmap_dereference(dev_maps->attr_map[tci]);
2664 			if (!map)
2665 				continue;
2666 
2667 			if (copy) {
2668 				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2669 				if (map == new_map)
2670 					continue;
2671 			}
2672 
2673 			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2674 			kfree_rcu(map, rcu);
2675 		}
2676 	}
2677 
2678 	old_dev_maps = dev_maps;
2679 
2680 out_no_old_maps:
2681 	dev_maps = new_dev_maps;
2682 	active = true;
2683 
2684 out_no_new_maps:
2685 	if (type == XPS_CPUS)
2686 		/* update Tx queue numa node */
2687 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2688 					     (numa_node_id >= 0) ?
2689 					     numa_node_id : NUMA_NO_NODE);
2690 
2691 	if (!dev_maps)
2692 		goto out_no_maps;
2693 
2694 	/* removes tx-queue from unused CPUs/rx-queues */
2695 	for (j = 0; j < dev_maps->nr_ids; j++) {
2696 		tci = j * dev_maps->num_tc;
2697 
2698 		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2699 			if (i == tc &&
2700 			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2701 			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2702 				continue;
2703 
2704 			active |= remove_xps_queue(dev_maps,
2705 						   copy ? old_dev_maps : NULL,
2706 						   tci, index);
2707 		}
2708 	}
2709 
2710 	if (old_dev_maps)
2711 		kfree_rcu(old_dev_maps, rcu);
2712 
2713 	/* free map if not active */
2714 	if (!active)
2715 		reset_xps_maps(dev, dev_maps, type);
2716 
2717 out_no_maps:
2718 	mutex_unlock(&xps_map_mutex);
2719 
2720 	return 0;
2721 error:
2722 	/* remove any maps that we added */
2723 	for (j = 0; j < nr_ids; j++) {
2724 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2725 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2726 			map = copy ?
2727 			      xmap_dereference(dev_maps->attr_map[tci]) :
2728 			      NULL;
2729 			if (new_map && new_map != map)
2730 				kfree(new_map);
2731 		}
2732 	}
2733 
2734 	mutex_unlock(&xps_map_mutex);
2735 
2736 	kfree(new_dev_maps);
2737 	return -ENOMEM;
2738 }
2739 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2740 
2741 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2742 			u16 index)
2743 {
2744 	int ret;
2745 
2746 	cpus_read_lock();
2747 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2748 	cpus_read_unlock();
2749 
2750 	return ret;
2751 }
2752 EXPORT_SYMBOL(netif_set_xps_queue);
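/* Example (sketch): a driver steering transmissions from CPUs 0-1 to Tx
 * queue 0 at setup time, equivalent to writing "3" to
 * /sys/class/net/<iface>/queues/tx-0/xps_cpus:
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 */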
2753 
2754 #endif
2755 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2756 {
2757 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2758 
2759 	/* Unbind any subordinate channels */
2760 	while (txq-- != &dev->_tx[0]) {
2761 		if (txq->sb_dev)
2762 			netdev_unbind_sb_channel(dev, txq->sb_dev);
2763 	}
2764 }
2765 
2766 void netdev_reset_tc(struct net_device *dev)
2767 {
2768 #ifdef CONFIG_XPS
2769 	netif_reset_xps_queues_gt(dev, 0);
2770 #endif
2771 	netdev_unbind_all_sb_channels(dev);
2772 
2773 	/* Reset TC configuration of device */
2774 	dev->num_tc = 0;
2775 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2776 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2777 }
2778 EXPORT_SYMBOL(netdev_reset_tc);
2779 
2780 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2781 {
2782 	if (tc >= dev->num_tc)
2783 		return -EINVAL;
2784 
2785 #ifdef CONFIG_XPS
2786 	netif_reset_xps_queues(dev, offset, count);
2787 #endif
2788 	dev->tc_to_txq[tc].count = count;
2789 	dev->tc_to_txq[tc].offset = offset;
2790 	return 0;
2791 }
2792 EXPORT_SYMBOL(netdev_set_tc_queue);
2793 
2794 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2795 {
2796 	if (num_tc > TC_MAX_QUEUE)
2797 		return -EINVAL;
2798 
2799 #ifdef CONFIG_XPS
2800 	netif_reset_xps_queues_gt(dev, 0);
2801 #endif
2802 	netdev_unbind_all_sb_channels(dev);
2803 
2804 	dev->num_tc = num_tc;
2805 	return 0;
2806 }
2807 EXPORT_SYMBOL(netdev_set_num_tc);
2808 
2809 void netdev_unbind_sb_channel(struct net_device *dev,
2810 			      struct net_device *sb_dev)
2811 {
2812 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2813 
2814 #ifdef CONFIG_XPS
2815 	netif_reset_xps_queues_gt(sb_dev, 0);
2816 #endif
2817 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2818 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2819 
2820 	while (txq-- != &dev->_tx[0]) {
2821 		if (txq->sb_dev == sb_dev)
2822 			txq->sb_dev = NULL;
2823 	}
2824 }
2825 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2826 
2827 int netdev_bind_sb_channel_queue(struct net_device *dev,
2828 				 struct net_device *sb_dev,
2829 				 u8 tc, u16 count, u16 offset)
2830 {
2831 	/* Make certain the sb_dev and dev are already configured */
2832 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2833 		return -EINVAL;
2834 
2835 	/* We cannot hand out queues we don't have */
2836 	if ((offset + count) > dev->real_num_tx_queues)
2837 		return -EINVAL;
2838 
2839 	/* Record the mapping */
2840 	sb_dev->tc_to_txq[tc].count = count;
2841 	sb_dev->tc_to_txq[tc].offset = offset;
2842 
2843 	/* Provide a way for Tx queue to find the tc_to_txq map or
2844 	 * XPS map for itself.
2845 	 */
2846 	while (count--)
2847 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2848 
2849 	return 0;
2850 }
2851 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2852 
2853 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2854 {
2855 	/* Do not use a multiqueue device to represent a subordinate channel */
2856 	if (netif_is_multiqueue(dev))
2857 		return -ENODEV;
2858 
2859 	/* We allow channels 1 - 32767 to be used for subordinate channels.
2860 	 * Channel 0 is meant to be "native" mode and used only to represent
2861 	 * the main root device. We allow writing 0 to reset the device back
2862 	 * to normal mode after being used as a subordinate channel.
2863 	 */
2864 	if (channel > S16_MAX)
2865 		return -EINVAL;
2866 
2867 	dev->num_tc = -channel;
2868 
2869 	return 0;
2870 }
2871 EXPORT_SYMBOL(netdev_set_sb_channel);
2872 
2873 /*
2874  * Routine to help set real_num_tx_queues. To avoid skbs being mapped to queues
2875  * greater than the new real_num_tx_queues, stale skbs on the qdisc must be flushed.
2876  */
2877 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2878 {
2879 	bool disabling;
2880 	int rc;
2881 
2882 	disabling = txq < dev->real_num_tx_queues;
2883 
2884 	if (txq < 1 || txq > dev->num_tx_queues)
2885 		return -EINVAL;
2886 
2887 	if (dev->reg_state == NETREG_REGISTERED ||
2888 	    dev->reg_state == NETREG_UNREGISTERING) {
2889 		ASSERT_RTNL();
2890 
2891 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2892 						  txq);
2893 		if (rc)
2894 			return rc;
2895 
2896 		if (dev->num_tc)
2897 			netif_setup_tc(dev, txq);
2898 
2899 		dev_qdisc_change_real_num_tx(dev, txq);
2900 
2901 		dev->real_num_tx_queues = txq;
2902 
2903 		if (disabling) {
2904 			synchronize_net();
2905 			qdisc_reset_all_tx_gt(dev, txq);
2906 #ifdef CONFIG_XPS
2907 			netif_reset_xps_queues_gt(dev, txq);
2908 #endif
2909 		}
2910 	} else {
2911 		dev->real_num_tx_queues = txq;
2912 	}
2913 
2914 	return 0;
2915 }
2916 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2917 
2918 #ifdef CONFIG_SYSFS
2919 /**
2920  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2921  *	@dev: Network device
2922  *	@rxq: Actual number of RX queues
2923  *
2924  *	This must be called either with the rtnl_lock held or before
2925  *	registration of the net device.  Returns 0 on success, or a
2926  *	negative error code.  If called before registration, it always
2927  *	succeeds.
2928  */
2929 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2930 {
2931 	int rc;
2932 
2933 	if (rxq < 1 || rxq > dev->num_rx_queues)
2934 		return -EINVAL;
2935 
2936 	if (dev->reg_state == NETREG_REGISTERED) {
2937 		ASSERT_RTNL();
2938 
2939 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2940 						  rxq);
2941 		if (rc)
2942 			return rc;
2943 	}
2944 
2945 	dev->real_num_rx_queues = rxq;
2946 	return 0;
2947 }
2948 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2949 #endif
2950 
2951 /**
2952  *	netif_set_real_num_queues - set actual number of RX and TX queues used
2953  *	@dev: Network device
2954  *	@txq: Actual number of TX queues
2955  *	@rxq: Actual number of RX queues
2956  *
2957  *	Set the real number of both TX and RX queues.
2958  *	Does nothing if the number of queues is already correct.
2959  */
2960 int netif_set_real_num_queues(struct net_device *dev,
2961 			      unsigned int txq, unsigned int rxq)
2962 {
2963 	unsigned int old_rxq = dev->real_num_rx_queues;
2964 	int err;
2965 
2966 	if (txq < 1 || txq > dev->num_tx_queues ||
2967 	    rxq < 1 || rxq > dev->num_rx_queues)
2968 		return -EINVAL;
2969 
2970 	/* Start from increases, so the error path only does decreases -
2971 	 * decreases can't fail.
2972 	 */
2973 	if (rxq > dev->real_num_rx_queues) {
2974 		err = netif_set_real_num_rx_queues(dev, rxq);
2975 		if (err)
2976 			return err;
2977 	}
2978 	if (txq > dev->real_num_tx_queues) {
2979 		err = netif_set_real_num_tx_queues(dev, txq);
2980 		if (err)
2981 			goto undo_rx;
2982 	}
2983 	if (rxq < dev->real_num_rx_queues)
2984 		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
2985 	if (txq < dev->real_num_tx_queues)
2986 		WARN_ON(netif_set_real_num_tx_queues(dev, txq));
2987 
2988 	return 0;
2989 undo_rx:
2990 	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
2991 	return err;
2992 }
2993 EXPORT_SYMBOL(netif_set_real_num_queues);
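/* Sketch of a driver resizing both directions in one call, e.g. after
 * renegotiating channel counts with firmware (new_txq/new_rxq are
 * hypothetical values):
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_queues(dev, new_txq, new_rxq);
 *	rtnl_unlock();
 *
 * Increases are applied first so a failure can be unwound with
 * decreases, which cannot fail.
 */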
2994 
2995 /**
2996  * netif_get_num_default_rss_queues - default number of RSS queues
2997  *
2998  * Default value is the number of physical cores if there are only 1 or 2,
2999  * or half that number (rounded up) if there are more.
3000  */
3001 int netif_get_num_default_rss_queues(void)
3002 {
3003 	cpumask_var_t cpus;
3004 	int cpu, count = 0;
3005 
3006 	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3007 		return 1;
3008 
3009 	cpumask_copy(cpus, cpu_online_mask);
3010 	for_each_cpu(cpu, cpus) {
3011 		++count;
3012 		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3013 	}
3014 	free_cpumask_var(cpus);
3015 
3016 	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3017 }
3018 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
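/* Worked example: on a 16-CPU machine with 2-way SMT the loop above
 * counts 8 physical cores, so the function returns DIV_ROUND_UP(8, 2) = 4
 * default RSS queues; a 2-core machine without SMT gets 2.
 */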
3019 
3020 static void __netif_reschedule(struct Qdisc *q)
3021 {
3022 	struct softnet_data *sd;
3023 	unsigned long flags;
3024 
3025 	local_irq_save(flags);
3026 	sd = this_cpu_ptr(&softnet_data);
3027 	q->next_sched = NULL;
3028 	*sd->output_queue_tailp = q;
3029 	sd->output_queue_tailp = &q->next_sched;
3030 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3031 	local_irq_restore(flags);
3032 }
3033 
3034 void __netif_schedule(struct Qdisc *q)
3035 {
3036 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3037 		__netif_reschedule(q);
3038 }
3039 EXPORT_SYMBOL(__netif_schedule);
3040 
3041 struct dev_kfree_skb_cb {
3042 	enum skb_free_reason reason;
3043 };
3044 
3045 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3046 {
3047 	return (struct dev_kfree_skb_cb *)skb->cb;
3048 }
3049 
3050 void netif_schedule_queue(struct netdev_queue *txq)
3051 {
3052 	rcu_read_lock();
3053 	if (!netif_xmit_stopped(txq)) {
3054 		struct Qdisc *q = rcu_dereference(txq->qdisc);
3055 
3056 		__netif_schedule(q);
3057 	}
3058 	rcu_read_unlock();
3059 }
3060 EXPORT_SYMBOL(netif_schedule_queue);
3061 
3062 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3063 {
3064 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3065 		struct Qdisc *q;
3066 
3067 		rcu_read_lock();
3068 		q = rcu_dereference(dev_queue->qdisc);
3069 		__netif_schedule(q);
3070 		rcu_read_unlock();
3071 	}
3072 }
3073 EXPORT_SYMBOL(netif_tx_wake_queue);
3074 
3075 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3076 {
3077 	unsigned long flags;
3078 
3079 	if (unlikely(!skb))
3080 		return;
3081 
3082 	if (likely(refcount_read(&skb->users) == 1)) {
3083 		smp_rmb();
3084 		refcount_set(&skb->users, 0);
3085 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3086 		return;
3087 	}
3088 	get_kfree_skb_cb(skb)->reason = reason;
3089 	local_irq_save(flags);
3090 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3091 	__this_cpu_write(softnet_data.completion_queue, skb);
3092 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3093 	local_irq_restore(flags);
3094 }
3095 EXPORT_SYMBOL(__dev_kfree_skb_irq);
3096 
3097 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3098 {
3099 	if (in_hardirq() || irqs_disabled())
3100 		__dev_kfree_skb_irq(skb, reason);
3101 	else
3102 		dev_kfree_skb(skb);
3103 }
3104 EXPORT_SYMBOL(__dev_kfree_skb_any);
3105 
3106 
3107 /**
3108  * netif_device_detach - mark device as removed
3109  * @dev: network device
3110  *
3111  * Mark device as removed from the system and therefore no longer available.
3112  */
3113 void netif_device_detach(struct net_device *dev)
3114 {
3115 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3116 	    netif_running(dev)) {
3117 		netif_tx_stop_all_queues(dev);
3118 	}
3119 }
3120 EXPORT_SYMBOL(netif_device_detach);
3121 
3122 /**
3123  * netif_device_attach - mark device as attached
3124  * @dev: network device
3125  *
3126  * Mark device as attached to the system and restart if needed.
3127  */
3128 void netif_device_attach(struct net_device *dev)
3129 {
3130 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3131 	    netif_running(dev)) {
3132 		netif_tx_wake_all_queues(dev);
3133 		__netdev_watchdog_up(dev);
3134 	}
3135 }
3136 EXPORT_SYMBOL(netif_device_attach);
3137 
3138 /*
3139  * Returns a Tx hash based on the given packet descriptor and the number
3140  * of Tx queues to be used as a distribution range.
3141  */
3142 static u16 skb_tx_hash(const struct net_device *dev,
3143 		       const struct net_device *sb_dev,
3144 		       struct sk_buff *skb)
3145 {
3146 	u32 hash;
3147 	u16 qoffset = 0;
3148 	u16 qcount = dev->real_num_tx_queues;
3149 
3150 	if (dev->num_tc) {
3151 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3152 
3153 		qoffset = sb_dev->tc_to_txq[tc].offset;
3154 		qcount = sb_dev->tc_to_txq[tc].count;
3155 		if (unlikely(!qcount)) {
3156 			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3157 					     sb_dev->name, qoffset, tc);
3158 			qoffset = 0;
3159 			qcount = dev->real_num_tx_queues;
3160 		}
3161 	}
3162 
3163 	if (skb_rx_queue_recorded(skb)) {
3164 		hash = skb_get_rx_queue(skb);
3165 		if (hash >= qoffset)
3166 			hash -= qoffset;
3167 		while (unlikely(hash >= qcount))
3168 			hash -= qcount;
3169 		return hash + qoffset;
3170 	}
3171 
3172 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3173 }
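/* Example of the scaling above: with qoffset = 0 and qcount = 8,
 * reciprocal_scale() maps a 32-bit flow hash h to ((u64)h * 8) >> 32,
 * i.e. a queue index in [0, 7], avoiding a modulo operation.
 */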
3174 
3175 static void skb_warn_bad_offload(const struct sk_buff *skb)
3176 {
3177 	static const netdev_features_t null_features;
3178 	struct net_device *dev = skb->dev;
3179 	const char *name = "";
3180 
3181 	if (!net_ratelimit())
3182 		return;
3183 
3184 	if (dev) {
3185 		if (dev->dev.parent)
3186 			name = dev_driver_string(dev->dev.parent);
3187 		else
3188 			name = netdev_name(dev);
3189 	}
3190 	skb_dump(KERN_WARNING, skb, false);
3191 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3192 	     name, dev ? &dev->features : &null_features,
3193 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3194 }
3195 
3196 /*
3197  * Invalidate hardware checksum when packet is to be mangled, and
3198  * complete checksum manually on outgoing path.
3199  */
3200 int skb_checksum_help(struct sk_buff *skb)
3201 {
3202 	__wsum csum;
3203 	int ret = 0, offset;
3204 
3205 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3206 		goto out_set_summed;
3207 
3208 	if (unlikely(skb_is_gso(skb))) {
3209 		skb_warn_bad_offload(skb);
3210 		return -EINVAL;
3211 	}
3212 
3213 	/* Before computing a checksum, we should make sure no frag could
3214 	 * be modified by an external entity: the checksum could be wrong.
3215 	 */
3216 	if (skb_has_shared_frag(skb)) {
3217 		ret = __skb_linearize(skb);
3218 		if (ret)
3219 			goto out;
3220 	}
3221 
3222 	offset = skb_checksum_start_offset(skb);
3223 	BUG_ON(offset >= skb_headlen(skb));
3224 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3225 
3226 	offset += skb->csum_offset;
3227 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3228 
3229 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3230 	if (ret)
3231 		goto out;
3232 
3233 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3234 out_set_summed:
3235 	skb->ip_summed = CHECKSUM_NONE;
3236 out:
3237 	return ret;
3238 }
3239 EXPORT_SYMBOL(skb_checksum_help);
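/* Note on the fold above: csum_fold() compresses the 32-bit partial sum
 * to 16 bits, and a result of zero is replaced with CSUM_MANGLED_0
 * (0xffff), which is equivalent in ones' complement arithmetic. This
 * matters for UDP, where an all-zero checksum field means "no checksum
 * computed".
 */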
3240 
3241 int skb_crc32c_csum_help(struct sk_buff *skb)
3242 {
3243 	__le32 crc32c_csum;
3244 	int ret = 0, offset, start;
3245 
3246 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3247 		goto out;
3248 
3249 	if (unlikely(skb_is_gso(skb)))
3250 		goto out;
3251 
3252 	/* Before computing a checksum, we should make sure no frag could
3253 	 * be modified by an external entity: the checksum could be wrong.
3254 	 */
3255 	if (unlikely(skb_has_shared_frag(skb))) {
3256 		ret = __skb_linearize(skb);
3257 		if (ret)
3258 			goto out;
3259 	}
3260 	start = skb_checksum_start_offset(skb);
3261 	offset = start + offsetof(struct sctphdr, checksum);
3262 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3263 		ret = -EINVAL;
3264 		goto out;
3265 	}
3266 
3267 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3268 	if (ret)
3269 		goto out;
3270 
3271 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3272 						  skb->len - start, ~(__u32)0,
3273 						  crc32c_csum_stub));
3274 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3275 	skb->ip_summed = CHECKSUM_NONE;
3276 	skb->csum_not_inet = 0;
3277 out:
3278 	return ret;
3279 }
3280 
3281 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3282 {
3283 	__be16 type = skb->protocol;
3284 
3285 	/* Tunnel gso handlers can set protocol to ethernet. */
3286 	if (type == htons(ETH_P_TEB)) {
3287 		struct ethhdr *eth;
3288 
3289 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3290 			return 0;
3291 
3292 		eth = (struct ethhdr *)skb->data;
3293 		type = eth->h_proto;
3294 	}
3295 
3296 	return __vlan_get_protocol(skb, type, depth);
3297 }
3298 
3299 /* openvswitch calls this on rx path, so we need a different check.
3300  */
3301 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3302 {
3303 	if (tx_path)
3304 		return skb->ip_summed != CHECKSUM_PARTIAL &&
3305 		       skb->ip_summed != CHECKSUM_UNNECESSARY;
3306 
3307 	return skb->ip_summed == CHECKSUM_NONE;
3308 }
3309 
3310 /**
3311  *	__skb_gso_segment - Perform segmentation on skb.
3312  *	@skb: buffer to segment
3313  *	@features: features for the output path (see dev->features)
3314  *	@tx_path: whether it is called in TX path
3315  *
3316  *	This function segments the given skb and returns a list of segments.
3317  *
3318  *	It may return NULL if the skb requires no segmentation.  This is
3319  *	only possible when GSO is used for verifying header integrity.
3320  *
3321  *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3322  */
3323 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3324 				  netdev_features_t features, bool tx_path)
3325 {
3326 	struct sk_buff *segs;
3327 
3328 	if (unlikely(skb_needs_check(skb, tx_path))) {
3329 		int err;
3330 
3331 		/* We're going to init ->check field in TCP or UDP header */
3332 		err = skb_cow_head(skb, 0);
3333 		if (err < 0)
3334 			return ERR_PTR(err);
3335 	}
3336 
3337 	/* Only report GSO partial support if it will enable us to
3338 	 * support segmentation on this frame without needing additional
3339 	 * work.
3340 	 */
3341 	if (features & NETIF_F_GSO_PARTIAL) {
3342 		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3343 		struct net_device *dev = skb->dev;
3344 
3345 		partial_features |= dev->features & dev->gso_partial_features;
3346 		if (!skb_gso_ok(skb, features | partial_features))
3347 			features &= ~NETIF_F_GSO_PARTIAL;
3348 	}
3349 
3350 	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3351 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3352 
3353 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3354 	SKB_GSO_CB(skb)->encap_level = 0;
3355 
3356 	skb_reset_mac_header(skb);
3357 	skb_reset_mac_len(skb);
3358 
3359 	segs = skb_mac_gso_segment(skb, features);
3360 
3361 	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3362 		skb_warn_bad_offload(skb);
3363 
3364 	return segs;
3365 }
3366 EXPORT_SYMBOL(__skb_gso_segment);
3367 
3368 /* Take action when hardware reception checksum errors are detected. */
3369 #ifdef CONFIG_BUG
3370 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3371 {
3372 	netdev_err(dev, "hw csum failure\n");
3373 	skb_dump(KERN_ERR, skb, true);
3374 	dump_stack();
3375 }
3376 
3377 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3378 {
3379 	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3380 }
3381 EXPORT_SYMBOL(netdev_rx_csum_fault);
3382 #endif
3383 
3384 /* XXX: check that highmem exists at all on the given machine. */
3385 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3386 {
3387 #ifdef CONFIG_HIGHMEM
3388 	int i;
3389 
3390 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3391 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3392 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3393 
3394 			if (PageHighMem(skb_frag_page(frag)))
3395 				return 1;
3396 		}
3397 	}
3398 #endif
3399 	return 0;
3400 }
3401 
3402 /* If MPLS offload request, verify we are testing hardware MPLS features
3403  * instead of standard features for the netdev.
3404  */
3405 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3406 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3407 					   netdev_features_t features,
3408 					   __be16 type)
3409 {
3410 	if (eth_p_mpls(type))
3411 		features &= skb->dev->mpls_features;
3412 
3413 	return features;
3414 }
3415 #else
3416 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3417 					   netdev_features_t features,
3418 					   __be16 type)
3419 {
3420 	return features;
3421 }
3422 #endif
3423 
3424 static netdev_features_t harmonize_features(struct sk_buff *skb,
3425 	netdev_features_t features)
3426 {
3427 	__be16 type;
3428 
3429 	type = skb_network_protocol(skb, NULL);
3430 	features = net_mpls_features(skb, features, type);
3431 
3432 	if (skb->ip_summed != CHECKSUM_NONE &&
3433 	    !can_checksum_protocol(features, type)) {
3434 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3435 	}
3436 	if (illegal_highdma(skb->dev, skb))
3437 		features &= ~NETIF_F_SG;
3438 
3439 	return features;
3440 }
3441 
3442 netdev_features_t passthru_features_check(struct sk_buff *skb,
3443 					  struct net_device *dev,
3444 					  netdev_features_t features)
3445 {
3446 	return features;
3447 }
3448 EXPORT_SYMBOL(passthru_features_check);
3449 
3450 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3451 					     struct net_device *dev,
3452 					     netdev_features_t features)
3453 {
3454 	return vlan_features_check(skb, features);
3455 }
3456 
3457 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3458 					    struct net_device *dev,
3459 					    netdev_features_t features)
3460 {
3461 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3462 
3463 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
3464 		return features & ~NETIF_F_GSO_MASK;
3465 
3466 	if (!skb_shinfo(skb)->gso_type) {
3467 		skb_warn_bad_offload(skb);
3468 		return features & ~NETIF_F_GSO_MASK;
3469 	}
3470 
3471 	/* Support for GSO partial features requires software
3472 	 * intervention before we can actually process the packets,
3473 	 * so we need to strip support for any partial features now
3474 	 * and pull them back in after we have partially
3475 	 * segmented the frame.
3476 	 */
3477 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3478 		features &= ~dev->gso_partial_features;
3479 
3480 	/* Make sure to clear the IPv4 ID mangling feature if the
3481 	 * IPv4 header has the potential to be fragmented.
3482 	 */
3483 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3484 		struct iphdr *iph = skb->encapsulation ?
3485 				    inner_ip_hdr(skb) : ip_hdr(skb);
3486 
3487 		if (!(iph->frag_off & htons(IP_DF)))
3488 			features &= ~NETIF_F_TSO_MANGLEID;
3489 	}
3490 
3491 	return features;
3492 }
3493 
3494 netdev_features_t netif_skb_features(struct sk_buff *skb)
3495 {
3496 	struct net_device *dev = skb->dev;
3497 	netdev_features_t features = dev->features;
3498 
3499 	if (skb_is_gso(skb))
3500 		features = gso_features_check(skb, dev, features);
3501 
3502 	/* If encapsulation offload request, verify we are testing
3503 	 * hardware encapsulation features instead of standard
3504 	 * features for the netdev
3505 	 */
3506 	if (skb->encapsulation)
3507 		features &= dev->hw_enc_features;
3508 
3509 	if (skb_vlan_tagged(skb))
3510 		features = netdev_intersect_features(features,
3511 						     dev->vlan_features |
3512 						     NETIF_F_HW_VLAN_CTAG_TX |
3513 						     NETIF_F_HW_VLAN_STAG_TX);
3514 
3515 	if (dev->netdev_ops->ndo_features_check)
3516 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3517 								features);
3518 	else
3519 		features &= dflt_features_check(skb, dev, features);
3520 
3521 	return harmonize_features(skb, features);
3522 }
3523 EXPORT_SYMBOL(netif_skb_features);
3524 
3525 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3526 		    struct netdev_queue *txq, bool more)
3527 {
3528 	unsigned int len;
3529 	int rc;
3530 
3531 	if (dev_nit_active(dev))
3532 		dev_queue_xmit_nit(skb, dev);
3533 
3534 	len = skb->len;
3535 	PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
3536 	trace_net_dev_start_xmit(skb, dev);
3537 	rc = netdev_start_xmit(skb, dev, txq, more);
3538 	trace_net_dev_xmit(skb, rc, dev, len);
3539 
3540 	return rc;
3541 }
3542 
3543 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3544 				    struct netdev_queue *txq, int *ret)
3545 {
3546 	struct sk_buff *skb = first;
3547 	int rc = NETDEV_TX_OK;
3548 
3549 	while (skb) {
3550 		struct sk_buff *next = skb->next;
3551 
3552 		skb_mark_not_on_list(skb);
3553 		rc = xmit_one(skb, dev, txq, next != NULL);
3554 		if (unlikely(!dev_xmit_complete(rc))) {
3555 			skb->next = next;
3556 			goto out;
3557 		}
3558 
3559 		skb = next;
3560 		if (netif_tx_queue_stopped(txq) && skb) {
3561 			rc = NETDEV_TX_BUSY;
3562 			break;
3563 		}
3564 	}
3565 
3566 out:
3567 	*ret = rc;
3568 	return skb;
3569 }
3570 
3571 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3572 					  netdev_features_t features)
3573 {
3574 	if (skb_vlan_tag_present(skb) &&
3575 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3576 		skb = __vlan_hwaccel_push_inside(skb);
3577 	return skb;
3578 }
3579 
3580 int skb_csum_hwoffload_help(struct sk_buff *skb,
3581 			    const netdev_features_t features)
3582 {
3583 	if (unlikely(skb_csum_is_sctp(skb)))
3584 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3585 			skb_crc32c_csum_help(skb);
3586 
3587 	if (features & NETIF_F_HW_CSUM)
3588 		return 0;
3589 
3590 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3591 		switch (skb->csum_offset) {
3592 		case offsetof(struct tcphdr, check):
3593 		case offsetof(struct udphdr, check):
3594 			return 0;
3595 		}
3596 	}
3597 
3598 	return skb_checksum_help(skb);
3599 }
3600 EXPORT_SYMBOL(skb_csum_hwoffload_help);
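/* Note on the checks above: NETIF_F_HW_CSUM means the device can
 * checksum any protocol described by csum_start/csum_offset, while
 * NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM only cover plain TCP and UDP, which
 * is why csum_offset is verified before trusting those features.
 */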
3601 
3602 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3603 {
3604 	netdev_features_t features;
3605 
3606 	features = netif_skb_features(skb);
3607 	skb = validate_xmit_vlan(skb, features);
3608 	if (unlikely(!skb))
3609 		goto out_null;
3610 
3611 	skb = sk_validate_xmit_skb(skb, dev);
3612 	if (unlikely(!skb))
3613 		goto out_null;
3614 
3615 	if (netif_needs_gso(skb, features)) {
3616 		struct sk_buff *segs;
3617 
3618 		segs = skb_gso_segment(skb, features);
3619 		if (IS_ERR(segs)) {
3620 			goto out_kfree_skb;
3621 		} else if (segs) {
3622 			consume_skb(skb);
3623 			skb = segs;
3624 		}
3625 	} else {
3626 		if (skb_needs_linearize(skb, features) &&
3627 		    __skb_linearize(skb))
3628 			goto out_kfree_skb;
3629 
3630 		/* If packet is not checksummed and device does not
3631 		 * support checksumming for this protocol, complete
3632 		 * checksumming here.
3633 		 */
3634 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3635 			if (skb->encapsulation)
3636 				skb_set_inner_transport_header(skb,
3637 							       skb_checksum_start_offset(skb));
3638 			else
3639 				skb_set_transport_header(skb,
3640 							 skb_checksum_start_offset(skb));
3641 			if (skb_csum_hwoffload_help(skb, features))
3642 				goto out_kfree_skb;
3643 		}
3644 	}
3645 
3646 	skb = validate_xmit_xfrm(skb, features, again);
3647 
3648 	return skb;
3649 
3650 out_kfree_skb:
3651 	kfree_skb(skb);
3652 out_null:
3653 	dev_core_stats_tx_dropped_inc(dev);
3654 	return NULL;
3655 }
3656 
3657 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3658 {
3659 	struct sk_buff *next, *head = NULL, *tail;
3660 
3661 	for (; skb != NULL; skb = next) {
3662 		next = skb->next;
3663 		skb_mark_not_on_list(skb);
3664 
3665 		/* in case skb won't be segmented, point to itself */
3666 		skb->prev = skb;
3667 
3668 		skb = validate_xmit_skb(skb, dev, again);
3669 		if (!skb)
3670 			continue;
3671 
3672 		if (!head)
3673 			head = skb;
3674 		else
3675 			tail->next = skb;
3676 		/* If skb was segmented, skb->prev points to
3677 		 * the last segment. If not, it still contains skb.
3678 		 */
3679 		tail = skb->prev;
3680 	}
3681 	return head;
3682 }
3683 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3684 
3685 static void qdisc_pkt_len_init(struct sk_buff *skb)
3686 {
3687 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3688 
3689 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3690 
3691 	/* To get a more precise estimation of bytes sent on wire,
3692 	 * we add the header size of all additional segments to pkt_len
3693 	 */
3694 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3695 		unsigned int hdr_len;
3696 		u16 gso_segs = shinfo->gso_segs;
3697 
3698 		/* mac layer + network layer */
3699 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3700 
3701 		/* + transport layer */
3702 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3703 			const struct tcphdr *th;
3704 			struct tcphdr _tcphdr;
3705 
3706 			th = skb_header_pointer(skb, skb_transport_offset(skb),
3707 						sizeof(_tcphdr), &_tcphdr);
3708 			if (likely(th))
3709 				hdr_len += __tcp_hdrlen(th);
3710 		} else {
3711 			struct udphdr _udphdr;
3712 
3713 			if (skb_header_pointer(skb, skb_transport_offset(skb),
3714 					       sizeof(_udphdr), &_udphdr))
3715 				hdr_len += sizeof(struct udphdr);
3716 		}
3717 
3718 		if (shinfo->gso_type & SKB_GSO_DODGY)
3719 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3720 						shinfo->gso_size);
3721 
3722 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3723 	}
3724 }
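
/* Worked example (illustrative numbers, not from this file): a TCP GSO
 * skb carrying 3000 bytes of payload with gso_size = 1448 has
 * gso_segs = 3.  With 14 bytes of Ethernet, 20 of IPv4 and 20 of TCP
 * headers, hdr_len = 54, so
 *
 *	pkt_len = skb->len + (3 - 1) * 54
 *
 * which accounts for the two extra header copies that segmentation
 * will place on the wire.
 */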
3725 
3726 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3727 			     struct sk_buff **to_free,
3728 			     struct netdev_queue *txq)
3729 {
3730 	int rc;
3731 
3732 	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3733 	if (rc == NET_XMIT_SUCCESS)
3734 		trace_qdisc_enqueue(q, txq, skb);
3735 	return rc;
3736 }
3737 
3738 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3739 				 struct net_device *dev,
3740 				 struct netdev_queue *txq)
3741 {
3742 	spinlock_t *root_lock = qdisc_lock(q);
3743 	struct sk_buff *to_free = NULL;
3744 	bool contended;
3745 	int rc;
3746 
3747 	qdisc_calculate_pkt_len(skb, q);
3748 
3749 	if (q->flags & TCQ_F_NOLOCK) {
3750 		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3751 		    qdisc_run_begin(q)) {
3752 			/* Retest nolock_qdisc_is_empty() within the protection
3753 			 * of q->seqlock to protect against racing with requeuing.
3754 			 */
3755 			if (unlikely(!nolock_qdisc_is_empty(q))) {
3756 				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3757 				__qdisc_run(q);
3758 				qdisc_run_end(q);
3759 
3760 				goto no_lock_out;
3761 			}
3762 
3763 			qdisc_bstats_cpu_update(q, skb);
3764 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3765 			    !nolock_qdisc_is_empty(q))
3766 				__qdisc_run(q);
3767 
3768 			qdisc_run_end(q);
3769 			return NET_XMIT_SUCCESS;
3770 		}
3771 
3772 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3773 		qdisc_run(q);
3774 
3775 no_lock_out:
3776 		if (unlikely(to_free))
3777 			kfree_skb_list_reason(to_free,
3778 					      SKB_DROP_REASON_QDISC_DROP);
3779 		return rc;
3780 	}
3781 
3782 	/*
3783 	 * Heuristic to force contended enqueues to serialize on a
3784 	 * separate lock before trying to get the qdisc main lock.
3785 	 * This permits the qdisc->running owner to get the lock more
3786 	 * often and dequeue packets faster.
3787 	 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3788 	 * and then other tasks will only enqueue packets. The packets will be
3789 	 * sent after the qdisc owner is scheduled again. To prevent this
3790 	 * scenario the task always serializes on the lock.
3791 	 */
3792 	contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3793 	if (unlikely(contended))
3794 		spin_lock(&q->busylock);
3795 
3796 	spin_lock(root_lock);
3797 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3798 		__qdisc_drop(skb, &to_free);
3799 		rc = NET_XMIT_DROP;
3800 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3801 		   qdisc_run_begin(q)) {
3802 		/*
3803 		 * This is a work-conserving queue; there are no old skbs
3804 		 * waiting to be sent out; and the qdisc is not running -
3805 		 * xmit the skb directly.
3806 		 */
3807 
3808 		qdisc_bstats_update(q, skb);
3809 
3810 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3811 			if (unlikely(contended)) {
3812 				spin_unlock(&q->busylock);
3813 				contended = false;
3814 			}
3815 			__qdisc_run(q);
3816 		}
3817 
3818 		qdisc_run_end(q);
3819 		rc = NET_XMIT_SUCCESS;
3820 	} else {
3821 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3822 		if (qdisc_run_begin(q)) {
3823 			if (unlikely(contended)) {
3824 				spin_unlock(&q->busylock);
3825 				contended = false;
3826 			}
3827 			__qdisc_run(q);
3828 			qdisc_run_end(q);
3829 		}
3830 	}
3831 	spin_unlock(root_lock);
3832 	if (unlikely(to_free))
3833 		kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
3834 	if (unlikely(contended))
3835 		spin_unlock(&q->busylock);
3836 	return rc;
3837 }
3838 
3839 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3840 static void skb_update_prio(struct sk_buff *skb)
3841 {
3842 	const struct netprio_map *map;
3843 	const struct sock *sk;
3844 	unsigned int prioidx;
3845 
3846 	if (skb->priority)
3847 		return;
3848 	map = rcu_dereference_bh(skb->dev->priomap);
3849 	if (!map)
3850 		return;
3851 	sk = skb_to_full_sk(skb);
3852 	if (!sk)
3853 		return;
3854 
3855 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3856 
3857 	if (prioidx < map->priomap_len)
3858 		skb->priority = map->priomap[prioidx];
3859 }
3860 #else
3861 #define skb_update_prio(skb)
3862 #endif
3863 
3864 /**
3865  *	dev_loopback_xmit - loop back @skb
3866  *	@net: network namespace this loopback is happening in
3867  *	@sk:  sk needed to be a netfilter okfn
3868  *	@skb: buffer to transmit
3869  */
3870 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3871 {
3872 	skb_reset_mac_header(skb);
3873 	__skb_pull(skb, skb_network_offset(skb));
3874 	skb->pkt_type = PACKET_LOOPBACK;
3875 	if (skb->ip_summed == CHECKSUM_NONE)
3876 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3877 	WARN_ON(!skb_dst(skb));
3878 	skb_dst_force(skb);
3879 	netif_rx(skb);
3880 	return 0;
3881 }
3882 EXPORT_SYMBOL(dev_loopback_xmit);
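
/* Usage sketch (hypothetical call site; multicast loopback uses this
 * function as a netfilter okfn in a similar way):
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, skb, NULL, skb->dev, dev_loopback_xmit);
 *
 * The skb must already carry a dst, as the WARN_ON() above documents.
 */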
3883 
3884 #ifdef CONFIG_NET_EGRESS
3885 static struct sk_buff *
3886 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3887 {
3888 #ifdef CONFIG_NET_CLS_ACT
3889 	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3890 	struct tcf_result cl_res;
3891 
3892 	if (!miniq)
3893 		return skb;
3894 
3895 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3896 	tc_skb_cb(skb)->mru = 0;
3897 	tc_skb_cb(skb)->post_ct = false;
3898 	mini_qdisc_bstats_cpu_update(miniq, skb);
3899 
3900 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
3901 	case TC_ACT_OK:
3902 	case TC_ACT_RECLASSIFY:
3903 		skb->tc_index = TC_H_MIN(cl_res.classid);
3904 		break;
3905 	case TC_ACT_SHOT:
3906 		mini_qdisc_qstats_cpu_drop(miniq);
3907 		*ret = NET_XMIT_DROP;
3908 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
3909 		return NULL;
3910 	case TC_ACT_STOLEN:
3911 	case TC_ACT_QUEUED:
3912 	case TC_ACT_TRAP:
3913 		*ret = NET_XMIT_SUCCESS;
3914 		consume_skb(skb);
3915 		return NULL;
3916 	case TC_ACT_REDIRECT:
3917 		/* No need to push/pop skb's mac_header here on egress! */
3918 		skb_do_redirect(skb);
3919 		*ret = NET_XMIT_SUCCESS;
3920 		return NULL;
3921 	default:
3922 		break;
3923 	}
3924 #endif /* CONFIG_NET_CLS_ACT */
3925 
3926 	return skb;
3927 }
3928 
3929 static struct netdev_queue *
3930 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3931 {
3932 	int qm = skb_get_queue_mapping(skb);
3933 
3934 	return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3935 }
3936 
3937 static bool netdev_xmit_txqueue_skipped(void)
3938 {
3939 	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3940 }
3941 
3942 void netdev_xmit_skip_txqueue(bool skip)
3943 {
3944 	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3945 }
3946 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3947 #endif /* CONFIG_NET_EGRESS */
3948 
3949 #ifdef CONFIG_XPS
3950 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3951 			       struct xps_dev_maps *dev_maps, unsigned int tci)
3952 {
3953 	int tc = netdev_get_prio_tc_map(dev, skb->priority);
3954 	struct xps_map *map;
3955 	int queue_index = -1;
3956 
3957 	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
3958 		return queue_index;
3959 
3960 	tci *= dev_maps->num_tc;
3961 	tci += tc;
3962 
3963 	map = rcu_dereference(dev_maps->attr_map[tci]);
3964 	if (map) {
3965 		if (map->len == 1)
3966 			queue_index = map->queues[0];
3967 		else
3968 			queue_index = map->queues[reciprocal_scale(
3969 						skb_get_hash(skb), map->len)];
3970 		if (unlikely(queue_index >= dev->real_num_tx_queues))
3971 			queue_index = -1;
3972 	}
3973 	return queue_index;
3974 }
3975 #endif
3976 
3977 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3978 			 struct sk_buff *skb)
3979 {
3980 #ifdef CONFIG_XPS
3981 	struct xps_dev_maps *dev_maps;
3982 	struct sock *sk = skb->sk;
3983 	int queue_index = -1;
3984 
3985 	if (!static_key_false(&xps_needed))
3986 		return -1;
3987 
3988 	rcu_read_lock();
3989 	if (!static_key_false(&xps_rxqs_needed))
3990 		goto get_cpus_map;
3991 
3992 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
3993 	if (dev_maps) {
3994 		int tci = sk_rx_queue_get(sk);
3995 
3996 		if (tci >= 0)
3997 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3998 							  tci);
3999 	}
4000 
4001 get_cpus_map:
4002 	if (queue_index < 0) {
4003 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4004 		if (dev_maps) {
4005 			unsigned int tci = skb->sender_cpu - 1;
4006 
4007 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4008 							  tci);
4009 		}
4010 	}
4011 	rcu_read_unlock();
4012 
4013 	return queue_index;
4014 #else
4015 	return -1;
4016 #endif
4017 }
4018 
4019 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4020 		     struct net_device *sb_dev)
4021 {
4022 	return 0;
4023 }
4024 EXPORT_SYMBOL(dev_pick_tx_zero);
4025 
4026 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4027 		       struct net_device *sb_dev)
4028 {
4029 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4030 }
4031 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4032 
4033 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4034 		     struct net_device *sb_dev)
4035 {
4036 	struct sock *sk = skb->sk;
4037 	int queue_index = sk_tx_queue_get(sk);
4038 
4039 	sb_dev = sb_dev ? : dev;
4040 
4041 	if (queue_index < 0 || skb->ooo_okay ||
4042 	    queue_index >= dev->real_num_tx_queues) {
4043 		int new_index = get_xps_queue(dev, sb_dev, skb);
4044 
4045 		if (new_index < 0)
4046 			new_index = skb_tx_hash(dev, sb_dev, skb);
4047 
4048 		if (queue_index != new_index && sk &&
4049 		    sk_fullsock(sk) &&
4050 		    rcu_access_pointer(sk->sk_dst_cache))
4051 			sk_tx_queue_set(sk, new_index);
4052 
4053 		queue_index = new_index;
4054 	}
4055 
4056 	return queue_index;
4057 }
4058 EXPORT_SYMBOL(netdev_pick_tx);
4059 
4060 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4061 					 struct sk_buff *skb,
4062 					 struct net_device *sb_dev)
4063 {
4064 	int queue_index = 0;
4065 
4066 #ifdef CONFIG_XPS
4067 	u32 sender_cpu = skb->sender_cpu - 1;
4068 
4069 	if (sender_cpu >= (u32)NR_CPUS)
4070 		skb->sender_cpu = raw_smp_processor_id() + 1;
4071 #endif
4072 
4073 	if (dev->real_num_tx_queues != 1) {
4074 		const struct net_device_ops *ops = dev->netdev_ops;
4075 
4076 		if (ops->ndo_select_queue)
4077 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4078 		else
4079 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
4080 
4081 		queue_index = netdev_cap_txqueue(dev, queue_index);
4082 	}
4083 
4084 	skb_set_queue_mapping(skb, queue_index);
4085 	return netdev_get_tx_queue(dev, queue_index);
4086 }
4087 
4088 /**
4089  *	__dev_queue_xmit - transmit a buffer
4090  *	@skb: buffer to transmit
4091  *	@sb_dev: subordinate device used for L2 forwarding offload
4092  *
4093  *	Queue a buffer for transmission to a network device. The caller must
4094  *	have set the device and priority and built the buffer before calling
4095  *	this function. The function can be called from an interrupt.
4096  *
4097  *	A negative errno code is returned on a failure. A success does not
4098  *	guarantee the frame will be transmitted as it may be dropped due
4099  *	to congestion or traffic shaping.
4100  *
4101  * -----------------------------------------------------------------------------------
4102  *      I notice this method can also return errors from the queue disciplines,
4103  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
4104  *      be positive.
4105  *
4106  *      Regardless of the return value, the skb is consumed, so it is currently
4107  *      difficult to retry a send to this method.  (You can bump the ref count
4108  *      before sending to hold a reference for retry if you are careful.)
4109  *
4110  *      When calling this method, interrupts MUST be enabled.  This is because
4111  *      the BH enable code must have IRQs enabled so that it will not deadlock.
4112  *          --BLG
4113  */
4114 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4115 {
4116 	struct net_device *dev = skb->dev;
4117 	struct netdev_queue *txq = NULL;
4118 	struct Qdisc *q;
4119 	int rc = -ENOMEM;
4120 	bool again = false;
4121 
4122 	skb_reset_mac_header(skb);
4123 
4124 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4125 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4126 
4127 	/* Disable soft irqs for various locks below. Also
4128 	 * stops preemption for RCU.
4129 	 */
4130 	rcu_read_lock_bh();
4131 
4132 	skb_update_prio(skb);
4133 
4134 	qdisc_pkt_len_init(skb);
4135 #ifdef CONFIG_NET_CLS_ACT
4136 	skb->tc_at_ingress = 0;
4137 #endif
4138 #ifdef CONFIG_NET_EGRESS
4139 	if (static_branch_unlikely(&egress_needed_key)) {
4140 		if (nf_hook_egress_active()) {
4141 			skb = nf_hook_egress(skb, &rc, dev);
4142 			if (!skb)
4143 				goto out;
4144 		}
4145 
4146 		netdev_xmit_skip_txqueue(false);
4147 
4148 		nf_skip_egress(skb, true);
4149 		skb = sch_handle_egress(skb, &rc, dev);
4150 		if (!skb)
4151 			goto out;
4152 		nf_skip_egress(skb, false);
4153 
4154 		if (netdev_xmit_txqueue_skipped())
4155 			txq = netdev_tx_queue_mapping(dev, skb);
4156 	}
4157 #endif
4158 	/* If the device/qdisc doesn't need skb->dst, release it right now
4159 	 * while it's hot in this CPU's cache.
4160 	 */
4161 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4162 		skb_dst_drop(skb);
4163 	else
4164 		skb_dst_force(skb);
4165 
4166 	if (!txq)
4167 		txq = netdev_core_pick_tx(dev, skb, sb_dev);
4168 
4169 	q = rcu_dereference_bh(txq->qdisc);
4170 
4171 	trace_net_dev_queue(skb);
4172 	if (q->enqueue) {
4173 		rc = __dev_xmit_skb(skb, q, dev, txq);
4174 		goto out;
4175 	}
4176 
4177 	/* The device has no queue. Common case for software devices:
4178 	 * loopback, all sorts of tunnels...
4179 	 *
4180 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4181 	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
4182 	 * counters.)
4183 	 * However, it is possible that they rely on the protection
4184 	 * made by us here.
4185 	 *
4186 	 * Check this and shoot the lock. It is not prone to deadlocks.
4187 	 * Or shoot the noqueue qdisc, it is even simpler 8)
4188 	 */
4189 	if (dev->flags & IFF_UP) {
4190 		int cpu = smp_processor_id(); /* ok because BHs are off */
4191 
4192 		/* Other cpus might concurrently change txq->xmit_lock_owner
4193 		 * to -1 or to their cpu id, but not to our id.
4194 		 */
4195 		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4196 			if (dev_xmit_recursion())
4197 				goto recursion_alert;
4198 
4199 			skb = validate_xmit_skb(skb, dev, &again);
4200 			if (!skb)
4201 				goto out;
4202 
4203 			PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4204 			HARD_TX_LOCK(dev, txq, cpu);
4205 
4206 			if (!netif_xmit_stopped(txq)) {
4207 				dev_xmit_recursion_inc();
4208 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4209 				dev_xmit_recursion_dec();
4210 				if (dev_xmit_complete(rc)) {
4211 					HARD_TX_UNLOCK(dev, txq);
4212 					goto out;
4213 				}
4214 			}
4215 			HARD_TX_UNLOCK(dev, txq);
4216 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4217 					     dev->name);
4218 		} else {
4219 			/* Recursion is detected! It is possible,
4220 			 * unfortunately
4221 			 */
4222 recursion_alert:
4223 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4224 					     dev->name);
4225 		}
4226 	}
4227 
4228 	rc = -ENETDOWN;
4229 	rcu_read_unlock_bh();
4230 
4231 	dev_core_stats_tx_dropped_inc(dev);
4232 	kfree_skb_list(skb);
4233 	return rc;
4234 out:
4235 	rcu_read_unlock_bh();
4236 	return rc;
4237 }
4238 
4239 int dev_queue_xmit(struct sk_buff *skb)
4240 {
4241 	return __dev_queue_xmit(skb, NULL);
4242 }
4243 EXPORT_SYMBOL(dev_queue_xmit);
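
/* Usage sketch (hypothetical caller): a protocol layer hands a fully
 * built skb to the stack.  The skb is consumed on both success and
 * failure, so it must not be freed or retried afterwards:
 *
 *	skb->dev = dev;			// chosen by routing
 *	skb->priority = sk->sk_priority;
 *	rc = dev_queue_xmit(skb);
 *	if (rc != NET_XMIT_SUCCESS)
 *		stats->tx_dropped++;	// hypothetical accounting
 */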
4244 
4245 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4246 {
4247 	return __dev_queue_xmit(skb, sb_dev);
4248 }
4249 EXPORT_SYMBOL(dev_queue_xmit_accel);
4250 
4251 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4252 {
4253 	struct net_device *dev = skb->dev;
4254 	struct sk_buff *orig_skb = skb;
4255 	struct netdev_queue *txq;
4256 	int ret = NETDEV_TX_BUSY;
4257 	bool again = false;
4258 
4259 	if (unlikely(!netif_running(dev) ||
4260 		     !netif_carrier_ok(dev)))
4261 		goto drop;
4262 
4263 	skb = validate_xmit_skb_list(skb, dev, &again);
4264 	if (skb != orig_skb)
4265 		goto drop;
4266 
4267 	skb_set_queue_mapping(skb, queue_id);
4268 	txq = skb_get_tx_queue(dev, skb);
4269 	PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4270 
4271 	local_bh_disable();
4272 
4273 	dev_xmit_recursion_inc();
4274 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4275 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4276 		ret = netdev_start_xmit(skb, dev, txq, false);
4277 	HARD_TX_UNLOCK(dev, txq);
4278 	dev_xmit_recursion_dec();
4279 
4280 	local_bh_enable();
4281 	return ret;
4282 drop:
4283 	dev_core_stats_tx_dropped_inc(dev);
4284 	kfree_skb_list(skb);
4285 	return NET_XMIT_DROP;
4286 }
4287 EXPORT_SYMBOL(__dev_direct_xmit);
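
/* Ownership note (sketch, mirroring the dev_direct_xmit() wrapper in
 * netdevice.h): on the drop path above the skb is freed here, but on
 * NETDEV_TX_BUSY it is not, so a convenience caller can do:
 *
 *	ret = __dev_direct_xmit(skb, queue_id);
 *	if (!dev_xmit_complete(ret))
 *		kfree_skb(skb);
 */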
4288 
4289 /*************************************************************************
4290  *			Receiver routines
4291  *************************************************************************/
4292 
4293 int netdev_max_backlog __read_mostly = 1000;
4294 EXPORT_SYMBOL(netdev_max_backlog);
4295 
4296 int netdev_tstamp_prequeue __read_mostly = 1;
4297 int netdev_budget __read_mostly = 300;
4298 /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4299 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4300 int weight_p __read_mostly = 64;           /* old backlog weight */
4301 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4302 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4303 int dev_rx_weight __read_mostly = 64;
4304 int dev_tx_weight __read_mostly = 64;
4305 
4306 /* Called with IRQs disabled */
4307 static inline void ____napi_schedule(struct softnet_data *sd,
4308 				     struct napi_struct *napi)
4309 {
4310 	struct task_struct *thread;
4311 
4312 	lockdep_assert_irqs_disabled();
4313 
4314 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4315 		/* Paired with smp_mb__before_atomic() in
4316 		 * napi_enable()/dev_set_threaded().
4317 		 * Use READ_ONCE() to guarantee a complete
4318 		 * read on napi->thread. Only call
4319 		 * wake_up_process() when it's not NULL.
4320 		 */
4321 		thread = READ_ONCE(napi->thread);
4322 		if (thread) {
4323 			/* Avoid doing set_bit() if the thread is in
4324 			 * INTERRUPTIBLE state, because napi_thread_wait()
4325 			 * makes sure to proceed with napi polling
4326 			 * if the thread is explicitly woken from here.
4327 			 */
4328 			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4329 				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4330 			wake_up_process(thread);
4331 			return;
4332 		}
4333 	}
4334 
4335 	list_add_tail(&napi->poll_list, &sd->poll_list);
4336 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4337 }
4338 
4339 #ifdef CONFIG_RPS
4340 
4341 /* One global table that all flow-based protocols share. */
4342 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4343 EXPORT_SYMBOL(rps_sock_flow_table);
4344 u32 rps_cpu_mask __read_mostly;
4345 EXPORT_SYMBOL(rps_cpu_mask);
4346 
4347 struct static_key_false rps_needed __read_mostly;
4348 EXPORT_SYMBOL(rps_needed);
4349 struct static_key_false rfs_needed __read_mostly;
4350 EXPORT_SYMBOL(rfs_needed);
4351 
4352 static struct rps_dev_flow *
4353 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4354 	    struct rps_dev_flow *rflow, u16 next_cpu)
4355 {
4356 	if (next_cpu < nr_cpu_ids) {
4357 #ifdef CONFIG_RFS_ACCEL
4358 		struct netdev_rx_queue *rxqueue;
4359 		struct rps_dev_flow_table *flow_table;
4360 		struct rps_dev_flow *old_rflow;
4361 		u32 flow_id;
4362 		u16 rxq_index;
4363 		int rc;
4364 
4365 		/* Should we steer this flow to a different hardware queue? */
4366 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4367 		    !(dev->features & NETIF_F_NTUPLE))
4368 			goto out;
4369 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4370 		if (rxq_index == skb_get_rx_queue(skb))
4371 			goto out;
4372 
4373 		rxqueue = dev->_rx + rxq_index;
4374 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4375 		if (!flow_table)
4376 			goto out;
4377 		flow_id = skb_get_hash(skb) & flow_table->mask;
4378 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4379 							rxq_index, flow_id);
4380 		if (rc < 0)
4381 			goto out;
4382 		old_rflow = rflow;
4383 		rflow = &flow_table->flows[flow_id];
4384 		rflow->filter = rc;
4385 		if (old_rflow->filter == rflow->filter)
4386 			old_rflow->filter = RPS_NO_FILTER;
4387 	out:
4388 #endif
4389 		rflow->last_qtail =
4390 			per_cpu(softnet_data, next_cpu).input_queue_head;
4391 	}
4392 
4393 	rflow->cpu = next_cpu;
4394 	return rflow;
4395 }
4396 
4397 /*
4398  * get_rps_cpu is called from netif_receive_skb and returns the target
4399  * CPU from the RPS map of the receiving queue for a given skb.
4400  * rcu_read_lock must be held on entry.
4401  */
4402 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4403 		       struct rps_dev_flow **rflowp)
4404 {
4405 	const struct rps_sock_flow_table *sock_flow_table;
4406 	struct netdev_rx_queue *rxqueue = dev->_rx;
4407 	struct rps_dev_flow_table *flow_table;
4408 	struct rps_map *map;
4409 	int cpu = -1;
4410 	u32 tcpu;
4411 	u32 hash;
4412 
4413 	if (skb_rx_queue_recorded(skb)) {
4414 		u16 index = skb_get_rx_queue(skb);
4415 
4416 		if (unlikely(index >= dev->real_num_rx_queues)) {
4417 			WARN_ONCE(dev->real_num_rx_queues > 1,
4418 				  "%s received packet on queue %u, but number "
4419 				  "of RX queues is %u\n",
4420 				  dev->name, index, dev->real_num_rx_queues);
4421 			goto done;
4422 		}
4423 		rxqueue += index;
4424 	}
4425 
4426 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4427 
4428 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4429 	map = rcu_dereference(rxqueue->rps_map);
4430 	if (!flow_table && !map)
4431 		goto done;
4432 
4433 	skb_reset_network_header(skb);
4434 	hash = skb_get_hash(skb);
4435 	if (!hash)
4436 		goto done;
4437 
4438 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4439 	if (flow_table && sock_flow_table) {
4440 		struct rps_dev_flow *rflow;
4441 		u32 next_cpu;
4442 		u32 ident;
4443 
4444 		/* First check the global flow table for a match */
4445 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4446 		if ((ident ^ hash) & ~rps_cpu_mask)
4447 			goto try_rps;
4448 
4449 		next_cpu = ident & rps_cpu_mask;
4450 
4451 		/* OK, now we know there is a match,
4452 		 * we can look at the local (per receive queue) flow table
4453 		 */
4454 		rflow = &flow_table->flows[hash & flow_table->mask];
4455 		tcpu = rflow->cpu;
4456 
4457 		/*
4458 		 * If the desired CPU (where last recvmsg was done) is
4459 		 * different from current CPU (one in the rx-queue flow
4460 		 * table entry), switch if one of the following holds:
4461 		 *   - Current CPU is unset (>= nr_cpu_ids).
4462 		 *   - Current CPU is offline.
4463 		 *   - The current CPU's queue tail has advanced beyond the
4464 		 *     last packet that was enqueued using this table entry.
4465 		 *     This guarantees that all previous packets for the flow
4466 		 *     have been dequeued, thus preserving in-order delivery.
4467 		 */
4468 		if (unlikely(tcpu != next_cpu) &&
4469 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4470 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4471 		      rflow->last_qtail)) >= 0)) {
4472 			tcpu = next_cpu;
4473 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4474 		}
4475 
4476 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4477 			*rflowp = rflow;
4478 			cpu = tcpu;
4479 			goto done;
4480 		}
4481 	}
4482 
4483 try_rps:
4484 
4485 	if (map) {
4486 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4487 		if (cpu_online(tcpu)) {
4488 			cpu = tcpu;
4489 			goto done;
4490 		}
4491 	}
4492 
4493 done:
4494 	return cpu;
4495 }
4496 
4497 #ifdef CONFIG_RFS_ACCEL
4498 
4499 /**
4500  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4501  * @dev: Device on which the filter was set
4502  * @rxq_index: RX queue index
4503  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4504  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4505  *
4506  * Drivers that implement ndo_rx_flow_steer() should periodically call
4507  * this function for each installed filter and remove the filters for
4508  * which it returns %true.
4509  */
4510 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4511 			 u32 flow_id, u16 filter_id)
4512 {
4513 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4514 	struct rps_dev_flow_table *flow_table;
4515 	struct rps_dev_flow *rflow;
4516 	bool expire = true;
4517 	unsigned int cpu;
4518 
4519 	rcu_read_lock();
4520 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4521 	if (flow_table && flow_id <= flow_table->mask) {
4522 		rflow = &flow_table->flows[flow_id];
4523 		cpu = READ_ONCE(rflow->cpu);
4524 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4525 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4526 			   rflow->last_qtail) <
4527 		     (int)(10 * flow_table->mask)))
4528 			expire = false;
4529 	}
4530 	rcu_read_unlock();
4531 	return expire;
4532 }
4533 EXPORT_SYMBOL(rps_may_expire_flow);
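
/* Expiry-scan sketch (the driver state below is hypothetical): a driver
 * that installed filters via ndo_rx_flow_steer() might age them like so:
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct my_filter *f = &priv->filters[i];
 *
 *		if (f->active &&
 *		    rps_may_expire_flow(dev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_remove_hw_filter(priv, f);
 *	}
 */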
4534 
4535 #endif /* CONFIG_RFS_ACCEL */
4536 
4537 /* Called from hardirq (IPI) context */
4538 static void rps_trigger_softirq(void *data)
4539 {
4540 	struct softnet_data *sd = data;
4541 
4542 	____napi_schedule(sd, &sd->backlog);
4543 	sd->received_rps++;
4544 }
4545 
4546 #endif /* CONFIG_RPS */
4547 
4548 /*
4549  * Check whether this softnet_data structure belongs to another CPU.
4550  * If so, queue it on our IPI list and return 1;
4551  * otherwise return 0.
4552  */
4553 static int napi_schedule_rps(struct softnet_data *sd)
4554 {
4555 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4556 
4557 #ifdef CONFIG_RPS
4558 	if (sd != mysd) {
4559 		sd->rps_ipi_next = mysd->rps_ipi_list;
4560 		mysd->rps_ipi_list = sd;
4561 
4562 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4563 		return 1;
4564 	}
4565 #endif /* CONFIG_RPS */
4566 	__napi_schedule_irqoff(&mysd->backlog);
4567 	return 0;
4568 }
4569 
4570 #ifdef CONFIG_NET_FLOW_LIMIT
4571 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4572 #endif
4573 
4574 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4575 {
4576 #ifdef CONFIG_NET_FLOW_LIMIT
4577 	struct sd_flow_limit *fl;
4578 	struct softnet_data *sd;
4579 	unsigned int old_flow, new_flow;
4580 
4581 	if (qlen < (netdev_max_backlog >> 1))
4582 		return false;
4583 
4584 	sd = this_cpu_ptr(&softnet_data);
4585 
4586 	rcu_read_lock();
4587 	fl = rcu_dereference(sd->flow_limit);
4588 	if (fl) {
4589 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4590 		old_flow = fl->history[fl->history_head];
4591 		fl->history[fl->history_head] = new_flow;
4592 
4593 		fl->history_head++;
4594 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4595 
4596 		if (likely(fl->buckets[old_flow]))
4597 			fl->buckets[old_flow]--;
4598 
4599 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4600 			fl->count++;
4601 			rcu_read_unlock();
4602 			return true;
4603 		}
4604 	}
4605 	rcu_read_unlock();
4606 #endif
4607 	return false;
4608 }
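
/* In other words, assuming the default FLOW_LIMIT_HISTORY of 128: once
 * the backlog is more than half full, a single flow that accounts for
 * more than 64 of the last 128 enqueued packets has its further packets
 * dropped, so one heavy flow cannot monopolise the per-CPU backlog.
 */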
4609 
4610 /*
4611  * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
4612  * queue (which may belong to a remote CPU).
4613  */
4614 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4615 			      unsigned int *qtail)
4616 {
4617 	enum skb_drop_reason reason;
4618 	struct softnet_data *sd;
4619 	unsigned long flags;
4620 	unsigned int qlen;
4621 
4622 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
4623 	sd = &per_cpu(softnet_data, cpu);
4624 
4625 	rps_lock_irqsave(sd, &flags);
4626 	if (!netif_running(skb->dev))
4627 		goto drop;
4628 	qlen = skb_queue_len(&sd->input_pkt_queue);
4629 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4630 		if (qlen) {
4631 enqueue:
4632 			__skb_queue_tail(&sd->input_pkt_queue, skb);
4633 			input_queue_tail_incr_save(sd, qtail);
4634 			rps_unlock_irq_restore(sd, &flags);
4635 			return NET_RX_SUCCESS;
4636 		}
4637 
4638 		/* Schedule NAPI for the backlog device.
4639 		 * We can use a non-atomic operation since we own the queue lock.
4640 		 */
4641 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4642 			napi_schedule_rps(sd);
4643 		goto enqueue;
4644 	}
4645 	reason = SKB_DROP_REASON_CPU_BACKLOG;
4646 
4647 drop:
4648 	sd->dropped++;
4649 	rps_unlock_irq_restore(sd, &flags);
4650 
4651 	dev_core_stats_rx_dropped_inc(skb->dev);
4652 	kfree_skb_reason(skb, reason);
4653 	return NET_RX_DROP;
4654 }
4655 
4656 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4657 {
4658 	struct net_device *dev = skb->dev;
4659 	struct netdev_rx_queue *rxqueue;
4660 
4661 	rxqueue = dev->_rx;
4662 
4663 	if (skb_rx_queue_recorded(skb)) {
4664 		u16 index = skb_get_rx_queue(skb);
4665 
4666 		if (unlikely(index >= dev->real_num_rx_queues)) {
4667 			WARN_ONCE(dev->real_num_rx_queues > 1,
4668 				  "%s received packet on queue %u, but number "
4669 				  "of RX queues is %u\n",
4670 				  dev->name, index, dev->real_num_rx_queues);
4671 
4672 			return rxqueue; /* Return first rxqueue */
4673 		}
4674 		rxqueue += index;
4675 	}
4676 	return rxqueue;
4677 }
4678 
4679 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4680 			     struct bpf_prog *xdp_prog)
4681 {
4682 	void *orig_data, *orig_data_end, *hard_start;
4683 	struct netdev_rx_queue *rxqueue;
4684 	bool orig_bcast, orig_host;
4685 	u32 mac_len, frame_sz;
4686 	__be16 orig_eth_type;
4687 	struct ethhdr *eth;
4688 	u32 metalen, act;
4689 	int off;
4690 
4691 	/* The XDP program wants to see the packet starting at the MAC
4692 	 * header.
4693 	 */
4694 	mac_len = skb->data - skb_mac_header(skb);
4695 	hard_start = skb->data - skb_headroom(skb);
4696 
4697 	/* SKB "head" area always has tailroom for skb_shared_info */
4698 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4699 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4700 
4701 	rxqueue = netif_get_rxqueue(skb);
4702 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4703 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4704 			 skb_headlen(skb) + mac_len, true);
4705 
4706 	orig_data_end = xdp->data_end;
4707 	orig_data = xdp->data;
4708 	eth = (struct ethhdr *)xdp->data;
4709 	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4710 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4711 	orig_eth_type = eth->h_proto;
4712 
4713 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4714 
4715 	/* check if bpf_xdp_adjust_head was used */
4716 	off = xdp->data - orig_data;
4717 	if (off) {
4718 		if (off > 0)
4719 			__skb_pull(skb, off);
4720 		else if (off < 0)
4721 			__skb_push(skb, -off);
4722 
4723 		skb->mac_header += off;
4724 		skb_reset_network_header(skb);
4725 	}
4726 
4727 	/* check if bpf_xdp_adjust_tail was used */
4728 	off = xdp->data_end - orig_data_end;
4729 	if (off != 0) {
4730 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4731 		skb->len += off; /* positive on grow, negative on shrink */
4732 	}
4733 
4734 	/* check if XDP changed the eth hdr such that the SKB needs an update */
4735 	eth = (struct ethhdr *)xdp->data;
4736 	if ((orig_eth_type != eth->h_proto) ||
4737 	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
4738 						  skb->dev->dev_addr)) ||
4739 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4740 		__skb_push(skb, ETH_HLEN);
4741 		skb->pkt_type = PACKET_HOST;
4742 		skb->protocol = eth_type_trans(skb, skb->dev);
4743 	}
4744 
4745 	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4746 	 * before calling us again on redirect path. We do not call do_redirect
4747 	 * as we leave that up to the caller.
4748 	 *
4749 	 * Caller is responsible for managing lifetime of skb (i.e. calling
4750 	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4751 	 */
4752 	switch (act) {
4753 	case XDP_REDIRECT:
4754 	case XDP_TX:
4755 		__skb_push(skb, mac_len);
4756 		break;
4757 	case XDP_PASS:
4758 		metalen = xdp->data - xdp->data_meta;
4759 		if (metalen)
4760 			skb_metadata_set(skb, metalen);
4761 		break;
4762 	}
4763 
4764 	return act;
4765 }
4766 
4767 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4768 				     struct xdp_buff *xdp,
4769 				     struct bpf_prog *xdp_prog)
4770 {
4771 	u32 act = XDP_DROP;
4772 
4773 	/* Reinjected packets coming from act_mirred or similar should
4774 	 * not get XDP generic processing.
4775 	 */
4776 	if (skb_is_redirected(skb))
4777 		return XDP_PASS;
4778 
4779 	/* XDP packets must be linear and must have sufficient headroom
4780 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
4781 	 * XDP also provides, so we need to enforce it here as well.
4782 	 */
4783 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4784 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4785 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4786 		int troom = skb->tail + skb->data_len - skb->end;
4787 
4788 		/* In case we have to go down the path and also linearize,
4789 		 * then let's do the pskb_expand_head() work just once here.
4790 		 */
4791 		if (pskb_expand_head(skb,
4792 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4793 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4794 			goto do_drop;
4795 		if (skb_linearize(skb))
4796 			goto do_drop;
4797 	}
4798 
4799 	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4800 	switch (act) {
4801 	case XDP_REDIRECT:
4802 	case XDP_TX:
4803 	case XDP_PASS:
4804 		break;
4805 	default:
4806 		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4807 		fallthrough;
4808 	case XDP_ABORTED:
4809 		trace_xdp_exception(skb->dev, xdp_prog, act);
4810 		fallthrough;
4811 	case XDP_DROP:
4812 	do_drop:
4813 		kfree_skb(skb);
4814 		break;
4815 	}
4816 
4817 	return act;
4818 }
4819 
4820 /* When doing generic XDP we have to bypass the qdisc layer and the
4821  * network taps in order to match in-driver-XDP behavior.
4822  */
4823 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4824 {
4825 	struct net_device *dev = skb->dev;
4826 	struct netdev_queue *txq;
4827 	bool free_skb = true;
4828 	int cpu, rc;
4829 
4830 	txq = netdev_core_pick_tx(dev, skb, NULL);
4831 	cpu = smp_processor_id();
4832 	HARD_TX_LOCK(dev, txq, cpu);
4833 	if (!netif_xmit_stopped(txq)) {
4834 		rc = netdev_start_xmit(skb, dev, txq, 0);
4835 		if (dev_xmit_complete(rc))
4836 			free_skb = false;
4837 	}
4838 	HARD_TX_UNLOCK(dev, txq);
4839 	if (free_skb) {
4840 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
4841 		kfree_skb(skb);
4842 	}
4843 }
4844 
4845 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4846 
4847 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4848 {
4849 	if (xdp_prog) {
4850 		struct xdp_buff xdp;
4851 		u32 act;
4852 		int err;
4853 
4854 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4855 		if (act != XDP_PASS) {
4856 			switch (act) {
4857 			case XDP_REDIRECT:
4858 				err = xdp_do_generic_redirect(skb->dev, skb,
4859 							      &xdp, xdp_prog);
4860 				if (err)
4861 					goto out_redir;
4862 				break;
4863 			case XDP_TX:
4864 				generic_xdp_tx(skb, xdp_prog);
4865 				break;
4866 			}
4867 			return XDP_DROP;
4868 		}
4869 	}
4870 	return XDP_PASS;
4871 out_redir:
4872 	kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
4873 	return XDP_DROP;
4874 }
4875 EXPORT_SYMBOL_GPL(do_xdp_generic);
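
/* Usage sketch (modelled loosely on drivers such as tun; the program
 * pointer below is hypothetical):
 *
 *	rcu_read_lock();
 *	xdp_prog = rcu_dereference(xdp_prog_ptr);
 *	if (xdp_prog && do_xdp_generic(xdp_prog, skb) != XDP_PASS) {
 *		rcu_read_unlock();
 *		return;		// skb was consumed by the XDP program
 *	}
 *	rcu_read_unlock();
 *	netif_rx(skb);
 */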
4876 
4877 static int netif_rx_internal(struct sk_buff *skb)
4878 {
4879 	int ret;
4880 
4881 	net_timestamp_check(netdev_tstamp_prequeue, skb);
4882 
4883 	trace_netif_rx(skb);
4884 
4885 #ifdef CONFIG_RPS
4886 	if (static_branch_unlikely(&rps_needed)) {
4887 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4888 		int cpu;
4889 
4890 		rcu_read_lock();
4891 
4892 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4893 		if (cpu < 0)
4894 			cpu = smp_processor_id();
4895 
4896 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4897 
4898 		rcu_read_unlock();
4899 	} else
4900 #endif
4901 	{
4902 		unsigned int qtail;
4903 
4904 		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
4905 	}
4906 	return ret;
4907 }
4908 
4909 /**
4910  *	__netif_rx	-	Slightly optimized version of netif_rx
4911  *	@skb: buffer to post
4912  *
4913  *	This behaves as netif_rx except that it does not disable bottom halves.
4914  *	As a result this function may only be invoked from the interrupt context
4915  *	(either hard or soft interrupt).
4916  */
4917 int __netif_rx(struct sk_buff *skb)
4918 {
4919 	int ret;
4920 
4921 	lockdep_assert_once(hardirq_count() | softirq_count());
4922 
4923 	trace_netif_rx_entry(skb);
4924 	ret = netif_rx_internal(skb);
4925 	trace_netif_rx_exit(ret);
4926 	return ret;
4927 }
4928 EXPORT_SYMBOL(__netif_rx);
4929 
4930 /**
4931  *	netif_rx	-	post buffer to the network code
4932  *	@skb: buffer to post
4933  *
4934  *	This function receives a packet from a device driver and queues it for
4935  *	the upper (protocol) levels to process via the backlog NAPI device. It
4936  *	always succeeds. The buffer may be dropped during processing for
4937  *	congestion control or by the protocol layers.
4938  *	The network buffer is passed via the backlog NAPI device. Modern NIC
4939  *	drivers should use NAPI and GRO.
4940  *	This function can be used from interrupt and from process context. A
4941  *	caller from process context must not disable interrupts before invoking
4942  *	this function.
4943  *
4944  *	return values:
4945  *	NET_RX_SUCCESS	(no congestion)
4946  *	NET_RX_DROP     (packet was dropped)
4947  *
4948  */
4949 int netif_rx(struct sk_buff *skb)
4950 {
4951 	bool need_bh_off = !(hardirq_count() | softirq_count());
4952 	int ret;
4953 
4954 	if (need_bh_off)
4955 		local_bh_disable();
4956 	trace_netif_rx_entry(skb);
4957 	ret = netif_rx_internal(skb);
4958 	trace_netif_rx_exit(ret);
4959 	if (need_bh_off)
4960 		local_bh_enable();
4961 	return ret;
4962 }
4963 EXPORT_SYMBOL(netif_rx);
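
/* Usage sketch (hypothetical non-NAPI driver receive path):
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (!skb)
 *		return;				// drop on allocation failure
 *	skb_put_data(skb, rx_buf, pkt_len);	// copy from device buffer
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */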
4964 
4965 static __latent_entropy void net_tx_action(struct softirq_action *h)
4966 {
4967 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4968 
4969 	if (sd->completion_queue) {
4970 		struct sk_buff *clist;
4971 
4972 		local_irq_disable();
4973 		clist = sd->completion_queue;
4974 		sd->completion_queue = NULL;
4975 		local_irq_enable();
4976 
4977 		while (clist) {
4978 			struct sk_buff *skb = clist;
4979 
4980 			clist = clist->next;
4981 
4982 			WARN_ON(refcount_read(&skb->users));
4983 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4984 				trace_consume_skb(skb);
4985 			else
4986 				trace_kfree_skb(skb, net_tx_action,
4987 						SKB_DROP_REASON_NOT_SPECIFIED);
4988 
4989 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4990 				__kfree_skb(skb);
4991 			else
4992 				__kfree_skb_defer(skb);
4993 		}
4994 	}
4995 
4996 	if (sd->output_queue) {
4997 		struct Qdisc *head;
4998 
4999 		local_irq_disable();
5000 		head = sd->output_queue;
5001 		sd->output_queue = NULL;
5002 		sd->output_queue_tailp = &sd->output_queue;
5003 		local_irq_enable();
5004 
5005 		rcu_read_lock();
5006 
5007 		while (head) {
5008 			struct Qdisc *q = head;
5009 			spinlock_t *root_lock = NULL;
5010 
5011 			head = head->next_sched;
5012 
5013 			/* We need to make sure head->next_sched is read
5014 			 * before clearing __QDISC_STATE_SCHED
5015 			 */
5016 			smp_mb__before_atomic();
5017 
5018 			if (!(q->flags & TCQ_F_NOLOCK)) {
5019 				root_lock = qdisc_lock(q);
5020 				spin_lock(root_lock);
5021 			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5022 						     &q->state))) {
5023 				/* There is a synchronize_net() between
5024 				 * STATE_DEACTIVATED flag being set and
5025 				 * qdisc_reset()/some_qdisc_is_busy() in
5026 				 * dev_deactivate(), so we can safely bail out
5027 				 * early here to avoid data race between
5028 				 * qdisc_deactivate() and some_qdisc_is_busy()
5029 				 * for lockless qdisc.
5030 				 */
5031 				clear_bit(__QDISC_STATE_SCHED, &q->state);
5032 				continue;
5033 			}
5034 
5035 			clear_bit(__QDISC_STATE_SCHED, &q->state);
5036 			qdisc_run(q);
5037 			if (root_lock)
5038 				spin_unlock(root_lock);
5039 		}
5040 
5041 		rcu_read_unlock();
5042 	}
5043 
5044 	xfrm_dev_backlog(sd);
5045 }
5046 
5047 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5048 /* This hook is defined here for ATM LANE */
5049 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5050 			     unsigned char *addr) __read_mostly;
5051 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5052 #endif
5053 
5054 static inline struct sk_buff *
5055 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
5056 		   struct net_device *orig_dev, bool *another)
5057 {
5058 #ifdef CONFIG_NET_CLS_ACT
5059 	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
5060 	struct tcf_result cl_res;
5061 
5062 	/* If there's at least one ingress present somewhere (so
5063 	 * we get here via the enabled static key), remaining devices
5064 	 * that are not configured with an ingress qdisc will bail
5065 	 * out here.
5066 	 */
5067 	if (!miniq)
5068 		return skb;
5069 
5070 	if (*pt_prev) {
5071 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
5072 		*pt_prev = NULL;
5073 	}
5074 
5075 	qdisc_skb_cb(skb)->pkt_len = skb->len;
5076 	tc_skb_cb(skb)->mru = 0;
5077 	tc_skb_cb(skb)->post_ct = false;
5078 	skb->tc_at_ingress = 1;
5079 	mini_qdisc_bstats_cpu_update(miniq, skb);
5080 
5081 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
5082 	case TC_ACT_OK:
5083 	case TC_ACT_RECLASSIFY:
5084 		skb->tc_index = TC_H_MIN(cl_res.classid);
5085 		break;
5086 	case TC_ACT_SHOT:
5087 		mini_qdisc_qstats_cpu_drop(miniq);
5088 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
5089 		return NULL;
5090 	case TC_ACT_STOLEN:
5091 	case TC_ACT_QUEUED:
5092 	case TC_ACT_TRAP:
5093 		consume_skb(skb);
5094 		return NULL;
5095 	case TC_ACT_REDIRECT:
5096 		/* skb_mac_header check was done by cls/act_bpf, so
5097 		 * we can safely push the L2 header back before
5098 		 * redirecting to another netdev
5099 		 */
5100 		__skb_push(skb, skb->mac_len);
5101 		if (skb_do_redirect(skb) == -EAGAIN) {
5102 			__skb_pull(skb, skb->mac_len);
5103 			*another = true;
5104 			break;
5105 		}
5106 		return NULL;
5107 	case TC_ACT_CONSUMED:
5108 		return NULL;
5109 	default:
5110 		break;
5111 	}
5112 #endif /* CONFIG_NET_CLS_ACT */
5113 	return skb;
5114 }
5115 
5116 /**
5117  *	netdev_is_rx_handler_busy - check if receive handler is registered
5118  *	@dev: device to check
5119  *
5120  *	Check if a receive handler is already registered for a given device.
5121  *	Return true if there is one.
5122  *
5123  *	The caller must hold the rtnl_mutex.
5124  */
5125 bool netdev_is_rx_handler_busy(struct net_device *dev)
5126 {
5127 	ASSERT_RTNL();
5128 	return dev && rtnl_dereference(dev->rx_handler);
5129 }
5130 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5131 
5132 /**
5133  *	netdev_rx_handler_register - register receive handler
5134  *	@dev: device to register a handler for
5135  *	@rx_handler: receive handler to register
5136  *	@rx_handler_data: data pointer that is used by rx handler
5137  *
5138  *	Register a receive handler for a device. This handler will then be
5139  *	called from __netif_receive_skb. A negative errno code is returned
5140  *	on a failure.
5141  *
5142  *	The caller must hold the rtnl_mutex.
5143  *
5144  *	For a general description of rx_handler, see enum rx_handler_result.
5145  */
5146 int netdev_rx_handler_register(struct net_device *dev,
5147 			       rx_handler_func_t *rx_handler,
5148 			       void *rx_handler_data)
5149 {
5150 	if (netdev_is_rx_handler_busy(dev))
5151 		return -EBUSY;
5152 
5153 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5154 		return -EINVAL;
5155 
5156 	/* Note: rx_handler_data must be set before rx_handler */
5157 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5158 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5159 
5160 	return 0;
5161 }
5162 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
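
/* Registration sketch (the handler and port structure are hypothetical;
 * the shape follows users such as the bridge and team drivers):
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct my_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		// ... consume, steal or pass the frame ...
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, my_handle_frame, port);
 */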
5163 
5164 /**
5165  *	netdev_rx_handler_unregister - unregister receive handler
5166  *	@dev: device to unregister a handler from
5167  *
5168  *	Unregister a receive handler from a device.
5169  *
5170  *	The caller must hold the rtnl_mutex.
5171  */
5172 void netdev_rx_handler_unregister(struct net_device *dev)
5173 {
5175 	ASSERT_RTNL();
5176 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5177 	/* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
5178 	 * section is guaranteed to see a non-NULL rx_handler_data
5179 	 * as well.
5180 	 */
5181 	synchronize_net();
5182 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5183 }
5184 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5185 
5186 /*
5187  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5188  * the special handling of PFMEMALLOC skbs.
5189  */
5190 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5191 {
5192 	switch (skb->protocol) {
5193 	case htons(ETH_P_ARP):
5194 	case htons(ETH_P_IP):
5195 	case htons(ETH_P_IPV6):
5196 	case htons(ETH_P_8021Q):
5197 	case htons(ETH_P_8021AD):
5198 		return true;
5199 	default:
5200 		return false;
5201 	}
5202 }
5203 
5204 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5205 			     int *ret, struct net_device *orig_dev)
5206 {
5207 	if (nf_hook_ingress_active(skb)) {
5208 		int ingress_retval;
5209 
5210 		if (*pt_prev) {
5211 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5212 			*pt_prev = NULL;
5213 		}
5214 
5215 		rcu_read_lock();
5216 		ingress_retval = nf_hook_ingress(skb);
5217 		rcu_read_unlock();
5218 		return ingress_retval;
5219 	}
5220 	return 0;
5221 }
5222 
5223 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5224 				    struct packet_type **ppt_prev)
5225 {
5226 	struct packet_type *ptype, *pt_prev;
5227 	rx_handler_func_t *rx_handler;
5228 	struct sk_buff *skb = *pskb;
5229 	struct net_device *orig_dev;
5230 	bool deliver_exact = false;
5231 	int ret = NET_RX_DROP;
5232 	__be16 type;
5233 
5234 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
5235 
5236 	trace_netif_receive_skb(skb);
5237 
5238 	orig_dev = skb->dev;
5239 
5240 	skb_reset_network_header(skb);
5241 	if (!skb_transport_header_was_set(skb))
5242 		skb_reset_transport_header(skb);
5243 	skb_reset_mac_len(skb);
5244 
5245 	pt_prev = NULL;
5246 
5247 another_round:
5248 	skb->skb_iif = skb->dev->ifindex;
5249 
5250 	__this_cpu_inc(softnet_data.processed);
5251 
5252 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5253 		int ret2;
5254 
5255 		migrate_disable();
5256 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5257 		migrate_enable();
5258 
5259 		if (ret2 != XDP_PASS) {
5260 			ret = NET_RX_DROP;
5261 			goto out;
5262 		}
5263 	}
5264 
5265 	if (eth_type_vlan(skb->protocol)) {
5266 		skb = skb_vlan_untag(skb);
5267 		if (unlikely(!skb))
5268 			goto out;
5269 	}
5270 
5271 	if (skb_skip_tc_classify(skb))
5272 		goto skip_classify;
5273 
5274 	if (pfmemalloc)
5275 		goto skip_taps;
5276 
5277 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5278 		if (pt_prev)
5279 			ret = deliver_skb(skb, pt_prev, orig_dev);
5280 		pt_prev = ptype;
5281 	}
5282 
5283 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5284 		if (pt_prev)
5285 			ret = deliver_skb(skb, pt_prev, orig_dev);
5286 		pt_prev = ptype;
5287 	}
5288 
5289 skip_taps:
5290 #ifdef CONFIG_NET_INGRESS
5291 	if (static_branch_unlikely(&ingress_needed_key)) {
5292 		bool another = false;
5293 
5294 		nf_skip_egress(skb, true);
5295 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5296 					 &another);
5297 		if (another)
5298 			goto another_round;
5299 		if (!skb)
5300 			goto out;
5301 
5302 		nf_skip_egress(skb, false);
5303 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5304 			goto out;
5305 	}
5306 #endif
5307 	skb_reset_redirect(skb);
5308 skip_classify:
5309 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5310 		goto drop;
5311 
5312 	if (skb_vlan_tag_present(skb)) {
5313 		if (pt_prev) {
5314 			ret = deliver_skb(skb, pt_prev, orig_dev);
5315 			pt_prev = NULL;
5316 		}
5317 		if (vlan_do_receive(&skb))
5318 			goto another_round;
5319 		else if (unlikely(!skb))
5320 			goto out;
5321 	}
5322 
5323 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5324 	if (rx_handler) {
5325 		if (pt_prev) {
5326 			ret = deliver_skb(skb, pt_prev, orig_dev);
5327 			pt_prev = NULL;
5328 		}
5329 		switch (rx_handler(&skb)) {
5330 		case RX_HANDLER_CONSUMED:
5331 			ret = NET_RX_SUCCESS;
5332 			goto out;
5333 		case RX_HANDLER_ANOTHER:
5334 			goto another_round;
5335 		case RX_HANDLER_EXACT:
5336 			deliver_exact = true;
5337 			break;
5338 		case RX_HANDLER_PASS:
5339 			break;
5340 		default:
5341 			BUG();
5342 		}
5343 	}
5344 
5345 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5346 check_vlan_id:
5347 		if (skb_vlan_tag_get_id(skb)) {
5348 			/* VLAN id is non-zero and vlan_do_receive() above couldn't
5349 			 * find the vlan device.
5350 			 */
5351 			skb->pkt_type = PACKET_OTHERHOST;
5352 		} else if (eth_type_vlan(skb->protocol)) {
5353 			/* Outer header is 802.1P with vlan 0, inner header is
5354 			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5355 			 * not find vlan dev for vlan id 0.
5356 			 */
5357 			__vlan_hwaccel_clear_tag(skb);
5358 			skb = skb_vlan_untag(skb);
5359 			if (unlikely(!skb))
5360 				goto out;
5361 			if (vlan_do_receive(&skb))
5362 				/* After stripping off the 802.1P header with
5363 				 * vlan 0, a vlan dev is found for the inner header.
5364 				 */
5365 				goto another_round;
5366 			else if (unlikely(!skb))
5367 				goto out;
5368 			else
5369 				/* We have stripped the outer 802.1P vlan 0 header,
5370 				 * but could not find a vlan dev.
5371 				 * Check the vlan id again to set OTHERHOST.
5372 				 */
5373 				goto check_vlan_id;
5374 		}
5375 		/* Note: we might in the future use prio bits
5376 		 * and set skb->priority like in vlan_do_receive().
5377 		 * For the time being, just ignore Priority Code Point.
5378 		 */
5379 		__vlan_hwaccel_clear_tag(skb);
5380 	}
5381 
5382 	type = skb->protocol;
5383 
5384 	/* deliver only exact match when indicated */
5385 	if (likely(!deliver_exact)) {
5386 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5387 				       &ptype_base[ntohs(type) &
5388 						   PTYPE_HASH_MASK]);
5389 	}
5390 
5391 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5392 			       &orig_dev->ptype_specific);
5393 
5394 	if (unlikely(skb->dev != orig_dev)) {
5395 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5396 				       &skb->dev->ptype_specific);
5397 	}
5398 
5399 	if (pt_prev) {
5400 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5401 			goto drop;
5402 		*ppt_prev = pt_prev;
5403 	} else {
5404 drop:
5405 		if (!deliver_exact)
5406 			dev_core_stats_rx_dropped_inc(skb->dev);
5407 		else
5408 			dev_core_stats_rx_nohandler_inc(skb->dev);
5409 		kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5410 		/* Jamal, now you will not be able to escape explaining
5411 		 * to me how you were going to use this. :-)
5412 		 */
5413 		ret = NET_RX_DROP;
5414 	}
5415 
5416 out:
5417 	/* The invariant here is that if *ppt_prev is not NULL
5418 	 * then skb should also be non-NULL.
5419 	 *
5420 	 * The *ppt_prev assignment above preserves this invariant because
5421 	 * skb is dereferenced right next to it.
5422 	 */
5423 	*pskb = skb;
5424 	return ret;
5425 }
5426 
5427 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5428 {
5429 	struct net_device *orig_dev = skb->dev;
5430 	struct packet_type *pt_prev = NULL;
5431 	int ret;
5432 
5433 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5434 	if (pt_prev)
5435 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5436 					 skb->dev, pt_prev, orig_dev);
5437 	return ret;
5438 }
5439 
5440 /**
5441  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5442  *	@skb: buffer to process
5443  *
5444  *	More direct receive version of netif_receive_skb().  It should
5445  *	only be used by callers that have a need to skip RPS and Generic XDP.
5446  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5447  *
5448  *	This function may only be called from softirq context and interrupts
5449  *	should be enabled.
5450  *
5451  *	Return values (usually ignored):
5452  *	NET_RX_SUCCESS: no congestion
5453  *	NET_RX_DROP: packet was dropped
5454  */
5455 int netif_receive_skb_core(struct sk_buff *skb)
5456 {
5457 	int ret;
5458 
5459 	rcu_read_lock();
5460 	ret = __netif_receive_skb_one_core(skb, false);
5461 	rcu_read_unlock();
5462 
5463 	return ret;
5464 }
5465 EXPORT_SYMBOL(netif_receive_skb_core);
5466 
5467 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5468 						  struct packet_type *pt_prev,
5469 						  struct net_device *orig_dev)
5470 {
5471 	struct sk_buff *skb, *next;
5472 
5473 	if (!pt_prev)
5474 		return;
5475 	if (list_empty(head))
5476 		return;
5477 	if (pt_prev->list_func != NULL)
5478 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5479 				   ip_list_rcv, head, pt_prev, orig_dev);
5480 	else
5481 		list_for_each_entry_safe(skb, next, head, list) {
5482 			skb_list_del_init(skb);
5483 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5484 		}
5485 }
5486 
5487 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5488 {
5489 	/* Fast-path assumptions:
5490 	 * - There is no RX handler.
5491 	 * - Only one packet_type matches.
5492 	 * If either of these fails, we will end up doing some per-packet
5493 	 * processing in-line, then handling the 'last ptype' for the whole
5494 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5495 	 * because the 'last ptype' must be constant across the sublist, and all
5496 	 * other ptypes are handled per-packet.
5497 	 */
5498 	/* Current (common) ptype of sublist */
5499 	struct packet_type *pt_curr = NULL;
5500 	/* Current (common) orig_dev of sublist */
5501 	struct net_device *od_curr = NULL;
5502 	struct list_head sublist;
5503 	struct sk_buff *skb, *next;
5504 
5505 	INIT_LIST_HEAD(&sublist);
5506 	list_for_each_entry_safe(skb, next, head, list) {
5507 		struct net_device *orig_dev = skb->dev;
5508 		struct packet_type *pt_prev = NULL;
5509 
5510 		skb_list_del_init(skb);
5511 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5512 		if (!pt_prev)
5513 			continue;
5514 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5515 			/* dispatch old sublist */
5516 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5517 			/* start new sublist */
5518 			INIT_LIST_HEAD(&sublist);
5519 			pt_curr = pt_prev;
5520 			od_curr = orig_dev;
5521 		}
5522 		list_add_tail(&skb->list, &sublist);
5523 	}
5524 
5525 	/* dispatch final sublist */
5526 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5527 }
5528 
5529 static int __netif_receive_skb(struct sk_buff *skb)
5530 {
5531 	int ret;
5532 
5533 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5534 		unsigned int noreclaim_flag;
5535 
5536 		/*
5537 		 * PFMEMALLOC skbs are special, they should
5538 		 * - be delivered to SOCK_MEMALLOC sockets only
5539 		 * - stay away from userspace
5540 		 * - have bounded memory usage
5541 		 *
5542 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5543 		 * context down to all allocation sites.
5544 		 */
5545 		noreclaim_flag = memalloc_noreclaim_save();
5546 		ret = __netif_receive_skb_one_core(skb, true);
5547 		memalloc_noreclaim_restore(noreclaim_flag);
5548 	} else
5549 		ret = __netif_receive_skb_one_core(skb, false);
5550 
5551 	return ret;
5552 }
5553 
5554 static void __netif_receive_skb_list(struct list_head *head)
5555 {
5556 	unsigned long noreclaim_flag = 0;
5557 	struct sk_buff *skb, *next;
5558 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5559 
5560 	list_for_each_entry_safe(skb, next, head, list) {
5561 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5562 			struct list_head sublist;
5563 
5564 			/* Handle the previous sublist */
5565 			list_cut_before(&sublist, head, &skb->list);
5566 			if (!list_empty(&sublist))
5567 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5568 			pfmemalloc = !pfmemalloc;
5569 			/* See comments in __netif_receive_skb */
5570 			if (pfmemalloc)
5571 				noreclaim_flag = memalloc_noreclaim_save();
5572 			else
5573 				memalloc_noreclaim_restore(noreclaim_flag);
5574 		}
5575 	}
5576 	/* Handle the remaining sublist */
5577 	if (!list_empty(head))
5578 		__netif_receive_skb_list_core(head, pfmemalloc);
5579 	/* Restore pflags */
5580 	if (pfmemalloc)
5581 		memalloc_noreclaim_restore(noreclaim_flag);
5582 }
5583 
5584 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5585 {
5586 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5587 	struct bpf_prog *new = xdp->prog;
5588 	int ret = 0;
5589 
5590 	switch (xdp->command) {
5591 	case XDP_SETUP_PROG:
5592 		rcu_assign_pointer(dev->xdp_prog, new);
5593 		if (old)
5594 			bpf_prog_put(old);
5595 
5596 		if (old && !new) {
5597 			static_branch_dec(&generic_xdp_needed_key);
5598 		} else if (new && !old) {
5599 			static_branch_inc(&generic_xdp_needed_key);
5600 			dev_disable_lro(dev);
5601 			dev_disable_gro_hw(dev);
5602 		}
5603 		break;
5604 
5605 	default:
5606 		ret = -EINVAL;
5607 		break;
5608 	}
5609 
5610 	return ret;
5611 }
5612 
5613 static int netif_receive_skb_internal(struct sk_buff *skb)
5614 {
5615 	int ret;
5616 
5617 	net_timestamp_check(netdev_tstamp_prequeue, skb);
5618 
5619 	if (skb_defer_rx_timestamp(skb))
5620 		return NET_RX_SUCCESS;
5621 
5622 	rcu_read_lock();
5623 #ifdef CONFIG_RPS
5624 	if (static_branch_unlikely(&rps_needed)) {
5625 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5626 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5627 
5628 		if (cpu >= 0) {
5629 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5630 			rcu_read_unlock();
5631 			return ret;
5632 		}
5633 	}
5634 #endif
5635 	ret = __netif_receive_skb(skb);
5636 	rcu_read_unlock();
5637 	return ret;
5638 }
5639 
5640 void netif_receive_skb_list_internal(struct list_head *head)
5641 {
5642 	struct sk_buff *skb, *next;
5643 	struct list_head sublist;
5644 
5645 	INIT_LIST_HEAD(&sublist);
5646 	list_for_each_entry_safe(skb, next, head, list) {
5647 		net_timestamp_check(netdev_tstamp_prequeue, skb);
5648 		skb_list_del_init(skb);
5649 		if (!skb_defer_rx_timestamp(skb))
5650 			list_add_tail(&skb->list, &sublist);
5651 	}
5652 	list_splice_init(&sublist, head);
5653 
5654 	rcu_read_lock();
5655 #ifdef CONFIG_RPS
5656 	if (static_branch_unlikely(&rps_needed)) {
5657 		list_for_each_entry_safe(skb, next, head, list) {
5658 			struct rps_dev_flow voidflow, *rflow = &voidflow;
5659 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5660 
5661 			if (cpu >= 0) {
5662 				/* Will be handled, remove from list */
5663 				skb_list_del_init(skb);
5664 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5665 			}
5666 		}
5667 	}
5668 #endif
5669 	__netif_receive_skb_list(head);
5670 	rcu_read_unlock();
5671 }
5672 
5673 /**
5674  *	netif_receive_skb - process receive buffer from network
5675  *	@skb: buffer to process
5676  *
5677  *	netif_receive_skb() is the main receive data processing function.
5678  *	It always succeeds. The buffer may be dropped during processing
5679  *	for congestion control or by the protocol layers.
5680  *
5681  *	This function may only be called from softirq context and interrupts
5682  *	should be enabled.
5683  *
5684  *	Return values (usually ignored):
5685  *	NET_RX_SUCCESS: no congestion
5686  *	NET_RX_DROP: packet was dropped
5687  */
5688 int netif_receive_skb(struct sk_buff *skb)
5689 {
5690 	int ret;
5691 
5692 	trace_netif_receive_skb_entry(skb);
5693 
5694 	ret = netif_receive_skb_internal(skb);
5695 	trace_netif_receive_skb_exit(ret);
5696 
5697 	return ret;
5698 }
5699 EXPORT_SYMBOL(netif_receive_skb);
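
/* Illustrative sketch (hypothetical driver; the "mydrv" names are
 * placeholders): the canonical receive path builds an skb from a device
 * buffer, sets the protocol, and hands it over from softirq context.
 *
 *	static void mydrv_rx_one(struct net_device *dev, void *buf, int len)
 *	{
 *		struct sk_buff *skb = netdev_alloc_skb(dev, len);
 *
 *		if (!skb)
 *			return;		// drop on allocation failure
 *		skb_put_data(skb, buf, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_receive_skb(skb);	// return value usually ignored
 *	}
 */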
5700 
5701 /**
5702  *	netif_receive_skb_list - process many receive buffers from network
5703  *	@head: list of skbs to process.
5704  *
5705  *	Since return value of netif_receive_skb() is normally ignored, and
5706  *	wouldn't be meaningful for a list, this function returns void.
5707  *
5708  *	This function may only be called from softirq context and interrupts
5709  *	should be enabled.
5710  */
5711 void netif_receive_skb_list(struct list_head *head)
5712 {
5713 	struct sk_buff *skb;
5714 
5715 	if (list_empty(head))
5716 		return;
5717 	if (trace_netif_receive_skb_list_entry_enabled()) {
5718 		list_for_each_entry(skb, head, list)
5719 			trace_netif_receive_skb_list_entry(skb);
5720 	}
5721 	netif_receive_skb_list_internal(head);
5722 	trace_netif_receive_skb_list_exit(0);
5723 }
5724 EXPORT_SYMBOL(netif_receive_skb_list);
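
/* Illustrative sketch (hypothetical fetch helper inside a poll routine):
 * drivers typically gather received skbs on a local list and submit them
 * in one batch, letting the sublist handling above amortise per-packet
 * costs.
 *
 *	LIST_HEAD(rx_list);
 *	struct sk_buff *skb;
 *
 *	while ((skb = mydrv_fetch_skb(priv)) != NULL)	// hypothetical
 *		list_add_tail(&skb->list, &rx_list);
 *	netif_receive_skb_list(&rx_list);
 */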
5725 
5726 static DEFINE_PER_CPU(struct work_struct, flush_works);
5727 
5728 /* Network device is going away, flush any packets still pending */
5729 static void flush_backlog(struct work_struct *work)
5730 {
5731 	struct sk_buff *skb, *tmp;
5732 	struct softnet_data *sd;
5733 
5734 	local_bh_disable();
5735 	sd = this_cpu_ptr(&softnet_data);
5736 
5737 	rps_lock_irq_disable(sd);
5738 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5739 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5740 			__skb_unlink(skb, &sd->input_pkt_queue);
5741 			dev_kfree_skb_irq(skb);
5742 			input_queue_head_incr(sd);
5743 		}
5744 	}
5745 	rps_unlock_irq_enable(sd);
5746 
5747 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5748 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5749 			__skb_unlink(skb, &sd->process_queue);
5750 			kfree_skb(skb);
5751 			input_queue_head_incr(sd);
5752 		}
5753 	}
5754 	local_bh_enable();
5755 }
5756 
5757 static bool flush_required(int cpu)
5758 {
5759 #if IS_ENABLED(CONFIG_RPS)
5760 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5761 	bool do_flush;
5762 
5763 	rps_lock_irq_disable(sd);
5764 
5765 	/* as insertion into process_queue happens with the rps lock held,
5766 	 * process_queue access may race only with dequeue
5767 	 */
5768 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5769 		   !skb_queue_empty_lockless(&sd->process_queue);
5770 	rps_unlock_irq_enable(sd);
5771 
5772 	return do_flush;
5773 #endif
	/* without RPS we can't safely check input_pkt_queue: during a
	 * concurrent remote skb_queue_splice() we could observe both
	 * input_pkt_queue and process_queue as empty even though the latter
	 * could end up containing a lot of packets.
5778 	 */
5779 	return true;
5780 }
5781 
5782 static void flush_all_backlogs(void)
5783 {
5784 	static cpumask_t flush_cpus;
5785 	unsigned int cpu;
5786 
	/* since we are under rtnl lock protection we can use static data
	 * for the cpumask and avoid allocating the possibly large mask
	 * on the stack
5790 	 */
5791 	ASSERT_RTNL();
5792 
5793 	cpus_read_lock();
5794 
5795 	cpumask_clear(&flush_cpus);
5796 	for_each_online_cpu(cpu) {
5797 		if (flush_required(cpu)) {
5798 			queue_work_on(cpu, system_highpri_wq,
5799 				      per_cpu_ptr(&flush_works, cpu));
5800 			cpumask_set_cpu(cpu, &flush_cpus);
5801 		}
5802 	}
5803 
	/* we can have in-flight packets on the cpus we are not flushing;
5805 	 * synchronize_net() in unregister_netdevice_many() will take care of
5806 	 * them
5807 	 */
5808 	for_each_cpu(cpu, &flush_cpus)
5809 		flush_work(per_cpu_ptr(&flush_works, cpu));
5810 
5811 	cpus_read_unlock();
5812 }
5813 
5814 static void net_rps_send_ipi(struct softnet_data *remsd)
5815 {
5816 #ifdef CONFIG_RPS
5817 	while (remsd) {
5818 		struct softnet_data *next = remsd->rps_ipi_next;
5819 
5820 		if (cpu_online(remsd->cpu))
5821 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
5822 		remsd = next;
5823 	}
5824 #endif
5825 }
5826 
5827 /*
5828  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5829  * Note: called with local irq disabled, but exits with local irq enabled.
5830  */
5831 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5832 {
5833 #ifdef CONFIG_RPS
5834 	struct softnet_data *remsd = sd->rps_ipi_list;
5835 
5836 	if (remsd) {
5837 		sd->rps_ipi_list = NULL;
5838 
5839 		local_irq_enable();
5840 
5841 		/* Send pending IPI's to kick RPS processing on remote cpus. */
5842 		net_rps_send_ipi(remsd);
5843 	} else
5844 #endif
5845 		local_irq_enable();
5846 }
5847 
5848 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5849 {
5850 #ifdef CONFIG_RPS
5851 	return sd->rps_ipi_list != NULL;
5852 #else
5853 	return false;
5854 #endif
5855 }
5856 
5857 static int process_backlog(struct napi_struct *napi, int quota)
5858 {
5859 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5860 	bool again = true;
5861 	int work = 0;
5862 
	/* Check if we have pending IPIs; it's better to send them now
	 * than to wait for net_rx_action() to end.
5865 	 */
5866 	if (sd_has_rps_ipi_waiting(sd)) {
5867 		local_irq_disable();
5868 		net_rps_action_and_irq_enable(sd);
5869 	}
5870 
5871 	napi->weight = dev_rx_weight;
5872 	while (again) {
5873 		struct sk_buff *skb;
5874 
5875 		while ((skb = __skb_dequeue(&sd->process_queue))) {
5876 			rcu_read_lock();
5877 			__netif_receive_skb(skb);
5878 			rcu_read_unlock();
5879 			input_queue_head_incr(sd);
5880 			if (++work >= quota)
5881 				return work;
5882 
5883 		}
5884 
5885 		rps_lock_irq_disable(sd);
5886 		if (skb_queue_empty(&sd->input_pkt_queue)) {
5887 			/*
5888 			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
5894 			 */
5895 			napi->state = 0;
5896 			again = false;
5897 		} else {
5898 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
5899 						   &sd->process_queue);
5900 		}
5901 		rps_unlock_irq_enable(sd);
5902 	}
5903 
5904 	return work;
5905 }
5906 
5907 /**
5908  * __napi_schedule - schedule for receive
5909  * @n: entry to schedule
5910  *
5911  * The entry's receive function will be scheduled to run.
5912  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5913  */
5914 void __napi_schedule(struct napi_struct *n)
5915 {
5916 	unsigned long flags;
5917 
5918 	local_irq_save(flags);
5919 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
5920 	local_irq_restore(flags);
5921 }
5922 EXPORT_SYMBOL(__napi_schedule);
5923 
5924 /**
5925  *	napi_schedule_prep - check if napi can be scheduled
5926  *	@n: napi context
5927  *
5928  * Test if NAPI routine is already running, and if not mark
5929  * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
5931  * sure there is no pending NAPI disable.
5932  */
5933 bool napi_schedule_prep(struct napi_struct *n)
5934 {
5935 	unsigned long val, new;
5936 
5937 	do {
5938 		val = READ_ONCE(n->state);
5939 		if (unlikely(val & NAPIF_STATE_DISABLE))
5940 			return false;
5941 		new = val | NAPIF_STATE_SCHED;
5942 
		/* Sets the STATE_MISSED bit if STATE_SCHED was already set.
		 * This was suggested by Alexander Duyck, as the compiler
		 * emits better code than:
5946 		 * if (val & NAPIF_STATE_SCHED)
5947 		 *     new |= NAPIF_STATE_MISSED;
5948 		 */
5949 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5950 						   NAPIF_STATE_MISSED;
5951 	} while (cmpxchg(&n->state, val, new) != val);
5952 
5953 	return !(val & NAPIF_STATE_SCHED);
5954 }
5955 EXPORT_SYMBOL(napi_schedule_prep);
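
/* Illustrative sketch (hypothetical driver): an interrupt handler pairs
 * napi_schedule_prep() with __napi_schedule(); the napi_schedule() wrapper
 * performs exactly this sequence for the common case.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;		// hypothetical
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_mask_irqs(priv);		// hypothetical helper
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */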
5956 
5957 /**
5958  * __napi_schedule_irqoff - schedule for receive
5959  * @n: entry to schedule
5960  *
5961  * Variant of __napi_schedule() assuming hard irqs are masked.
5962  *
5963  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
5964  * because the interrupt disabled assumption might not be true
5965  * due to force-threaded interrupts and spinlock substitution.
5966  */
5967 void __napi_schedule_irqoff(struct napi_struct *n)
5968 {
5969 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
5970 		____napi_schedule(this_cpu_ptr(&softnet_data), n);
5971 	else
5972 		__napi_schedule(n);
5973 }
5974 EXPORT_SYMBOL(__napi_schedule_irqoff);
5975 
5976 bool napi_complete_done(struct napi_struct *n, int work_done)
5977 {
5978 	unsigned long flags, val, new, timeout = 0;
5979 	bool ret = true;
5980 
5981 	/*
5982 	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
5984 	 * 2) If we are busy polling, do nothing here, we have
5985 	 *    the guarantee we will be called later.
5986 	 */
5987 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5988 				 NAPIF_STATE_IN_BUSY_POLL)))
5989 		return false;
5990 
5991 	if (work_done) {
5992 		if (n->gro_bitmask)
5993 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
5994 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
5995 	}
5996 	if (n->defer_hard_irqs_count > 0) {
5997 		n->defer_hard_irqs_count--;
5998 		timeout = READ_ONCE(n->dev->gro_flush_timeout);
5999 		if (timeout)
6000 			ret = false;
6001 	}
6002 	if (n->gro_bitmask) {
6003 		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to somehow bound the time packets are kept in
		 * the GRO layer.
6006 		 */
6007 		napi_gro_flush(n, !!timeout);
6008 	}
6009 
6010 	gro_normal_list(n);
6011 
6012 	if (unlikely(!list_empty(&n->poll_list))) {
6013 		/* If n->poll_list is not empty, we need to mask irqs */
6014 		local_irq_save(flags);
6015 		list_del_init(&n->poll_list);
6016 		local_irq_restore(flags);
6017 	}
6018 
6019 	do {
6020 		val = READ_ONCE(n->state);
6021 
6022 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6023 
6024 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6025 			      NAPIF_STATE_SCHED_THREADED |
6026 			      NAPIF_STATE_PREFER_BUSY_POLL);
6027 
6028 		/* If STATE_MISSED was set, leave STATE_SCHED set,
6029 		 * because we will call napi->poll() one more time.
6030 		 * This C code was suggested by Alexander Duyck to help gcc.
6031 		 */
6032 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6033 						    NAPIF_STATE_SCHED;
6034 	} while (cmpxchg(&n->state, val, new) != val);
6035 
6036 	if (unlikely(val & NAPIF_STATE_MISSED)) {
6037 		__napi_schedule(n);
6038 		return false;
6039 	}
6040 
6041 	if (timeout)
6042 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
6043 			      HRTIMER_MODE_REL_PINNED);
6044 	return ret;
6045 }
6046 EXPORT_SYMBOL(napi_complete_done);
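
/* Illustrative sketch (hypothetical poll function): when less than the
 * budget is consumed, re-enable device interrupts only if
 * napi_complete_done() returns true, so the gro_flush_timeout /
 * napi_defer_hard_irqs deferral logic above is honoured.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv =
 *			container_of(napi, struct mydrv_priv, napi);
 *		int work = mydrv_clean_rx(priv, budget);	// hypothetical
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			mydrv_unmask_irqs(priv);	// hypothetical helper
 *		return work;
 *	}
 */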
6047 
/* must be called under rcu_read_lock(), as we don't take a reference */
6049 static struct napi_struct *napi_by_id(unsigned int napi_id)
6050 {
6051 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6052 	struct napi_struct *napi;
6053 
6054 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6055 		if (napi->napi_id == napi_id)
6056 			return napi;
6057 
6058 	return NULL;
6059 }
6060 
6061 #if defined(CONFIG_NET_RX_BUSY_POLL)
6062 
6063 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6064 {
6065 	if (!skip_schedule) {
6066 		gro_normal_list(napi);
6067 		__napi_schedule(napi);
6068 		return;
6069 	}
6070 
6071 	if (napi->gro_bitmask) {
		/* Flush packets that are too old.
6073 		 * If HZ < 1000, flush all packets.
6074 		 */
6075 		napi_gro_flush(napi, HZ >= 1000);
6076 	}
6077 
6078 	gro_normal_list(napi);
6079 	clear_bit(NAPI_STATE_SCHED, &napi->state);
6080 }
6081 
6082 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6083 			   u16 budget)
6084 {
6085 	bool skip_schedule = false;
6086 	unsigned long timeout;
6087 	int rc;
6088 
	/* Busy polling means there is a high chance the device driver's hard irq
6090 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6091 	 * set in napi_schedule_prep().
6092 	 * Since we are about to call napi->poll() once more, we can safely
6093 	 * clear NAPI_STATE_MISSED.
6094 	 *
6095 	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit() calls.
6097 	 */
6098 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6099 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6100 
6101 	local_bh_disable();
6102 
6103 	if (prefer_busy_poll) {
6104 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6105 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6106 		if (napi->defer_hard_irqs_count && timeout) {
6107 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6108 			skip_schedule = true;
6109 		}
6110 	}
6111 
6112 	/* All we really want here is to re-enable device interrupts.
6113 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6114 	 */
6115 	rc = napi->poll(napi, budget);
6116 	/* We can't gro_normal_list() here, because napi->poll() might have
6117 	 * rearmed the napi (napi_complete_done()) in which case it could
6118 	 * already be running on another CPU.
6119 	 */
6120 	trace_napi_poll(napi, rc, budget);
6121 	netpoll_poll_unlock(have_poll_lock);
6122 	if (rc == budget)
6123 		__busy_poll_stop(napi, skip_schedule);
6124 	local_bh_enable();
6125 }
6126 
6127 void napi_busy_loop(unsigned int napi_id,
6128 		    bool (*loop_end)(void *, unsigned long),
6129 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6130 {
6131 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6132 	int (*napi_poll)(struct napi_struct *napi, int budget);
6133 	void *have_poll_lock = NULL;
6134 	struct napi_struct *napi;
6135 
6136 restart:
6137 	napi_poll = NULL;
6138 
6139 	rcu_read_lock();
6140 
6141 	napi = napi_by_id(napi_id);
6142 	if (!napi)
6143 		goto out;
6144 
6145 	preempt_disable();
6146 	for (;;) {
6147 		int work = 0;
6148 
6149 		local_bh_disable();
6150 		if (!napi_poll) {
6151 			unsigned long val = READ_ONCE(napi->state);
6152 
6153 			/* If multiple threads are competing for this napi,
6154 			 * we avoid dirtying napi->state as much as we can.
6155 			 */
6156 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6157 				   NAPIF_STATE_IN_BUSY_POLL)) {
6158 				if (prefer_busy_poll)
6159 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6160 				goto count;
6161 			}
6162 			if (cmpxchg(&napi->state, val,
6163 				    val | NAPIF_STATE_IN_BUSY_POLL |
6164 					  NAPIF_STATE_SCHED) != val) {
6165 				if (prefer_busy_poll)
6166 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6167 				goto count;
6168 			}
6169 			have_poll_lock = netpoll_poll_lock(napi);
6170 			napi_poll = napi->poll;
6171 		}
6172 		work = napi_poll(napi, budget);
6173 		trace_napi_poll(napi, work, budget);
6174 		gro_normal_list(napi);
6175 count:
6176 		if (work > 0)
6177 			__NET_ADD_STATS(dev_net(napi->dev),
6178 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6179 		local_bh_enable();
6180 
6181 		if (!loop_end || loop_end(loop_end_arg, start_time))
6182 			break;
6183 
6184 		if (unlikely(need_resched())) {
6185 			if (napi_poll)
6186 				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6187 			preempt_enable();
6188 			rcu_read_unlock();
6189 			cond_resched();
6190 			if (loop_end(loop_end_arg, start_time))
6191 				return;
6192 			goto restart;
6193 		}
6194 		cpu_relax();
6195 	}
6196 	if (napi_poll)
6197 		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6198 	preempt_enable();
6199 out:
6200 	rcu_read_unlock();
6201 }
6202 EXPORT_SYMBOL(napi_busy_loop);
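
/* Illustrative sketch (loosely modelled on how socket busy-polling drives
 * this function; the callback, the cookie and the use of busy_loop_timeout()
 * from <net/busy_poll.h> are assumptions of this sketch): loop_end() is
 * evaluated after each poll round and ends the spin when it returns true.
 *
 *	static bool example_loop_end(void *arg, unsigned long start_time)
 *	{
 *		bool *data_ready = arg;		// hypothetical cookie
 *
 *		return *data_ready || busy_loop_timeout(start_time);
 *	}
 *
 *	bool ready = false;
 *
 *	napi_busy_loop(napi_id, example_loop_end, &ready,
 *		       false, 64);	// no prefer_busy_poll, budget of 64
 */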
6203 
6204 #endif /* CONFIG_NET_RX_BUSY_POLL */
6205 
6206 static void napi_hash_add(struct napi_struct *napi)
6207 {
6208 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6209 		return;
6210 
6211 	spin_lock(&napi_hash_lock);
6212 
6213 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6214 	do {
6215 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6216 			napi_gen_id = MIN_NAPI_ID;
6217 	} while (napi_by_id(napi_gen_id));
6218 	napi->napi_id = napi_gen_id;
6219 
6220 	hlist_add_head_rcu(&napi->napi_hash_node,
6221 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6222 
6223 	spin_unlock(&napi_hash_lock);
6224 }
6225 
/* Warning: the caller is responsible for making sure an RCU grace period
 * has elapsed before freeing the memory containing @napi
6228  */
6229 static void napi_hash_del(struct napi_struct *napi)
6230 {
6231 	spin_lock(&napi_hash_lock);
6232 
6233 	hlist_del_init_rcu(&napi->napi_hash_node);
6234 
6235 	spin_unlock(&napi_hash_lock);
6236 }
6237 
6238 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6239 {
6240 	struct napi_struct *napi;
6241 
6242 	napi = container_of(timer, struct napi_struct, timer);
6243 
	/* Note: we use a relaxed variant of napi_schedule_prep() that does
	 * not set NAPI_STATE_MISSED, since we are not reacting to a device IRQ.
6246 	 */
6247 	if (!napi_disable_pending(napi) &&
6248 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6249 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6250 		__napi_schedule_irqoff(napi);
6251 	}
6252 
6253 	return HRTIMER_NORESTART;
6254 }
6255 
6256 static void init_gro_hash(struct napi_struct *napi)
6257 {
6258 	int i;
6259 
6260 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6261 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6262 		napi->gro_hash[i].count = 0;
6263 	}
6264 	napi->gro_bitmask = 0;
6265 }
6266 
6267 int dev_set_threaded(struct net_device *dev, bool threaded)
6268 {
6269 	struct napi_struct *napi;
6270 	int err = 0;
6271 
6272 	if (dev->threaded == threaded)
6273 		return 0;
6274 
6275 	if (threaded) {
6276 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6277 			if (!napi->thread) {
6278 				err = napi_kthread_create(napi);
6279 				if (err) {
6280 					threaded = false;
6281 					break;
6282 				}
6283 			}
6284 		}
6285 	}
6286 
6287 	dev->threaded = threaded;
6288 
6289 	/* Make sure kthread is created before THREADED bit
6290 	 * is set.
6291 	 */
6292 	smp_mb__before_atomic();
6293 
6294 	/* Setting/unsetting threaded mode on a napi might not immediately
6295 	 * take effect, if the current napi instance is actively being
6296 	 * polled. In this case, the switch between threaded mode and
6297 	 * softirq mode will happen in the next round of napi_schedule().
6298 	 * This should not cause hiccups/stalls to the live traffic.
6299 	 */
6300 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
6301 		if (threaded)
6302 			set_bit(NAPI_STATE_THREADED, &napi->state);
6303 		else
6304 			clear_bit(NAPI_STATE_THREADED, &napi->state);
6305 	}
6306 
6307 	return err;
6308 }
6309 EXPORT_SYMBOL(dev_set_threaded);
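
/* Illustrative sketch: the per-device sysfs "threaded" attribute (and,
 * hypothetically, a driver) flips a device to threaded NAPI with RTNL held.
 *
 *	rtnl_lock();
 *	err = dev_set_threaded(dev, true);
 *	rtnl_unlock();
 *	if (err)
 *		netdev_warn(dev, "failed to enable threaded NAPI: %d\n", err);
 */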
6310 
6311 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6312 		    int (*poll)(struct napi_struct *, int), int weight)
6313 {
6314 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6315 		return;
6316 
6317 	INIT_LIST_HEAD(&napi->poll_list);
6318 	INIT_HLIST_NODE(&napi->napi_hash_node);
6319 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6320 	napi->timer.function = napi_watchdog;
6321 	init_gro_hash(napi);
6322 	napi->skb = NULL;
6323 	INIT_LIST_HEAD(&napi->rx_list);
6324 	napi->rx_count = 0;
6325 	napi->poll = poll;
6326 	if (weight > NAPI_POLL_WEIGHT)
6327 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6328 				weight);
6329 	napi->weight = weight;
6330 	napi->dev = dev;
6331 #ifdef CONFIG_NETPOLL
6332 	napi->poll_owner = -1;
6333 #endif
6334 	set_bit(NAPI_STATE_SCHED, &napi->state);
6335 	set_bit(NAPI_STATE_NPSVC, &napi->state);
6336 	list_add_rcu(&napi->dev_list, &dev->napi_list);
6337 	napi_hash_add(napi);
6338 	/* Create kthread for this napi if dev->threaded is set.
6339 	 * Clear dev->threaded if kthread creation failed so that
6340 	 * threaded mode will not be enabled in napi_enable().
6341 	 */
6342 	if (dev->threaded && napi_kthread_create(napi))
6343 		dev->threaded = 0;
6344 }
6345 EXPORT_SYMBOL(netif_napi_add);
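
/* Illustrative sketch (hypothetical probe/remove paths): registration is
 * paired with napi_enable(), and teardown mirrors it in reverse order.
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);
 *	...
 *	napi_disable(&priv->napi);
 *	netif_napi_del(&priv->napi);
 */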
6346 
6347 void napi_disable(struct napi_struct *n)
6348 {
6349 	unsigned long val, new;
6350 
6351 	might_sleep();
6352 	set_bit(NAPI_STATE_DISABLE, &n->state);
6353 
6354 	for ( ; ; ) {
6355 		val = READ_ONCE(n->state);
6356 		if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6357 			usleep_range(20, 200);
6358 			continue;
6359 		}
6360 
6361 		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6362 		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6363 
6364 		if (cmpxchg(&n->state, val, new) == val)
6365 			break;
6366 	}
6367 
6368 	hrtimer_cancel(&n->timer);
6369 
6370 	clear_bit(NAPI_STATE_DISABLE, &n->state);
6371 }
6372 EXPORT_SYMBOL(napi_disable);
6373 
6374 /**
6375  *	napi_enable - enable NAPI scheduling
6376  *	@n: NAPI context
6377  *
 * Resume scheduling of this NAPI context after it has been disabled.
 * Must be paired with napi_disable().
6380  */
6381 void napi_enable(struct napi_struct *n)
6382 {
6383 	unsigned long val, new;
6384 
6385 	do {
6386 		val = READ_ONCE(n->state);
6387 		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6388 
6389 		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6390 		if (n->dev->threaded && n->thread)
6391 			new |= NAPIF_STATE_THREADED;
6392 	} while (cmpxchg(&n->state, val, new) != val);
6393 }
6394 EXPORT_SYMBOL(napi_enable);
6395 
6396 static void flush_gro_hash(struct napi_struct *napi)
6397 {
6398 	int i;
6399 
6400 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6401 		struct sk_buff *skb, *n;
6402 
6403 		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6404 			kfree_skb(skb);
6405 		napi->gro_hash[i].count = 0;
6406 	}
6407 }
6408 
6409 /* Must be called in process context */
6410 void __netif_napi_del(struct napi_struct *napi)
6411 {
6412 	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6413 		return;
6414 
6415 	napi_hash_del(napi);
6416 	list_del_rcu(&napi->dev_list);
6417 	napi_free_frags(napi);
6418 
6419 	flush_gro_hash(napi);
6420 	napi->gro_bitmask = 0;
6421 
6422 	if (napi->thread) {
6423 		kthread_stop(napi->thread);
6424 		napi->thread = NULL;
6425 	}
6426 }
6427 EXPORT_SYMBOL(__netif_napi_del);
6428 
6429 static int __napi_poll(struct napi_struct *n, bool *repoll)
6430 {
6431 	int work, weight;
6432 
6433 	weight = n->weight;
6434 
6435 	/* This NAPI_STATE_SCHED test is for avoiding a race
6436 	 * with netpoll's poll_napi().  Only the entity which
6437 	 * obtains the lock and sees NAPI_STATE_SCHED set will
6438 	 * actually make the ->poll() call.  Therefore we avoid
6439 	 * accidentally calling ->poll() when NAPI is not scheduled.
6440 	 */
6441 	work = 0;
6442 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6443 		work = n->poll(n, weight);
6444 		trace_napi_poll(n, work, weight);
6445 	}
6446 
6447 	if (unlikely(work > weight))
6448 		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6449 				n->poll, work, weight);
6450 
6451 	if (likely(work < weight))
6452 		return work;
6453 
6454 	/* Drivers must not modify the NAPI state if they
6455 	 * consume the entire weight.  In such cases this code
6456 	 * still "owns" the NAPI instance and therefore can
6457 	 * move the instance around on the list at-will.
6458 	 */
6459 	if (unlikely(napi_disable_pending(n))) {
6460 		napi_complete(n);
6461 		return work;
6462 	}
6463 
6464 	/* The NAPI context has more processing work, but busy-polling
6465 	 * is preferred. Exit early.
6466 	 */
6467 	if (napi_prefer_busy_poll(n)) {
6468 		if (napi_complete_done(n, work)) {
6469 			/* If timeout is not set, we need to make sure
6470 			 * that the NAPI is re-scheduled.
6471 			 */
6472 			napi_schedule(n);
6473 		}
6474 		return work;
6475 	}
6476 
6477 	if (n->gro_bitmask) {
		/* Flush packets that are too old.
6479 		 * If HZ < 1000, flush all packets.
6480 		 */
6481 		napi_gro_flush(n, HZ >= 1000);
6482 	}
6483 
6484 	gro_normal_list(n);
6485 
6486 	/* Some drivers may have called napi_schedule
6487 	 * prior to exhausting their budget.
6488 	 */
6489 	if (unlikely(!list_empty(&n->poll_list))) {
6490 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6491 			     n->dev ? n->dev->name : "backlog");
6492 		return work;
6493 	}
6494 
6495 	*repoll = true;
6496 
6497 	return work;
6498 }
6499 
6500 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6501 {
6502 	bool do_repoll = false;
6503 	void *have;
6504 	int work;
6505 
6506 	list_del_init(&n->poll_list);
6507 
6508 	have = netpoll_poll_lock(n);
6509 
6510 	work = __napi_poll(n, &do_repoll);
6511 
6512 	if (do_repoll)
6513 		list_add_tail(&n->poll_list, repoll);
6514 
6515 	netpoll_poll_unlock(have);
6516 
6517 	return work;
6518 }
6519 
6520 static int napi_thread_wait(struct napi_struct *napi)
6521 {
6522 	bool woken = false;
6523 
6524 	set_current_state(TASK_INTERRUPTIBLE);
6525 
6526 	while (!kthread_should_stop()) {
		/* Test the SCHED_THREADED bit here to make sure the current
		 * kthread owns this napi and may poll on it.
		 * Testing the SCHED bit alone is not enough because it might
		 * be set by some other busy-poll thread or by napi_disable().
6531 		 */
6532 		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6533 			WARN_ON(!list_empty(&napi->poll_list));
6534 			__set_current_state(TASK_RUNNING);
6535 			return 0;
6536 		}
6537 
6538 		schedule();
6539 		/* woken being true indicates this thread owns this napi. */
6540 		woken = true;
6541 		set_current_state(TASK_INTERRUPTIBLE);
6542 	}
6543 	__set_current_state(TASK_RUNNING);
6544 
6545 	return -1;
6546 }
6547 
6548 static int napi_threaded_poll(void *data)
6549 {
6550 	struct napi_struct *napi = data;
6551 	void *have;
6552 
6553 	while (!napi_thread_wait(napi)) {
6554 		for (;;) {
6555 			bool repoll = false;
6556 
6557 			local_bh_disable();
6558 
6559 			have = netpoll_poll_lock(napi);
6560 			__napi_poll(napi, &repoll);
6561 			netpoll_poll_unlock(have);
6562 
6563 			local_bh_enable();
6564 
6565 			if (!repoll)
6566 				break;
6567 
6568 			cond_resched();
6569 		}
6570 	}
6571 	return 0;
6572 }
6573 
6574 static __latent_entropy void net_rx_action(struct softirq_action *h)
6575 {
6576 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6577 	unsigned long time_limit = jiffies +
6578 		usecs_to_jiffies(netdev_budget_usecs);
6579 	int budget = netdev_budget;
6580 	LIST_HEAD(list);
6581 	LIST_HEAD(repoll);
6582 
6583 	local_irq_disable();
6584 	list_splice_init(&sd->poll_list, &list);
6585 	local_irq_enable();
6586 
6587 	for (;;) {
6588 		struct napi_struct *n;
6589 
6590 		if (list_empty(&list)) {
6591 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6592 				return;
6593 			break;
6594 		}
6595 
6596 		n = list_first_entry(&list, struct napi_struct, poll_list);
6597 		budget -= napi_poll(n, &repoll);
6598 
		/* If the softirq window is exhausted then punt.
		 * Allow this to run for up to 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
6602 		 */
6603 		if (unlikely(budget <= 0 ||
6604 			     time_after_eq(jiffies, time_limit))) {
6605 			sd->time_squeeze++;
6606 			break;
6607 		}
6608 	}
6609 
6610 	local_irq_disable();
6611 
6612 	list_splice_tail_init(&sd->poll_list, &list);
6613 	list_splice_tail(&repoll, &list);
6614 	list_splice(&list, &sd->poll_list);
6615 	if (!list_empty(&sd->poll_list))
6616 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
6617 
6618 	net_rps_action_and_irq_enable(sd);
6619 }
6620 
6621 struct netdev_adjacent {
6622 	struct net_device *dev;
6623 	netdevice_tracker dev_tracker;
6624 
	/* upper master flag; there can only be one master device per list */
6626 	bool master;
6627 
6628 	/* lookup ignore flag */
6629 	bool ignore;
6630 
6631 	/* counter for the number of times this device was added to us */
6632 	u16 ref_nr;
6633 
6634 	/* private field for the users */
6635 	void *private;
6636 
6637 	struct list_head list;
6638 	struct rcu_head rcu;
6639 };
6640 
6641 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6642 						 struct list_head *adj_list)
6643 {
6644 	struct netdev_adjacent *adj;
6645 
6646 	list_for_each_entry(adj, adj_list, list) {
6647 		if (adj->dev == adj_dev)
6648 			return adj;
6649 	}
6650 	return NULL;
6651 }
6652 
6653 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6654 				    struct netdev_nested_priv *priv)
6655 {
6656 	struct net_device *dev = (struct net_device *)priv->data;
6657 
6658 	return upper_dev == dev;
6659 }
6660 
6661 /**
6662  * netdev_has_upper_dev - Check if device is linked to an upper device
6663  * @dev: device
6664  * @upper_dev: upper device to check
6665  *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only the immediate upper device,
 * not the complete stack of devices. The caller must hold the RTNL lock.
6669  */
6670 bool netdev_has_upper_dev(struct net_device *dev,
6671 			  struct net_device *upper_dev)
6672 {
6673 	struct netdev_nested_priv priv = {
6674 		.data = (void *)upper_dev,
6675 	};
6676 
6677 	ASSERT_RTNL();
6678 
6679 	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6680 					     &priv);
6681 }
6682 EXPORT_SYMBOL(netdev_has_upper_dev);
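
/* Illustrative sketch (hypothetical enslave path): reject a link that
 * already exists, with RTNL held as required.
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(slave_dev, master_dev))
 *		return -EEXIST;		// already an upper of slave_dev
 */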
6683 
6684 /**
6685  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6686  * @dev: device
6687  * @upper_dev: upper device to check
6688  *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
6694 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6695 				  struct net_device *upper_dev)
6696 {
6697 	struct netdev_nested_priv priv = {
6698 		.data = (void *)upper_dev,
6699 	};
6700 
6701 	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6702 					       &priv);
6703 }
6704 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6705 
6706 /**
6707  * netdev_has_any_upper_dev - Check if device is linked to some device
6708  * @dev: device
6709  *
6710  * Find out if a device is linked to an upper device and return true in case
6711  * it is. The caller must hold the RTNL lock.
6712  */
6713 bool netdev_has_any_upper_dev(struct net_device *dev)
6714 {
6715 	ASSERT_RTNL();
6716 
6717 	return !list_empty(&dev->adj_list.upper);
6718 }
6719 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6720 
6721 /**
6722  * netdev_master_upper_dev_get - Get master upper device
6723  * @dev: device
6724  *
6725  * Find a master upper device and return pointer to it or NULL in case
6726  * it's not there. The caller must hold the RTNL lock.
6727  */
6728 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6729 {
6730 	struct netdev_adjacent *upper;
6731 
6732 	ASSERT_RTNL();
6733 
6734 	if (list_empty(&dev->adj_list.upper))
6735 		return NULL;
6736 
6737 	upper = list_first_entry(&dev->adj_list.upper,
6738 				 struct netdev_adjacent, list);
6739 	if (likely(upper->master))
6740 		return upper->dev;
6741 	return NULL;
6742 }
6743 EXPORT_SYMBOL(netdev_master_upper_dev_get);
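
/* Illustrative sketch (hypothetical notifier context): resolving the
 * bond/bridge master of a port while holding RTNL.
 *
 *	struct net_device *master = netdev_master_upper_dev_get(dev);
 *
 *	if (master)
 *		netdev_dbg(dev, "master is %s\n", master->name);
 */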
6744 
6745 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6746 {
6747 	struct netdev_adjacent *upper;
6748 
6749 	ASSERT_RTNL();
6750 
6751 	if (list_empty(&dev->adj_list.upper))
6752 		return NULL;
6753 
6754 	upper = list_first_entry(&dev->adj_list.upper,
6755 				 struct netdev_adjacent, list);
6756 	if (likely(upper->master) && !upper->ignore)
6757 		return upper->dev;
6758 	return NULL;
6759 }
6760 
6761 /**
6762  * netdev_has_any_lower_dev - Check if device is linked to some device
6763  * @dev: device
6764  *
6765  * Find out if a device is linked to a lower device and return true in case
6766  * it is. The caller must hold the RTNL lock.
6767  */
6768 static bool netdev_has_any_lower_dev(struct net_device *dev)
6769 {
6770 	ASSERT_RTNL();
6771 
6772 	return !list_empty(&dev->adj_list.lower);
6773 }
6774 
6775 void *netdev_adjacent_get_private(struct list_head *adj_list)
6776 {
6777 	struct netdev_adjacent *adj;
6778 
6779 	adj = list_entry(adj_list, struct netdev_adjacent, list);
6780 
6781 	return adj->private;
6782 }
6783 EXPORT_SYMBOL(netdev_adjacent_get_private);
6784 
6785 /**
6786  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6787  * @dev: device
6788  * @iter: list_head ** of the current position
6789  *
6790  * Gets the next device from the dev's upper list, starting from iter
6791  * position. The caller must hold RCU read lock.
6792  */
6793 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6794 						 struct list_head **iter)
6795 {
6796 	struct netdev_adjacent *upper;
6797 
6798 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6799 
6800 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6801 
6802 	if (&upper->list == &dev->adj_list.upper)
6803 		return NULL;
6804 
6805 	*iter = &upper->list;
6806 
6807 	return upper->dev;
6808 }
6809 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6810 
6811 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6812 						  struct list_head **iter,
6813 						  bool *ignore)
6814 {
6815 	struct netdev_adjacent *upper;
6816 
6817 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6818 
6819 	if (&upper->list == &dev->adj_list.upper)
6820 		return NULL;
6821 
6822 	*iter = &upper->list;
6823 	*ignore = upper->ignore;
6824 
6825 	return upper->dev;
6826 }
6827 
6828 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6829 						    struct list_head **iter)
6830 {
6831 	struct netdev_adjacent *upper;
6832 
6833 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6834 
6835 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6836 
6837 	if (&upper->list == &dev->adj_list.upper)
6838 		return NULL;
6839 
6840 	*iter = &upper->list;
6841 
6842 	return upper->dev;
6843 }
6844 
6845 static int __netdev_walk_all_upper_dev(struct net_device *dev,
6846 				       int (*fn)(struct net_device *dev,
6847 					 struct netdev_nested_priv *priv),
6848 				       struct netdev_nested_priv *priv)
6849 {
6850 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6851 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6852 	int ret, cur = 0;
6853 	bool ignore;
6854 
6855 	now = dev;
6856 	iter = &dev->adj_list.upper;
6857 
6858 	while (1) {
6859 		if (now != dev) {
6860 			ret = fn(now, priv);
6861 			if (ret)
6862 				return ret;
6863 		}
6864 
6865 		next = NULL;
6866 		while (1) {
6867 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
6868 			if (!udev)
6869 				break;
6870 			if (ignore)
6871 				continue;
6872 
6873 			next = udev;
6874 			niter = &udev->adj_list.upper;
6875 			dev_stack[cur] = now;
6876 			iter_stack[cur++] = iter;
6877 			break;
6878 		}
6879 
6880 		if (!next) {
6881 			if (!cur)
6882 				return 0;
6883 			next = dev_stack[--cur];
6884 			niter = iter_stack[cur];
6885 		}
6886 
6887 		now = next;
6888 		iter = niter;
6889 	}
6890 
6891 	return 0;
6892 }
6893 
6894 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6895 				  int (*fn)(struct net_device *dev,
6896 					    struct netdev_nested_priv *priv),
6897 				  struct netdev_nested_priv *priv)
6898 {
6899 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6900 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6901 	int ret, cur = 0;
6902 
6903 	now = dev;
6904 	iter = &dev->adj_list.upper;
6905 
6906 	while (1) {
6907 		if (now != dev) {
6908 			ret = fn(now, priv);
6909 			if (ret)
6910 				return ret;
6911 		}
6912 
6913 		next = NULL;
6914 		while (1) {
6915 			udev = netdev_next_upper_dev_rcu(now, &iter);
6916 			if (!udev)
6917 				break;
6918 
6919 			next = udev;
6920 			niter = &udev->adj_list.upper;
6921 			dev_stack[cur] = now;
6922 			iter_stack[cur++] = iter;
6923 			break;
6924 		}
6925 
6926 		if (!next) {
6927 			if (!cur)
6928 				return 0;
6929 			next = dev_stack[--cur];
6930 			niter = iter_stack[cur];
6931 		}
6932 
6933 		now = next;
6934 		iter = niter;
6935 	}
6936 
6937 	return 0;
6938 }
6939 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
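
/* Illustrative sketch (hypothetical callback): @fn is invoked once for
 * every device above @dev; a non-zero return stops the walk and is
 * propagated.  The priv cookie carries caller state, here a counter.
 *
 *	static int example_count_uppers(struct net_device *dev,
 *					struct netdev_nested_priv *priv)
 *	{
 *		(*(int *)priv->data)++;
 *		return 0;	// keep walking
 *	}
 *
 *	int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, example_count_uppers, &priv);
 *	rcu_read_unlock();
 */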
6940 
6941 static bool __netdev_has_upper_dev(struct net_device *dev,
6942 				   struct net_device *upper_dev)
6943 {
6944 	struct netdev_nested_priv priv = {
6945 		.flags = 0,
6946 		.data = (void *)upper_dev,
6947 	};
6948 
6949 	ASSERT_RTNL();
6950 
6951 	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
6952 					   &priv);
6953 }
6954 
6955 /**
6956  * netdev_lower_get_next_private - Get the next ->private from the
6957  *				   lower neighbour list
6958  * @dev: device
6959  * @iter: list_head ** of the current position
6960  *
6961  * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or provide its own locking that guarantees that the neighbour
 * lower list will remain unchanged.
6965  */
6966 void *netdev_lower_get_next_private(struct net_device *dev,
6967 				    struct list_head **iter)
6968 {
6969 	struct netdev_adjacent *lower;
6970 
6971 	lower = list_entry(*iter, struct netdev_adjacent, list);
6972 
6973 	if (&lower->list == &dev->adj_list.lower)
6974 		return NULL;
6975 
6976 	*iter = lower->list.next;
6977 
6978 	return lower->private;
6979 }
6980 EXPORT_SYMBOL(netdev_lower_get_next_private);
6981 
6982 /**
6983  * netdev_lower_get_next_private_rcu - Get the next ->private from the
6984  *				       lower neighbour list, RCU
6985  *				       variant
6986  * @dev: device
6987  * @iter: list_head ** of the current position
6988  *
6989  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6990  * list, starting from iter position. The caller must hold RCU read lock.
6991  */
6992 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6993 					struct list_head **iter)
6994 {
6995 	struct netdev_adjacent *lower;
6996 
6997 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
6998 
6999 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7000 
7001 	if (&lower->list == &dev->adj_list.lower)
7002 		return NULL;
7003 
7004 	*iter = &lower->list;
7005 
7006 	return lower->private;
7007 }
7008 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7009 
7010 /**
7011  * netdev_lower_get_next - Get the next device from the lower neighbour
7012  *                         list
7013  * @dev: device
7014  * @iter: list_head ** of the current position
7015  *
7016  * Gets the next netdev_adjacent from the dev's lower neighbour
7017  * list, starting from iter position. The caller must hold RTNL lock or
7018  * its own locking that guarantees that the neighbour lower
7019  * list will remain unchanged.
7020  */
7021 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7022 {
7023 	struct netdev_adjacent *lower;
7024 
7025 	lower = list_entry(*iter, struct netdev_adjacent, list);
7026 
7027 	if (&lower->list == &dev->adj_list.lower)
7028 		return NULL;
7029 
7030 	*iter = lower->list.next;
7031 
7032 	return lower->dev;
7033 }
7034 EXPORT_SYMBOL(netdev_lower_get_next);
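
/* Illustrative sketch: this iterator is normally driven through the
 * netdev_for_each_lower_dev() helper macro from <linux/netdevice.h>,
 * with the locking described above.
 *
 *	struct net_device *ldev;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(dev, ldev, iter)
 *		netdev_dbg(dev, "lower: %s\n", ldev->name);
 */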
7035 
7036 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7037 						struct list_head **iter)
7038 {
7039 	struct netdev_adjacent *lower;
7040 
7041 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7042 
7043 	if (&lower->list == &dev->adj_list.lower)
7044 		return NULL;
7045 
7046 	*iter = &lower->list;
7047 
7048 	return lower->dev;
7049 }
7050 
7051 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7052 						  struct list_head **iter,
7053 						  bool *ignore)
7054 {
7055 	struct netdev_adjacent *lower;
7056 
7057 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7058 
7059 	if (&lower->list == &dev->adj_list.lower)
7060 		return NULL;
7061 
7062 	*iter = &lower->list;
7063 	*ignore = lower->ignore;
7064 
7065 	return lower->dev;
7066 }
7067 
7068 int netdev_walk_all_lower_dev(struct net_device *dev,
7069 			      int (*fn)(struct net_device *dev,
7070 					struct netdev_nested_priv *priv),
7071 			      struct netdev_nested_priv *priv)
7072 {
7073 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7074 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7075 	int ret, cur = 0;
7076 
7077 	now = dev;
7078 	iter = &dev->adj_list.lower;
7079 
7080 	while (1) {
7081 		if (now != dev) {
7082 			ret = fn(now, priv);
7083 			if (ret)
7084 				return ret;
7085 		}
7086 
7087 		next = NULL;
7088 		while (1) {
7089 			ldev = netdev_next_lower_dev(now, &iter);
7090 			if (!ldev)
7091 				break;
7092 
7093 			next = ldev;
7094 			niter = &ldev->adj_list.lower;
7095 			dev_stack[cur] = now;
7096 			iter_stack[cur++] = iter;
7097 			break;
7098 		}
7099 
7100 		if (!next) {
7101 			if (!cur)
7102 				return 0;
7103 			next = dev_stack[--cur];
7104 			niter = iter_stack[cur];
7105 		}
7106 
7107 		now = next;
7108 		iter = niter;
7109 	}
7110 
7111 	return 0;
7112 }
7113 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7114 
7115 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7116 				       int (*fn)(struct net_device *dev,
7117 					 struct netdev_nested_priv *priv),
7118 				       struct netdev_nested_priv *priv)
7119 {
7120 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7121 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7122 	int ret, cur = 0;
7123 	bool ignore;
7124 
7125 	now = dev;
7126 	iter = &dev->adj_list.lower;
7127 
7128 	while (1) {
7129 		if (now != dev) {
7130 			ret = fn(now, priv);
7131 			if (ret)
7132 				return ret;
7133 		}
7134 
7135 		next = NULL;
7136 		while (1) {
7137 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7138 			if (!ldev)
7139 				break;
7140 			if (ignore)
7141 				continue;
7142 
7143 			next = ldev;
7144 			niter = &ldev->adj_list.lower;
7145 			dev_stack[cur] = now;
7146 			iter_stack[cur++] = iter;
7147 			break;
7148 		}
7149 
7150 		if (!next) {
7151 			if (!cur)
7152 				return 0;
7153 			next = dev_stack[--cur];
7154 			niter = iter_stack[cur];
7155 		}
7156 
7157 		now = next;
7158 		iter = niter;
7159 	}
7160 
7161 	return 0;
7162 }
7163 
7164 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7165 					     struct list_head **iter)
7166 {
7167 	struct netdev_adjacent *lower;
7168 
7169 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7170 	if (&lower->list == &dev->adj_list.lower)
7171 		return NULL;
7172 
7173 	*iter = &lower->list;
7174 
7175 	return lower->dev;
7176 }
7177 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7178 
7179 static u8 __netdev_upper_depth(struct net_device *dev)
7180 {
7181 	struct net_device *udev;
7182 	struct list_head *iter;
7183 	u8 max_depth = 0;
7184 	bool ignore;
7185 
7186 	for (iter = &dev->adj_list.upper,
7187 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7188 	     udev;
7189 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7190 		if (ignore)
7191 			continue;
7192 		if (max_depth < udev->upper_level)
7193 			max_depth = udev->upper_level;
7194 	}
7195 
7196 	return max_depth;
7197 }
7198 
7199 static u8 __netdev_lower_depth(struct net_device *dev)
7200 {
7201 	struct net_device *ldev;
7202 	struct list_head *iter;
7203 	u8 max_depth = 0;
7204 	bool ignore;
7205 
7206 	for (iter = &dev->adj_list.lower,
7207 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7208 	     ldev;
7209 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7210 		if (ignore)
7211 			continue;
7212 		if (max_depth < ldev->lower_level)
7213 			max_depth = ldev->lower_level;
7214 	}
7215 
7216 	return max_depth;
7217 }
7218 
7219 static int __netdev_update_upper_level(struct net_device *dev,
7220 				       struct netdev_nested_priv *__unused)
7221 {
7222 	dev->upper_level = __netdev_upper_depth(dev) + 1;
7223 	return 0;
7224 }
7225 
7226 #ifdef CONFIG_LOCKDEP
7227 static LIST_HEAD(net_unlink_list);
7228 
7229 static void net_unlink_todo(struct net_device *dev)
7230 {
7231 	if (list_empty(&dev->unlink_list))
7232 		list_add_tail(&dev->unlink_list, &net_unlink_list);
7233 }
7234 #endif
7235 
7236 static int __netdev_update_lower_level(struct net_device *dev,
7237 				       struct netdev_nested_priv *priv)
7238 {
7239 	dev->lower_level = __netdev_lower_depth(dev) + 1;
7240 
7241 #ifdef CONFIG_LOCKDEP
7242 	if (!priv)
7243 		return 0;
7244 
7245 	if (priv->flags & NESTED_SYNC_IMM)
7246 		dev->nested_level = dev->lower_level - 1;
7247 	if (priv->flags & NESTED_SYNC_TODO)
7248 		net_unlink_todo(dev);
7249 #endif
7250 	return 0;
7251 }
7252 
7253 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7254 				  int (*fn)(struct net_device *dev,
7255 					    struct netdev_nested_priv *priv),
7256 				  struct netdev_nested_priv *priv)
7257 {
7258 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7259 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7260 	int ret, cur = 0;
7261 
7262 	now = dev;
7263 	iter = &dev->adj_list.lower;
7264 
7265 	while (1) {
7266 		if (now != dev) {
7267 			ret = fn(now, priv);
7268 			if (ret)
7269 				return ret;
7270 		}
7271 
7272 		next = NULL;
7273 		while (1) {
7274 			ldev = netdev_next_lower_dev_rcu(now, &iter);
7275 			if (!ldev)
7276 				break;
7277 
7278 			next = ldev;
7279 			niter = &ldev->adj_list.lower;
7280 			dev_stack[cur] = now;
7281 			iter_stack[cur++] = iter;
7282 			break;
7283 		}
7284 
7285 		if (!next) {
7286 			if (!cur)
7287 				return 0;
7288 			next = dev_stack[--cur];
7289 			niter = iter_stack[cur];
7290 		}
7291 
7292 		now = next;
7293 		iter = niter;
7294 	}
7295 
7296 	return 0;
7297 }
7298 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7299 
7300 /**
7301  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7302  *				       lower neighbour list, RCU
7303  *				       variant
7304  * @dev: device
7305  *
7306  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7307  * list. The caller must hold RCU read lock.
7308  */
7309 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7310 {
7311 	struct netdev_adjacent *lower;
7312 
7313 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
7314 			struct netdev_adjacent, list);
7315 	if (lower)
7316 		return lower->private;
7317 	return NULL;
7318 }
7319 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7320 
7321 /**
7322  * netdev_master_upper_dev_get_rcu - Get master upper device
7323  * @dev: device
7324  *
7325  * Find a master upper device and return pointer to it or NULL in case
7326  * it's not there. The caller must hold the RCU read lock.
7327  */
7328 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7329 {
7330 	struct netdev_adjacent *upper;
7331 
7332 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
7333 				       struct netdev_adjacent, list);
7334 	if (upper && likely(upper->master))
7335 		return upper->dev;
7336 	return NULL;
7337 }
7338 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7339 
7340 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7341 			      struct net_device *adj_dev,
7342 			      struct list_head *dev_list)
7343 {
7344 	char linkname[IFNAMSIZ+7];
7345 
7346 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7347 		"upper_%s" : "lower_%s", adj_dev->name);
7348 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7349 				 linkname);
7350 }
7351 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7352 			       char *name,
7353 			       struct list_head *dev_list)
7354 {
7355 	char linkname[IFNAMSIZ+7];
7356 
7357 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7358 		"upper_%s" : "lower_%s", name);
7359 	sysfs_remove_link(&(dev->dev.kobj), linkname);
7360 }
7361 
7362 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7363 						 struct net_device *adj_dev,
7364 						 struct list_head *dev_list)
7365 {
7366 	return (dev_list == &dev->adj_list.upper ||
7367 		dev_list == &dev->adj_list.lower) &&
7368 		net_eq(dev_net(dev), dev_net(adj_dev));
7369 }
7370 
7371 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7372 					struct net_device *adj_dev,
7373 					struct list_head *dev_list,
7374 					void *private, bool master)
7375 {
7376 	struct netdev_adjacent *adj;
7377 	int ret;
7378 
7379 	adj = __netdev_find_adj(adj_dev, dev_list);
7380 
7381 	if (adj) {
7382 		adj->ref_nr += 1;
7383 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7384 			 dev->name, adj_dev->name, adj->ref_nr);
7385 
7386 		return 0;
7387 	}
7388 
7389 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7390 	if (!adj)
7391 		return -ENOMEM;
7392 
7393 	adj->dev = adj_dev;
7394 	adj->master = master;
7395 	adj->ref_nr = 1;
7396 	adj->private = private;
7397 	adj->ignore = false;
7398 	dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7399 
7400 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7401 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7402 
7403 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7404 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7405 		if (ret)
7406 			goto free_adj;
7407 	}
7408 
	/* Ensure that the master link is always the first item in the list. */
7410 	if (master) {
7411 		ret = sysfs_create_link(&(dev->dev.kobj),
7412 					&(adj_dev->dev.kobj), "master");
7413 		if (ret)
7414 			goto remove_symlinks;
7415 
7416 		list_add_rcu(&adj->list, dev_list);
7417 	} else {
7418 		list_add_tail_rcu(&adj->list, dev_list);
7419 	}
7420 
7421 	return 0;
7422 
7423 remove_symlinks:
7424 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7425 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7426 free_adj:
7427 	dev_put_track(adj_dev, &adj->dev_tracker);
7428 	kfree(adj);
7429 
7430 	return ret;
7431 }
7432 
7433 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7434 					 struct net_device *adj_dev,
7435 					 u16 ref_nr,
7436 					 struct list_head *dev_list)
7437 {
7438 	struct netdev_adjacent *adj;
7439 
7440 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7441 		 dev->name, adj_dev->name, ref_nr);
7442 
7443 	adj = __netdev_find_adj(adj_dev, dev_list);
7444 
7445 	if (!adj) {
7446 		pr_err("Adjacency does not exist for device %s from %s\n",
7447 		       dev->name, adj_dev->name);
7448 		WARN_ON(1);
7449 		return;
7450 	}
7451 
7452 	if (adj->ref_nr > ref_nr) {
7453 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7454 			 dev->name, adj_dev->name, ref_nr,
7455 			 adj->ref_nr - ref_nr);
7456 		adj->ref_nr -= ref_nr;
7457 		return;
7458 	}
7459 
7460 	if (adj->master)
7461 		sysfs_remove_link(&(dev->dev.kobj), "master");
7462 
7463 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7464 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7465 
7466 	list_del_rcu(&adj->list);
7467 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7468 		 adj_dev->name, dev->name, adj_dev->name);
7469 	dev_put_track(adj_dev, &adj->dev_tracker);
7470 	kfree_rcu(adj, rcu);
7471 }
7472 
7473 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7474 					    struct net_device *upper_dev,
7475 					    struct list_head *up_list,
7476 					    struct list_head *down_list,
7477 					    void *private, bool master)
7478 {
7479 	int ret;
7480 
7481 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7482 					   private, master);
7483 	if (ret)
7484 		return ret;
7485 
7486 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7487 					   private, false);
7488 	if (ret) {
7489 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7490 		return ret;
7491 	}
7492 
7493 	return 0;
7494 }
7495 
7496 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7497 					       struct net_device *upper_dev,
7498 					       u16 ref_nr,
7499 					       struct list_head *up_list,
7500 					       struct list_head *down_list)
7501 {
7502 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7503 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7504 }
7505 
7506 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7507 						struct net_device *upper_dev,
7508 						void *private, bool master)
7509 {
7510 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7511 						&dev->adj_list.upper,
7512 						&upper_dev->adj_list.lower,
7513 						private, master);
7514 }
7515 
7516 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7517 						   struct net_device *upper_dev)
7518 {
7519 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7520 					   &dev->adj_list.upper,
7521 					   &upper_dev->adj_list.lower);
7522 }
7523 
7524 static int __netdev_upper_dev_link(struct net_device *dev,
7525 				   struct net_device *upper_dev, bool master,
7526 				   void *upper_priv, void *upper_info,
7527 				   struct netdev_nested_priv *priv,
7528 				   struct netlink_ext_ack *extack)
7529 {
7530 	struct netdev_notifier_changeupper_info changeupper_info = {
7531 		.info = {
7532 			.dev = dev,
7533 			.extack = extack,
7534 		},
7535 		.upper_dev = upper_dev,
7536 		.master = master,
7537 		.linking = true,
7538 		.upper_info = upper_info,
7539 	};
7540 	struct net_device *master_dev;
7541 	int ret = 0;
7542 
7543 	ASSERT_RTNL();
7544 
7545 	if (dev == upper_dev)
7546 		return -EBUSY;
7547 
7548 	/* To prevent loops, check that dev is not an upper device of upper_dev. */
7549 	if (__netdev_has_upper_dev(upper_dev, dev))
7550 		return -EBUSY;
7551 
7552 	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7553 		return -EMLINK;
7554 
7555 	if (!master) {
7556 		if (__netdev_has_upper_dev(dev, upper_dev))
7557 			return -EEXIST;
7558 	} else {
7559 		master_dev = __netdev_master_upper_dev_get(dev);
7560 		if (master_dev)
7561 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
7562 	}
7563 
7564 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7565 					    &changeupper_info.info);
7566 	ret = notifier_to_errno(ret);
7567 	if (ret)
7568 		return ret;
7569 
7570 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7571 						   master);
7572 	if (ret)
7573 		return ret;
7574 
7575 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7576 					    &changeupper_info.info);
7577 	ret = notifier_to_errno(ret);
7578 	if (ret)
7579 		goto rollback;
7580 
7581 	__netdev_update_upper_level(dev, NULL);
7582 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7583 
7584 	__netdev_update_lower_level(upper_dev, priv);
7585 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7586 				    priv);
7587 
7588 	return 0;
7589 
7590 rollback:
7591 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7592 
7593 	return ret;
7594 }
7595 
7596 /**
7597  * netdev_upper_dev_link - Add a link to the upper device
7598  * @dev: device
7599  * @upper_dev: new upper device
7600  * @extack: netlink extended ack
7601  *
7602  * Adds a link to a device that is upper to this one. The caller must hold
7603  * the RTNL lock. On failure a negative errno code is returned.
7604  * On success the reference counts are adjusted and the function
7605  * returns zero.
7606  */
7607 int netdev_upper_dev_link(struct net_device *dev,
7608 			  struct net_device *upper_dev,
7609 			  struct netlink_ext_ack *extack)
7610 {
7611 	struct netdev_nested_priv priv = {
7612 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7613 		.data = NULL,
7614 	};
7615 
7616 	return __netdev_upper_dev_link(dev, upper_dev, false,
7617 				       NULL, NULL, &priv, extack);
7618 }
7619 EXPORT_SYMBOL(netdev_upper_dev_link);
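
/*
 * Editorial sketch (not part of the original file): how a hypothetical
 * stacking driver might take the RTNL lock around the link call above.
 * The example_ name is illustrative only; netdev_upper_dev_unlink()
 * below is the matching teardown.
 */
static int __maybe_unused example_stack_link(struct net_device *dev,
					     struct net_device *upper_dev,
					     struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = netdev_upper_dev_link(dev, upper_dev, extack);
	rtnl_unlock();

	return err;	/* 0 on success, e.g. -EBUSY if a loop would form */
}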
7620 
7621 /**
7622  * netdev_master_upper_dev_link - Add a master link to the upper device
7623  * @dev: device
7624  * @upper_dev: new upper device
7625  * @upper_priv: upper device private
7626  * @upper_info: upper info to be passed down via notifier
7627  * @extack: netlink extended ack
7628  *
7629  * Adds a link to a device that is upper to this one. In this case, only
7630  * one master upper device can be linked, although other non-master devices
7631  * might be linked as well. The caller must hold the RTNL lock.
7632  * On failure a negative errno code is returned. On success the reference
7633  * counts are adjusted and the function returns zero.
7634  */
7635 int netdev_master_upper_dev_link(struct net_device *dev,
7636 				 struct net_device *upper_dev,
7637 				 void *upper_priv, void *upper_info,
7638 				 struct netlink_ext_ack *extack)
7639 {
7640 	struct netdev_nested_priv priv = {
7641 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7642 		.data = NULL,
7643 	};
7644 
7645 	return __netdev_upper_dev_link(dev, upper_dev, true,
7646 				       upper_priv, upper_info, &priv, extack);
7647 }
7648 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7649 
7650 static void __netdev_upper_dev_unlink(struct net_device *dev,
7651 				      struct net_device *upper_dev,
7652 				      struct netdev_nested_priv *priv)
7653 {
7654 	struct netdev_notifier_changeupper_info changeupper_info = {
7655 		.info = {
7656 			.dev = dev,
7657 		},
7658 		.upper_dev = upper_dev,
7659 		.linking = false,
7660 	};
7661 
7662 	ASSERT_RTNL();
7663 
7664 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7665 
7666 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7667 				      &changeupper_info.info);
7668 
7669 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7670 
7671 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7672 				      &changeupper_info.info);
7673 
7674 	__netdev_update_upper_level(dev, NULL);
7675 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7676 
7677 	__netdev_update_lower_level(upper_dev, priv);
7678 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7679 				    priv);
7680 }
7681 
7682 /**
7683  * netdev_upper_dev_unlink - Removes a link to an upper device
7684  * @dev: device
7685  * @upper_dev: upper device to unlink
7686  *
7687  * Removes the link to a device that is upper to this one. The caller must hold
7688  * the RTNL lock.
7689  */
7690 void netdev_upper_dev_unlink(struct net_device *dev,
7691 			     struct net_device *upper_dev)
7692 {
7693 	struct netdev_nested_priv priv = {
7694 		.flags = NESTED_SYNC_TODO,
7695 		.data = NULL,
7696 	};
7697 
7698 	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
7699 }
7700 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7701 
7702 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7703 				      struct net_device *lower_dev,
7704 				      bool val)
7705 {
7706 	struct netdev_adjacent *adj;
7707 
7708 	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7709 	if (adj)
7710 		adj->ignore = val;
7711 
7712 	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7713 	if (adj)
7714 		adj->ignore = val;
7715 }
7716 
7717 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7718 					struct net_device *lower_dev)
7719 {
7720 	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7721 }
7722 
7723 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7724 				       struct net_device *lower_dev)
7725 {
7726 	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7727 }
7728 
7729 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7730 				   struct net_device *new_dev,
7731 				   struct net_device *dev,
7732 				   struct netlink_ext_ack *extack)
7733 {
7734 	struct netdev_nested_priv priv = {
7735 		.flags = 0,
7736 		.data = NULL,
7737 	};
7738 	int err;
7739 
7740 	if (!new_dev)
7741 		return 0;
7742 
7743 	if (old_dev && new_dev != old_dev)
7744 		netdev_adjacent_dev_disable(dev, old_dev);
7745 	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7746 				      extack);
7747 	if (err) {
7748 		if (old_dev && new_dev != old_dev)
7749 			netdev_adjacent_dev_enable(dev, old_dev);
7750 		return err;
7751 	}
7752 
7753 	return 0;
7754 }
7755 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7756 
7757 void netdev_adjacent_change_commit(struct net_device *old_dev,
7758 				   struct net_device *new_dev,
7759 				   struct net_device *dev)
7760 {
7761 	struct netdev_nested_priv priv = {
7762 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7763 		.data = NULL,
7764 	};
7765 
7766 	if (!new_dev || !old_dev)
7767 		return;
7768 
7769 	if (new_dev == old_dev)
7770 		return;
7771 
7772 	netdev_adjacent_dev_enable(dev, old_dev);
7773 	__netdev_upper_dev_unlink(old_dev, dev, &priv);
7774 }
7775 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7776 
7777 void netdev_adjacent_change_abort(struct net_device *old_dev,
7778 				  struct net_device *new_dev,
7779 				  struct net_device *dev)
7780 {
7781 	struct netdev_nested_priv priv = {
7782 		.flags = 0,
7783 		.data = NULL,
7784 	};
7785 
7786 	if (!new_dev)
7787 		return;
7788 
7789 	if (old_dev && new_dev != old_dev)
7790 		netdev_adjacent_dev_enable(dev, old_dev);
7791 
7792 	__netdev_upper_dev_unlink(new_dev, dev, &priv);
7793 }
7794 EXPORT_SYMBOL(netdev_adjacent_change_abort);
7795 
7796 /**
7797  * netdev_bonding_info_change - Dispatch event about slave change
7798  * @dev: device
7799  * @bonding_info: info to dispatch
7800  *
7801  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7802  * The caller must hold the RTNL lock.
7803  */
7804 void netdev_bonding_info_change(struct net_device *dev,
7805 				struct netdev_bonding_info *bonding_info)
7806 {
7807 	struct netdev_notifier_bonding_info info = {
7808 		.info.dev = dev,
7809 	};
7810 
7811 	memcpy(&info.bonding_info, bonding_info,
7812 	       sizeof(struct netdev_bonding_info));
7813 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7814 				      &info.info);
7815 }
7816 EXPORT_SYMBOL(netdev_bonding_info_change);
7817 
7818 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
7819 					   struct netlink_ext_ack *extack)
7820 {
7821 	struct netdev_notifier_offload_xstats_info info = {
7822 		.info.dev = dev,
7823 		.info.extack = extack,
7824 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7825 	};
7826 	int err;
7827 	int rc;
7828 
7829 	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
7830 					 GFP_KERNEL);
7831 	if (!dev->offload_xstats_l3)
7832 		return -ENOMEM;
7833 
7834 	rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
7835 						  NETDEV_OFFLOAD_XSTATS_DISABLE,
7836 						  &info.info);
7837 	err = notifier_to_errno(rc);
7838 	if (err)
7839 		goto free_stats;
7840 
7841 	return 0;
7842 
7843 free_stats:
7844 	kfree(dev->offload_xstats_l3);
7845 	dev->offload_xstats_l3 = NULL;
7846 	return err;
7847 }
7848 
7849 int netdev_offload_xstats_enable(struct net_device *dev,
7850 				 enum netdev_offload_xstats_type type,
7851 				 struct netlink_ext_ack *extack)
7852 {
7853 	ASSERT_RTNL();
7854 
7855 	if (netdev_offload_xstats_enabled(dev, type))
7856 		return -EALREADY;
7857 
7858 	switch (type) {
7859 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7860 		return netdev_offload_xstats_enable_l3(dev, extack);
7861 	}
7862 
7863 	WARN_ON(1);
7864 	return -EINVAL;
7865 }
7866 EXPORT_SYMBOL(netdev_offload_xstats_enable);
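
/*
 * Editorial sketch (not part of the original file): enabling L3 offload
 * xstats and tolerating a repeat call. Assumes the caller already holds
 * RTNL, as netdev_offload_xstats_enable() requires; the example_ name
 * is illustrative only.
 */
static int __maybe_unused example_enable_l3_stats(struct net_device *dev,
						  struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();

	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					   extack);
	if (err == -EALREADY)
		err = 0;	/* already enabled; treat as success here */
	return err;
}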
7867 
7868 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
7869 {
7870 	struct netdev_notifier_offload_xstats_info info = {
7871 		.info.dev = dev,
7872 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7873 	};
7874 
7875 	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
7876 				      &info.info);
7877 	kfree(dev->offload_xstats_l3);
7878 	dev->offload_xstats_l3 = NULL;
7879 }
7880 
7881 int netdev_offload_xstats_disable(struct net_device *dev,
7882 				  enum netdev_offload_xstats_type type)
7883 {
7884 	ASSERT_RTNL();
7885 
7886 	if (!netdev_offload_xstats_enabled(dev, type))
7887 		return -EALREADY;
7888 
7889 	switch (type) {
7890 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7891 		netdev_offload_xstats_disable_l3(dev);
7892 		return 0;
7893 	}
7894 
7895 	WARN_ON(1);
7896 	return -EINVAL;
7897 }
7898 EXPORT_SYMBOL(netdev_offload_xstats_disable);
7899 
7900 static void netdev_offload_xstats_disable_all(struct net_device *dev)
7901 {
7902 	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
7903 }
7904 
7905 static struct rtnl_hw_stats64 *
7906 netdev_offload_xstats_get_ptr(const struct net_device *dev,
7907 			      enum netdev_offload_xstats_type type)
7908 {
7909 	switch (type) {
7910 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7911 		return dev->offload_xstats_l3;
7912 	}
7913 
7914 	WARN_ON(1);
7915 	return NULL;
7916 }
7917 
7918 bool netdev_offload_xstats_enabled(const struct net_device *dev,
7919 				   enum netdev_offload_xstats_type type)
7920 {
7921 	ASSERT_RTNL();
7922 
7923 	return netdev_offload_xstats_get_ptr(dev, type);
7924 }
7925 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
7926 
7927 struct netdev_notifier_offload_xstats_ru {
7928 	bool used;
7929 };
7930 
7931 struct netdev_notifier_offload_xstats_rd {
7932 	struct rtnl_hw_stats64 stats;
7933 	bool used;
7934 };
7935 
7936 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
7937 				  const struct rtnl_hw_stats64 *src)
7938 {
7939 	dest->rx_packets	  += src->rx_packets;
7940 	dest->tx_packets	  += src->tx_packets;
7941 	dest->rx_bytes		  += src->rx_bytes;
7942 	dest->tx_bytes		  += src->tx_bytes;
7943 	dest->rx_errors		  += src->rx_errors;
7944 	dest->tx_errors		  += src->tx_errors;
7945 	dest->rx_dropped	  += src->rx_dropped;
7946 	dest->tx_dropped	  += src->tx_dropped;
7947 	dest->multicast		  += src->multicast;
7948 }
7949 
7950 static int netdev_offload_xstats_get_used(struct net_device *dev,
7951 					  enum netdev_offload_xstats_type type,
7952 					  bool *p_used,
7953 					  struct netlink_ext_ack *extack)
7954 {
7955 	struct netdev_notifier_offload_xstats_ru report_used = {};
7956 	struct netdev_notifier_offload_xstats_info info = {
7957 		.info.dev = dev,
7958 		.info.extack = extack,
7959 		.type = type,
7960 		.report_used = &report_used,
7961 	};
7962 	int rc;
7963 
7964 	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
7965 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
7966 					   &info.info);
7967 	*p_used = report_used.used;
7968 	return notifier_to_errno(rc);
7969 }
7970 
7971 static int netdev_offload_xstats_get_stats(struct net_device *dev,
7972 					   enum netdev_offload_xstats_type type,
7973 					   struct rtnl_hw_stats64 *p_stats,
7974 					   bool *p_used,
7975 					   struct netlink_ext_ack *extack)
7976 {
7977 	struct netdev_notifier_offload_xstats_rd report_delta = {};
7978 	struct netdev_notifier_offload_xstats_info info = {
7979 		.info.dev = dev,
7980 		.info.extack = extack,
7981 		.type = type,
7982 		.report_delta = &report_delta,
7983 	};
7984 	struct rtnl_hw_stats64 *stats;
7985 	int rc;
7986 
7987 	stats = netdev_offload_xstats_get_ptr(dev, type);
7988 	if (WARN_ON(!stats))
7989 		return -EINVAL;
7990 
7991 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
7992 					   &info.info);
7993 
7994 	/* Cache whatever we got, even if there was an error, otherwise the
7995 	 * successful stats retrievals would get lost.
7996 	 */
7997 	netdev_hw_stats64_add(stats, &report_delta.stats);
7998 
7999 	if (p_stats)
8000 		*p_stats = *stats;
8001 	*p_used = report_delta.used;
8002 
8003 	return notifier_to_errno(rc);
8004 }
8005 
8006 int netdev_offload_xstats_get(struct net_device *dev,
8007 			      enum netdev_offload_xstats_type type,
8008 			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
8009 			      struct netlink_ext_ack *extack)
8010 {
8011 	ASSERT_RTNL();
8012 
8013 	if (p_stats)
8014 		return netdev_offload_xstats_get_stats(dev, type, p_stats,
8015 						       p_used, extack);
8016 	else
8017 		return netdev_offload_xstats_get_used(dev, type, p_used,
8018 						      extack);
8019 }
8020 EXPORT_SYMBOL(netdev_offload_xstats_get);
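
/*
 * Editorial sketch (not part of the original file): reading the cached
 * L3 totals. A non-NULL p_stats makes netdev_offload_xstats_get() do a
 * REPORT_DELTA round trip to drivers before returning the sum; the
 * example_ name is illustrative only.
 */
static int __maybe_unused example_read_l3_stats(struct net_device *dev,
						struct netlink_ext_ack *extack)
{
	struct rtnl_hw_stats64 stats;
	bool used;
	int err;

	ASSERT_RTNL();

	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					&stats, &used, extack);
	if (err)
		return err;

	netdev_dbg(dev, "L3 offload stats %s: rx_packets %llu tx_packets %llu\n",
		   used ? "in use" : "unused",
		   stats.rx_packets, stats.tx_packets);
	return 0;
}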
8021 
8022 void
8023 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8024 				   const struct rtnl_hw_stats64 *stats)
8025 {
8026 	report_delta->used = true;
8027 	netdev_hw_stats64_add(&report_delta->stats, stats);
8028 }
8029 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8030 
8031 void
8032 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8033 {
8034 	report_used->used = true;
8035 }
8036 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
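
/*
 * Editorial sketch (not part of the original file): the driver side of
 * the report_used/report_delta protocol above, as a hypothetical
 * netdevice-notifier handler. The zeroed delta stands in for real
 * hardware counters; all example_ names are illustrative only.
 */
static int __maybe_unused example_netdevice_event(struct notifier_block *nb,
						  unsigned long event,
						  void *ptr)
{
	struct netdev_notifier_offload_xstats_info *info = ptr;
	struct rtnl_hw_stats64 delta = {};

	if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		netdev_offload_xstats_report_used(info->report_used);
		return NOTIFY_OK;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		/* a real driver would fill @delta from hardware here */
		netdev_offload_xstats_report_delta(info->report_delta, &delta);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}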
8037 
8038 void netdev_offload_xstats_push_delta(struct net_device *dev,
8039 				      enum netdev_offload_xstats_type type,
8040 				      const struct rtnl_hw_stats64 *p_stats)
8041 {
8042 	struct rtnl_hw_stats64 *stats;
8043 
8044 	ASSERT_RTNL();
8045 
8046 	stats = netdev_offload_xstats_get_ptr(dev, type);
8047 	if (WARN_ON(!stats))
8048 		return;
8049 
8050 	netdev_hw_stats64_add(stats, p_stats);
8051 }
8052 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
8053 
8054 /**
8055  * netdev_get_xmit_slave - Get the xmit slave of master device
8056  * @dev: device
8057  * @skb: The packet
8058  * @all_slaves: assume all the slaves are active
8059  *
8060  * The reference counters are not incremented, so the caller must be
8061  * careful with locks. The caller must hold the RCU read lock.
8062  * %NULL is returned if no slave is found.
8063  */
8064 
8065 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8066 					 struct sk_buff *skb,
8067 					 bool all_slaves)
8068 {
8069 	const struct net_device_ops *ops = dev->netdev_ops;
8070 
8071 	if (!ops->ndo_get_xmit_slave)
8072 		return NULL;
8073 	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8074 }
8075 EXPORT_SYMBOL(netdev_get_xmit_slave);
8076 
8077 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8078 						  struct sock *sk)
8079 {
8080 	const struct net_device_ops *ops = dev->netdev_ops;
8081 
8082 	if (!ops->ndo_sk_get_lower_dev)
8083 		return NULL;
8084 	return ops->ndo_sk_get_lower_dev(dev, sk);
8085 }
8086 
8087 /**
8088  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8089  * @dev: device
8090  * @sk: the socket
8091  *
8092  * %NULL is returned if no lower device is found.
8093  */
8094 
8095 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8096 					    struct sock *sk)
8097 {
8098 	struct net_device *lower;
8099 
8100 	lower = netdev_sk_get_lower_dev(dev, sk);
8101 	while (lower) {
8102 		dev = lower;
8103 		lower = netdev_sk_get_lower_dev(dev, sk);
8104 	}
8105 
8106 	return dev;
8107 }
8108 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8109 
8110 static void netdev_adjacent_add_links(struct net_device *dev)
8111 {
8112 	struct netdev_adjacent *iter;
8113 
8114 	struct net *net = dev_net(dev);
8115 
8116 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8117 		if (!net_eq(net, dev_net(iter->dev)))
8118 			continue;
8119 		netdev_adjacent_sysfs_add(iter->dev, dev,
8120 					  &iter->dev->adj_list.lower);
8121 		netdev_adjacent_sysfs_add(dev, iter->dev,
8122 					  &dev->adj_list.upper);
8123 	}
8124 
8125 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8126 		if (!net_eq(net, dev_net(iter->dev)))
8127 			continue;
8128 		netdev_adjacent_sysfs_add(iter->dev, dev,
8129 					  &iter->dev->adj_list.upper);
8130 		netdev_adjacent_sysfs_add(dev, iter->dev,
8131 					  &dev->adj_list.lower);
8132 	}
8133 }
8134 
8135 static void netdev_adjacent_del_links(struct net_device *dev)
8136 {
8137 	struct netdev_adjacent *iter;
8138 
8139 	struct net *net = dev_net(dev);
8140 
8141 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8142 		if (!net_eq(net, dev_net(iter->dev)))
8143 			continue;
8144 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8145 					  &iter->dev->adj_list.lower);
8146 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8147 					  &dev->adj_list.upper);
8148 	}
8149 
8150 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8151 		if (!net_eq(net, dev_net(iter->dev)))
8152 			continue;
8153 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8154 					  &iter->dev->adj_list.upper);
8155 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8156 					  &dev->adj_list.lower);
8157 	}
8158 }
8159 
8160 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8161 {
8162 	struct netdev_adjacent *iter;
8163 
8164 	struct net *net = dev_net(dev);
8165 
8166 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8167 		if (!net_eq(net, dev_net(iter->dev)))
8168 			continue;
8169 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8170 					  &iter->dev->adj_list.lower);
8171 		netdev_adjacent_sysfs_add(iter->dev, dev,
8172 					  &iter->dev->adj_list.lower);
8173 	}
8174 
8175 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8176 		if (!net_eq(net, dev_net(iter->dev)))
8177 			continue;
8178 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8179 					  &iter->dev->adj_list.upper);
8180 		netdev_adjacent_sysfs_add(iter->dev, dev,
8181 					  &iter->dev->adj_list.upper);
8182 	}
8183 }
8184 
8185 void *netdev_lower_dev_get_private(struct net_device *dev,
8186 				   struct net_device *lower_dev)
8187 {
8188 	struct netdev_adjacent *lower;
8189 
8190 	if (!lower_dev)
8191 		return NULL;
8192 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8193 	if (!lower)
8194 		return NULL;
8195 
8196 	return lower->private;
8197 }
8198 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8199 
8200 
8201 /**
8202  * netdev_lower_state_changed - Dispatch event about lower device state change
8203  * @lower_dev: device
8204  * @lower_state_info: state to dispatch
8205  *
8206  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8207  * The caller must hold the RTNL lock.
8208  */
8209 void netdev_lower_state_changed(struct net_device *lower_dev,
8210 				void *lower_state_info)
8211 {
8212 	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8213 		.info.dev = lower_dev,
8214 	};
8215 
8216 	ASSERT_RTNL();
8217 	changelowerstate_info.lower_state_info = lower_state_info;
8218 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8219 				      &changelowerstate_info.info);
8220 }
8221 EXPORT_SYMBOL(netdev_lower_state_changed);
8222 
8223 static void dev_change_rx_flags(struct net_device *dev, int flags)
8224 {
8225 	const struct net_device_ops *ops = dev->netdev_ops;
8226 
8227 	if (ops->ndo_change_rx_flags)
8228 		ops->ndo_change_rx_flags(dev, flags);
8229 }
8230 
8231 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8232 {
8233 	unsigned int old_flags = dev->flags;
8234 	kuid_t uid;
8235 	kgid_t gid;
8236 
8237 	ASSERT_RTNL();
8238 
8239 	dev->flags |= IFF_PROMISC;
8240 	dev->promiscuity += inc;
8241 	if (dev->promiscuity == 0) {
8242 		/*
8243 		 * Avoid overflow.
8244 		 * If inc causes overflow, untouch promisc and return error.
8245 		 * If inc causes an overflow, leave promiscuity untouched and return an error.
8246 		if (inc < 0)
8247 			dev->flags &= ~IFF_PROMISC;
8248 		else {
8249 			dev->promiscuity -= inc;
8250 			netdev_warn(dev, "promiscuity counter overflowed, setting promiscuity failed. The promiscuity feature of the device might be broken.\n");
8251 			return -EOVERFLOW;
8252 		}
8253 	}
8254 	if (dev->flags != old_flags) {
8255 		pr_info("device %s %s promiscuous mode\n",
8256 			dev->name,
8257 			dev->flags & IFF_PROMISC ? "entered" : "left");
8258 		if (audit_enabled) {
8259 			current_uid_gid(&uid, &gid);
8260 			audit_log(audit_context(), GFP_ATOMIC,
8261 				  AUDIT_ANOM_PROMISCUOUS,
8262 				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8263 				  dev->name, (dev->flags & IFF_PROMISC),
8264 				  (old_flags & IFF_PROMISC),
8265 				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
8266 				  from_kuid(&init_user_ns, uid),
8267 				  from_kgid(&init_user_ns, gid),
8268 				  audit_get_sessionid(current));
8269 		}
8270 
8271 		dev_change_rx_flags(dev, IFF_PROMISC);
8272 	}
8273 	if (notify)
8274 		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
8275 	return 0;
8276 }
8277 
8278 /**
8279  *	dev_set_promiscuity	- update promiscuity count on a device
8280  *	@dev: device
8281  *	@inc: modifier
8282  *
8283  *	Add or remove promiscuity from a device. While the count in the device
8284  *	remains above zero the interface remains promiscuous. Once it hits zero
8285 	 *	the device reverts to normal filtering operation. A negative @inc
8286  *	value is used to drop promiscuity on the device.
8287  *	Return 0 if successful or a negative errno code on error.
8288  */
8289 int dev_set_promiscuity(struct net_device *dev, int inc)
8290 {
8291 	unsigned int old_flags = dev->flags;
8292 	int err;
8293 
8294 	err = __dev_set_promiscuity(dev, inc, true);
8295 	if (err < 0)
8296 		return err;
8297 	if (dev->flags != old_flags)
8298 		dev_set_rx_mode(dev);
8299 	return err;
8300 }
8301 EXPORT_SYMBOL(dev_set_promiscuity);
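
/*
 * Editorial sketch (not part of the original file): the counted nature
 * of promiscuity. Every +1 must eventually be paired with a -1; the
 * device only leaves promiscuous mode once the count returns to zero.
 * The example_ name is illustrative only.
 */
static int __maybe_unused example_promisc_window(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();

	err = dev_set_promiscuity(dev, 1);	/* enter (or nest) */
	if (err)
		return err;

	/* ... observe traffic ... */

	return dev_set_promiscuity(dev, -1);	/* matching release */
}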
8302 
8303 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8304 {
8305 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8306 
8307 	ASSERT_RTNL();
8308 
8309 	dev->flags |= IFF_ALLMULTI;
8310 	dev->allmulti += inc;
8311 	if (dev->allmulti == 0) {
8312 		/*
8313 		 * Avoid overflow.
8314 		 * If inc causes overflow, untouch allmulti and return error.
8315 		 * If inc causes an overflow, leave allmulti untouched and return an error.
8316 		if (inc < 0)
8317 			dev->flags &= ~IFF_ALLMULTI;
8318 		else {
8319 			dev->allmulti -= inc;
8320 			netdev_warn(dev, "allmulti counter overflowed, setting allmulti failed. The allmulti feature of the device might be broken.\n");
8321 			return -EOVERFLOW;
8322 		}
8323 	}
8324 	if (dev->flags ^ old_flags) {
8325 		dev_change_rx_flags(dev, IFF_ALLMULTI);
8326 		dev_set_rx_mode(dev);
8327 		if (notify)
8328 			__dev_notify_flags(dev, old_flags,
8329 					   dev->gflags ^ old_gflags);
8330 	}
8331 	return 0;
8332 }
8333 
8334 /**
8335  *	dev_set_allmulti	- update allmulti count on a device
8336  *	@dev: device
8337  *	@inc: modifier
8338  *
8339  *	Add or remove reception of all multicast frames on a device. While the
8340  *	count in the device remains above zero the interface remains listening
8341  *	to all multicast frames. Once it hits zero the device reverts to normal
8342  *	filtering operation. A negative @inc value is used to drop the counter
8343  *	when releasing a resource needing all multicasts.
8344  *	Return 0 if successful or a negative errno code on error.
8345  */
8346 
8347 int dev_set_allmulti(struct net_device *dev, int inc)
8348 {
8349 	return __dev_set_allmulti(dev, inc, true);
8350 }
8351 EXPORT_SYMBOL(dev_set_allmulti);
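
/*
 * Editorial sketch (not part of the original file): holding an allmulti
 * reference for the lifetime of a hypothetical resource, matching the
 * @inc semantics described above. The example_ names are illustrative
 * only.
 */
static int __maybe_unused example_resource_start(struct net_device *dev)
{
	return dev_set_allmulti(dev, 1);	/* hold all-multicast */
}

static void __maybe_unused example_resource_stop(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);		/* drop the reference */
}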
8352 
8353 /*
8354  *	Upload unicast and multicast address lists to device and
8355  *	configure RX filtering. When the device doesn't support unicast
8356  *	filtering it is put in promiscuous mode while unicast addresses
8357  *	are present.
8358  */
8359 void __dev_set_rx_mode(struct net_device *dev)
8360 {
8361 	const struct net_device_ops *ops = dev->netdev_ops;
8362 
8363 	/* dev_open will call this function so the list will stay sane. */
8364 	if (!(dev->flags&IFF_UP))
8365 		return;
8366 
8367 	if (!netif_device_present(dev))
8368 		return;
8369 
8370 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8371 		/* Unicast address changes may only happen under the RTNL,
8372 		 * therefore calling __dev_set_promiscuity here is safe.
8373 		 */
8374 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8375 			__dev_set_promiscuity(dev, 1, false);
8376 			dev->uc_promisc = true;
8377 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8378 			__dev_set_promiscuity(dev, -1, false);
8379 			dev->uc_promisc = false;
8380 		}
8381 	}
8382 
8383 	if (ops->ndo_set_rx_mode)
8384 		ops->ndo_set_rx_mode(dev);
8385 }
8386 
8387 void dev_set_rx_mode(struct net_device *dev)
8388 {
8389 	netif_addr_lock_bh(dev);
8390 	__dev_set_rx_mode(dev);
8391 	netif_addr_unlock_bh(dev);
8392 }
8393 
8394 /**
8395  *	dev_get_flags - get flags reported to userspace
8396  *	@dev: device
8397  *
8398  *	Get the combination of flag bits exported through APIs to userspace.
8399  */
8400 unsigned int dev_get_flags(const struct net_device *dev)
8401 {
8402 	unsigned int flags;
8403 
8404 	flags = (dev->flags & ~(IFF_PROMISC |
8405 				IFF_ALLMULTI |
8406 				IFF_RUNNING |
8407 				IFF_LOWER_UP |
8408 				IFF_DORMANT)) |
8409 		(dev->gflags & (IFF_PROMISC |
8410 				IFF_ALLMULTI));
8411 
8412 	if (netif_running(dev)) {
8413 		if (netif_oper_up(dev))
8414 			flags |= IFF_RUNNING;
8415 		if (netif_carrier_ok(dev))
8416 			flags |= IFF_LOWER_UP;
8417 		if (netif_dormant(dev))
8418 			flags |= IFF_DORMANT;
8419 	}
8420 
8421 	return flags;
8422 }
8423 EXPORT_SYMBOL(dev_get_flags);
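
/*
 * Editorial sketch (not part of the original file): consuming the
 * userspace view of the flags, in which IFF_RUNNING, IFF_LOWER_UP and
 * IFF_DORMANT are synthesized from operational state rather than read
 * straight from dev->flags. The example_ name is illustrative only.
 */
static bool __maybe_unused example_iface_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}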
8424 
8425 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8426 		       struct netlink_ext_ack *extack)
8427 {
8428 	unsigned int old_flags = dev->flags;
8429 	int ret;
8430 
8431 	ASSERT_RTNL();
8432 
8433 	/*
8434 	 *	Set the flags on our device.
8435 	 */
8436 
8437 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8438 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8439 			       IFF_AUTOMEDIA)) |
8440 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8441 				    IFF_ALLMULTI));
8442 
8443 	/*
8444 	 *	Load in the correct multicast list now that the flags have changed.
8445 	 */
8446 
8447 	if ((old_flags ^ flags) & IFF_MULTICAST)
8448 		dev_change_rx_flags(dev, IFF_MULTICAST);
8449 
8450 	dev_set_rx_mode(dev);
8451 
8452 	/*
8453 	 *	Have we downed the interface? We handle IFF_UP ourselves
8454 	 *	according to user attempts to set it, rather than blindly
8455 	 *	setting it.
8456 	 */
8457 
8458 	ret = 0;
8459 	if ((old_flags ^ flags) & IFF_UP) {
8460 		if (old_flags & IFF_UP)
8461 			__dev_close(dev);
8462 		else
8463 			ret = __dev_open(dev, extack);
8464 	}
8465 
8466 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8467 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8468 		unsigned int old_flags = dev->flags;
8469 
8470 		dev->gflags ^= IFF_PROMISC;
8471 
8472 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8473 			if (dev->flags != old_flags)
8474 				dev_set_rx_mode(dev);
8475 	}
8476 
8477 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8478 	 * is important. Some (broken) drivers set IFF_PROMISC when
8479 	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8480 	 */
8481 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8482 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8483 
8484 		dev->gflags ^= IFF_ALLMULTI;
8485 		__dev_set_allmulti(dev, inc, false);
8486 	}
8487 
8488 	return ret;
8489 }
8490 
8491 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8492 			unsigned int gchanges)
8493 {
8494 	unsigned int changes = dev->flags ^ old_flags;
8495 
8496 	if (gchanges)
8497 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8498 
8499 	if (changes & IFF_UP) {
8500 		if (dev->flags & IFF_UP)
8501 			call_netdevice_notifiers(NETDEV_UP, dev);
8502 		else
8503 			call_netdevice_notifiers(NETDEV_DOWN, dev);
8504 	}
8505 
8506 	if (dev->flags & IFF_UP &&
8507 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8508 		struct netdev_notifier_change_info change_info = {
8509 			.info = {
8510 				.dev = dev,
8511 			},
8512 			.flags_changed = changes,
8513 		};
8514 
8515 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8516 	}
8517 }
8518 
8519 /**
8520  *	dev_change_flags - change device settings
8521  *	@dev: device
8522  *	@flags: device state flags
8523  *	@extack: netlink extended ack
8524  *
8525  *	Change settings on a device based on the given state flags. The flags are
8526  *	in the userspace-exported format.
8527  */
8528 int dev_change_flags(struct net_device *dev, unsigned int flags,
8529 		     struct netlink_ext_ack *extack)
8530 {
8531 	int ret;
8532 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8533 
8534 	ret = __dev_change_flags(dev, flags, extack);
8535 	if (ret < 0)
8536 		return ret;
8537 
8538 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8539 	__dev_notify_flags(dev, old_flags, changes);
8540 	return ret;
8541 }
8542 EXPORT_SYMBOL(dev_change_flags);
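
/*
 * Editorial sketch (not part of the original file): bringing a device
 * administratively up with userspace-format flags, roughly what the
 * SIOCSIFFLAGS ioctl path ends up doing. The example_ name is
 * illustrative only.
 */
static int __maybe_unused example_if_up(struct net_device *dev,
					struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	return dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, extack);
}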
8543 
8544 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8545 {
8546 	const struct net_device_ops *ops = dev->netdev_ops;
8547 
8548 	if (ops->ndo_change_mtu)
8549 		return ops->ndo_change_mtu(dev, new_mtu);
8550 
8551 	/* Pairs with all the lockless reads of dev->mtu in the stack */
8552 	WRITE_ONCE(dev->mtu, new_mtu);
8553 	return 0;
8554 }
8555 EXPORT_SYMBOL(__dev_set_mtu);
8556 
8557 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8558 		     struct netlink_ext_ack *extack)
8559 {
8560 	/* MTU must be positive, and in range */
8561 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8562 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8563 		return -EINVAL;
8564 	}
8565 
8566 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8567 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8568 		return -EINVAL;
8569 	}
8570 	return 0;
8571 }
8572 
8573 /**
8574  *	dev_set_mtu_ext - Change maximum transfer unit
8575  *	@dev: device
8576  *	@new_mtu: new transfer unit
8577  *	@extack: netlink extended ack
8578  *
8579  *	Change the maximum transfer size of the network device.
8580  */
8581 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8582 		    struct netlink_ext_ack *extack)
8583 {
8584 	int err, orig_mtu;
8585 
8586 	if (new_mtu == dev->mtu)
8587 		return 0;
8588 
8589 	err = dev_validate_mtu(dev, new_mtu, extack);
8590 	if (err)
8591 		return err;
8592 
8593 	if (!netif_device_present(dev))
8594 		return -ENODEV;
8595 
8596 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8597 	err = notifier_to_errno(err);
8598 	if (err)
8599 		return err;
8600 
8601 	orig_mtu = dev->mtu;
8602 	err = __dev_set_mtu(dev, new_mtu);
8603 
8604 	if (!err) {
8605 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8606 						   orig_mtu);
8607 		err = notifier_to_errno(err);
8608 		if (err) {
8609 			/* set the MTU back and notify everyone again,
8610 			 * so that they have a chance to revert the change.
8611 			 */
8612 			__dev_set_mtu(dev, orig_mtu);
8613 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8614 						     new_mtu);
8615 		}
8616 	}
8617 	return err;
8618 }
8619 
8620 int dev_set_mtu(struct net_device *dev, int new_mtu)
8621 {
8622 	struct netlink_ext_ack extack;
8623 	int err;
8624 
8625 	memset(&extack, 0, sizeof(extack));
8626 	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8627 	if (err && extack._msg)
8628 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8629 	return err;
8630 }
8631 EXPORT_SYMBOL(dev_set_mtu);
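
/*
 * Editorial sketch (not part of the original file): switching to a
 * jumbo MTU. Range validation and notifier-driven rollback are handled
 * inside dev_set_mtu_ext() above; 9000 is merely a common jumbo value,
 * and the example_ name is illustrative only.
 */
static int __maybe_unused example_enable_jumbo(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_set_mtu(dev, 9000);
}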
8632 
8633 /**
8634  *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8635  *	@dev: device
8636  *	@new_len: new tx queue length
8637  */
8638 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8639 {
8640 	unsigned int orig_len = dev->tx_queue_len;
8641 	int res;
8642 
8643 	if (new_len != (unsigned int)new_len)
8644 		return -ERANGE;
8645 
8646 	if (new_len != orig_len) {
8647 		dev->tx_queue_len = new_len;
8648 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8649 		res = notifier_to_errno(res);
8650 		if (res)
8651 			goto err_rollback;
8652 		res = dev_qdisc_change_tx_queue_len(dev);
8653 		if (res)
8654 			goto err_rollback;
8655 	}
8656 
8657 	return 0;
8658 
8659 err_rollback:
8660 	netdev_err(dev, "refused to change device tx_queue_len\n");
8661 	dev->tx_queue_len = orig_len;
8662 	return res;
8663 }
8664 
8665 /**
8666  *	dev_set_group - Change group this device belongs to
8667  *	@dev: device
8668  *	@new_group: group this device should belong to
8669  */
8670 void dev_set_group(struct net_device *dev, int new_group)
8671 {
8672 	dev->group = new_group;
8673 }
8674 
8675 /**
8676  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8677  *	@dev: device
8678  *	@addr: new address
8679  *	@extack: netlink extended ack
8680  */
8681 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8682 			      struct netlink_ext_ack *extack)
8683 {
8684 	struct netdev_notifier_pre_changeaddr_info info = {
8685 		.info.dev = dev,
8686 		.info.extack = extack,
8687 		.dev_addr = addr,
8688 	};
8689 	int rc;
8690 
8691 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8692 	return notifier_to_errno(rc);
8693 }
8694 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8695 
8696 /**
8697  *	dev_set_mac_address - Change Media Access Control Address
8698  *	@dev: device
8699  *	@sa: new address
8700  *	@extack: netlink extended ack
8701  *
8702  *	Change the hardware (MAC) address of the device
8703  */
8704 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8705 			struct netlink_ext_ack *extack)
8706 {
8707 	const struct net_device_ops *ops = dev->netdev_ops;
8708 	int err;
8709 
8710 	if (!ops->ndo_set_mac_address)
8711 		return -EOPNOTSUPP;
8712 	if (sa->sa_family != dev->type)
8713 		return -EINVAL;
8714 	if (!netif_device_present(dev))
8715 		return -ENODEV;
8716 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8717 	if (err)
8718 		return err;
8719 	err = ops->ndo_set_mac_address(dev, sa);
8720 	if (err)
8721 		return err;
8722 	dev->addr_assign_type = NET_ADDR_SET;
8723 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8724 	add_device_randomness(dev->dev_addr, dev->addr_len);
8725 	return 0;
8726 }
8727 EXPORT_SYMBOL(dev_set_mac_address);
8728 
8729 static DECLARE_RWSEM(dev_addr_sem);
8730 
8731 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8732 			     struct netlink_ext_ack *extack)
8733 {
8734 	int ret;
8735 
8736 	down_write(&dev_addr_sem);
8737 	ret = dev_set_mac_address(dev, sa, extack);
8738 	up_write(&dev_addr_sem);
8739 	return ret;
8740 }
8741 EXPORT_SYMBOL(dev_set_mac_address_user);
8742 
8743 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8744 {
8745 	size_t size = sizeof(sa->sa_data);
8746 	struct net_device *dev;
8747 	int ret = 0;
8748 
8749 	down_read(&dev_addr_sem);
8750 	rcu_read_lock();
8751 
8752 	dev = dev_get_by_name_rcu(net, dev_name);
8753 	if (!dev) {
8754 		ret = -ENODEV;
8755 		goto unlock;
8756 	}
8757 	if (!dev->addr_len)
8758 		memset(sa->sa_data, 0, size);
8759 	else
8760 		memcpy(sa->sa_data, dev->dev_addr,
8761 		       min_t(size_t, size, dev->addr_len));
8762 	sa->sa_family = dev->type;
8763 
8764 unlock:
8765 	rcu_read_unlock();
8766 	up_read(&dev_addr_sem);
8767 	return ret;
8768 }
8769 EXPORT_SYMBOL(dev_get_mac_address);
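
/*
 * Editorial sketch (not part of the original file): copying the MAC
 * address of one device onto another by name with the sockaddr-based
 * helpers above. Assumes @src_name resolves in @net, that both devices
 * share the same dev->type, and that the caller holds RTNL; the
 * example_ name is illustrative only.
 */
static int __maybe_unused example_clone_mac(struct net *net, char *src_name,
					    struct net_device *dst,
					    struct netlink_ext_ack *extack)
{
	struct sockaddr sa;
	int err;

	ASSERT_RTNL();

	err = dev_get_mac_address(&sa, net, src_name);
	if (err)
		return err;

	return dev_set_mac_address_user(dst, &sa, extack);
}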
8770 
8771 /**
8772  *	dev_change_carrier - Change device carrier
8773  *	@dev: device
8774  *	@new_carrier: new value
8775  *
8776  *	Change device carrier
8777  */
8778 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8779 {
8780 	const struct net_device_ops *ops = dev->netdev_ops;
8781 
8782 	if (!ops->ndo_change_carrier)
8783 		return -EOPNOTSUPP;
8784 	if (!netif_device_present(dev))
8785 		return -ENODEV;
8786 	return ops->ndo_change_carrier(dev, new_carrier);
8787 }
8788 
8789 /**
8790  *	dev_get_phys_port_id - Get device physical port ID
8791  *	@dev: device
8792  *	@ppid: port ID
8793  *
8794  *	Get device physical port ID
8795  */
8796 int dev_get_phys_port_id(struct net_device *dev,
8797 			 struct netdev_phys_item_id *ppid)
8798 {
8799 	const struct net_device_ops *ops = dev->netdev_ops;
8800 
8801 	if (!ops->ndo_get_phys_port_id)
8802 		return -EOPNOTSUPP;
8803 	return ops->ndo_get_phys_port_id(dev, ppid);
8804 }
8805 
8806 /**
8807  *	dev_get_phys_port_name - Get device physical port name
8808  *	@dev: device
8809  *	@name: port name
8810  *	@len: limit of bytes to copy to name
8811  *
8812  *	Get device physical port name
8813  */
8814 int dev_get_phys_port_name(struct net_device *dev,
8815 			   char *name, size_t len)
8816 {
8817 	const struct net_device_ops *ops = dev->netdev_ops;
8818 	int err;
8819 
8820 	if (ops->ndo_get_phys_port_name) {
8821 		err = ops->ndo_get_phys_port_name(dev, name, len);
8822 		if (err != -EOPNOTSUPP)
8823 			return err;
8824 	}
8825 	return devlink_compat_phys_port_name_get(dev, name, len);
8826 }
8827 
8828 /**
8829  *	dev_get_port_parent_id - Get the device's port parent identifier
8830  *	@dev: network device
8831  *	@ppid: pointer to a storage for the port's parent identifier
8832  *	@recurse: allow/disallow recursion to lower devices
8833  *
8834  *	Get the devices's port parent identifier
8835  *	Get the device's port parent identifier
8836 int dev_get_port_parent_id(struct net_device *dev,
8837 			   struct netdev_phys_item_id *ppid,
8838 			   bool recurse)
8839 {
8840 	const struct net_device_ops *ops = dev->netdev_ops;
8841 	struct netdev_phys_item_id first = { };
8842 	struct net_device *lower_dev;
8843 	struct list_head *iter;
8844 	int err;
8845 
8846 	if (ops->ndo_get_port_parent_id) {
8847 		err = ops->ndo_get_port_parent_id(dev, ppid);
8848 		if (err != -EOPNOTSUPP)
8849 			return err;
8850 	}
8851 
8852 	err = devlink_compat_switch_id_get(dev, ppid);
8853 	if (!recurse || err != -EOPNOTSUPP)
8854 		return err;
8855 
8856 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
8857 		err = dev_get_port_parent_id(lower_dev, ppid, true);
8858 		if (err)
8859 			break;
8860 		if (!first.id_len)
8861 			first = *ppid;
8862 		else if (memcmp(&first, ppid, sizeof(*ppid)))
8863 			return -EOPNOTSUPP;
8864 	}
8865 
8866 	return err;
8867 }
8868 EXPORT_SYMBOL(dev_get_port_parent_id);
8869 
8870 /**
8871  *	netdev_port_same_parent_id - Indicate if two network devices have
8872  *	the same port parent identifier
8873  *	@a: first network device
8874  *	@b: second network device
8875  */
8876 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8877 {
8878 	struct netdev_phys_item_id a_id = { };
8879 	struct netdev_phys_item_id b_id = { };
8880 
8881 	if (dev_get_port_parent_id(a, &a_id, true) ||
8882 	    dev_get_port_parent_id(b, &b_id, true))
8883 		return false;
8884 
8885 	return netdev_phys_item_id_same(&a_id, &b_id);
8886 }
8887 EXPORT_SYMBOL(netdev_port_same_parent_id);
8888 
8889 /**
8890  *	dev_change_proto_down - set carrier according to proto_down.
8891  *
8892  *	@dev: device
8893  *	@proto_down: new value
8894  */
8895 int dev_change_proto_down(struct net_device *dev, bool proto_down)
8896 {
8897 	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
8898 		return -EOPNOTSUPP;
8899 	if (!netif_device_present(dev))
8900 		return -ENODEV;
8901 	if (proto_down)
8902 		netif_carrier_off(dev);
8903 	else
8904 		netif_carrier_on(dev);
8905 	dev->proto_down = proto_down;
8906 	return 0;
8907 }
8908 
8909 /**
8910  *	dev_change_proto_down_reason - proto down reason
8911  *	dev_change_proto_down_reason - update the proto_down reason bits
8912  *	@dev: device
8913  *	@mask: proto down mask
8914  *	@mask: which reason bits to update (zero replaces the whole reason word)
8915  *	@value: new state for the reason bits selected by @mask
8916 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
8917 				  u32 value)
8918 {
8919 	int b;
8920 
8921 	if (!mask) {
8922 		dev->proto_down_reason = value;
8923 	} else {
8924 		for_each_set_bit(b, &mask, 32) {
8925 			if (value & (1 << b))
8926 				dev->proto_down_reason |= BIT(b);
8927 			else
8928 				dev->proto_down_reason &= ~BIT(b);
8929 		}
8930 	}
8931 }
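
/*
 * Editorial sketch (not part of the original file): flipping a single
 * reason bit while leaving the others alone. Bit 3 is arbitrary and
 * purely illustrative, as is the example_ name.
 */
static void __maybe_unused example_mark_reason(struct net_device *dev,
					       bool down)
{
	/* only bit 3 is affected; value supplies its new state */
	dev_change_proto_down_reason(dev, BIT(3), down ? BIT(3) : 0);
}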
8932 
8933 struct bpf_xdp_link {
8934 	struct bpf_link link;
8935 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
8936 	int flags;
8937 };
8938 
8939 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
8940 {
8941 	if (flags & XDP_FLAGS_HW_MODE)
8942 		return XDP_MODE_HW;
8943 	if (flags & XDP_FLAGS_DRV_MODE)
8944 		return XDP_MODE_DRV;
8945 	if (flags & XDP_FLAGS_SKB_MODE)
8946 		return XDP_MODE_SKB;
8947 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
8948 }
8949 
8950 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
8951 {
8952 	switch (mode) {
8953 	case XDP_MODE_SKB:
8954 		return generic_xdp_install;
8955 	case XDP_MODE_DRV:
8956 	case XDP_MODE_HW:
8957 		return dev->netdev_ops->ndo_bpf;
8958 	default:
8959 		return NULL;
8960 	}
8961 }
8962 
8963 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
8964 					 enum bpf_xdp_mode mode)
8965 {
8966 	return dev->xdp_state[mode].link;
8967 }
8968 
8969 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
8970 				     enum bpf_xdp_mode mode)
8971 {
8972 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
8973 
8974 	if (link)
8975 		return link->link.prog;
8976 	return dev->xdp_state[mode].prog;
8977 }
8978 
8979 u8 dev_xdp_prog_count(struct net_device *dev)
8980 {
8981 	u8 count = 0;
8982 	int i;
8983 
8984 	for (i = 0; i < __MAX_XDP_MODE; i++)
8985 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
8986 			count++;
8987 	return count;
8988 }
8989 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
8990 
8991 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
8992 {
8993 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
8994 
8995 	return prog ? prog->aux->id : 0;
8996 }
8997 
8998 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
8999 			     struct bpf_xdp_link *link)
9000 {
9001 	dev->xdp_state[mode].link = link;
9002 	dev->xdp_state[mode].prog = NULL;
9003 }
9004 
9005 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9006 			     struct bpf_prog *prog)
9007 {
9008 	dev->xdp_state[mode].link = NULL;
9009 	dev->xdp_state[mode].prog = prog;
9010 }
9011 
9012 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9013 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9014 			   u32 flags, struct bpf_prog *prog)
9015 {
9016 	struct netdev_bpf xdp;
9017 	int err;
9018 
9019 	memset(&xdp, 0, sizeof(xdp));
9020 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9021 	xdp.extack = extack;
9022 	xdp.flags = flags;
9023 	xdp.prog = prog;
9024 
9025 	/* Drivers assume refcnt is already incremented (i.e., prog pointer is
9026 	 * "moved" into driver), so they don't increment it on their own, but
9027 	 * they do decrement refcnt when program is detached or replaced.
9028 	 * Given net_device also owns link/prog, we need to bump refcnt here
9029 	 * to prevent drivers from underflowing it.
9030 	 */
9031 	if (prog)
9032 		bpf_prog_inc(prog);
9033 	err = bpf_op(dev, &xdp);
9034 	if (err) {
9035 		if (prog)
9036 			bpf_prog_put(prog);
9037 		return err;
9038 	}
9039 
9040 	if (mode != XDP_MODE_HW)
9041 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9042 
9043 	return 0;
9044 }
9045 
9046 static void dev_xdp_uninstall(struct net_device *dev)
9047 {
9048 	struct bpf_xdp_link *link;
9049 	struct bpf_prog *prog;
9050 	enum bpf_xdp_mode mode;
9051 	bpf_op_t bpf_op;
9052 
9053 	ASSERT_RTNL();
9054 
9055 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9056 		prog = dev_xdp_prog(dev, mode);
9057 		if (!prog)
9058 			continue;
9059 
9060 		bpf_op = dev_xdp_bpf_op(dev, mode);
9061 		if (!bpf_op)
9062 			continue;
9063 
9064 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9065 
9066 		/* auto-detach link from net device */
9067 		link = dev_xdp_link(dev, mode);
9068 		if (link)
9069 			link->dev = NULL;
9070 		else
9071 			bpf_prog_put(prog);
9072 
9073 		dev_xdp_set_link(dev, mode, NULL);
9074 	}
9075 }
9076 
9077 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9078 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9079 			  struct bpf_prog *old_prog, u32 flags)
9080 {
9081 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9082 	struct bpf_prog *cur_prog;
9083 	struct net_device *upper;
9084 	struct list_head *iter;
9085 	enum bpf_xdp_mode mode;
9086 	bpf_op_t bpf_op;
9087 	int err;
9088 
9089 	ASSERT_RTNL();
9090 
9091 	/* either link or prog attachment, never both */
9092 	if (link && (new_prog || old_prog))
9093 		return -EINVAL;
9094 	/* link supports only XDP mode flags */
9095 	if (link && (flags & ~XDP_FLAGS_MODES)) {
9096 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9097 		return -EINVAL;
9098 	}
9099 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9100 	if (num_modes > 1) {
9101 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9102 		return -EINVAL;
9103 	}
9104 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9105 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9106 		NL_SET_ERR_MSG(extack,
9107 			       "More than one program loaded, unset mode is ambiguous");
9108 		return -EINVAL;
9109 	}
9110 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9111 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9112 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9113 		return -EINVAL;
9114 	}
9115 
9116 	mode = dev_xdp_mode(dev, flags);
9117 	/* can't replace attached link */
9118 	if (dev_xdp_link(dev, mode)) {
9119 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9120 		return -EBUSY;
9121 	}
9122 
9123 	/* don't allow if an upper device already has a program */
9124 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9125 		if (dev_xdp_prog_count(upper) > 0) {
9126 			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9127 			return -EEXIST;
9128 		}
9129 	}
9130 
9131 	cur_prog = dev_xdp_prog(dev, mode);
9132 	/* can't replace attached prog with link */
9133 	if (link && cur_prog) {
9134 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9135 		return -EBUSY;
9136 	}
9137 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9138 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
9139 		return -EEXIST;
9140 	}
9141 
9142 	/* put effective new program into new_prog */
9143 	if (link)
9144 		new_prog = link->link.prog;
9145 
9146 	if (new_prog) {
9147 		bool offload = mode == XDP_MODE_HW;
9148 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9149 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
9150 
9151 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9152 			NL_SET_ERR_MSG(extack, "XDP program already attached");
9153 			return -EBUSY;
9154 		}
9155 		if (!offload && dev_xdp_prog(dev, other_mode)) {
9156 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9157 			return -EEXIST;
9158 		}
9159 		if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
9160 			NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
9161 			return -EINVAL;
9162 		}
9163 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9164 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9165 			return -EINVAL;
9166 		}
9167 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9168 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9169 			return -EINVAL;
9170 		}
9171 	}
9172 
9173 	/* don't call drivers if the effective program didn't change */
9174 	if (new_prog != cur_prog) {
9175 		bpf_op = dev_xdp_bpf_op(dev, mode);
9176 		if (!bpf_op) {
9177 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9178 			return -EOPNOTSUPP;
9179 		}
9180 
9181 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9182 		if (err)
9183 			return err;
9184 	}
9185 
9186 	if (link)
9187 		dev_xdp_set_link(dev, mode, link);
9188 	else
9189 		dev_xdp_set_prog(dev, mode, new_prog);
9190 	if (cur_prog)
9191 		bpf_prog_put(cur_prog);
9192 
9193 	return 0;
9194 }
9195 
9196 static int dev_xdp_attach_link(struct net_device *dev,
9197 			       struct netlink_ext_ack *extack,
9198 			       struct bpf_xdp_link *link)
9199 {
9200 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9201 }
9202 
9203 static int dev_xdp_detach_link(struct net_device *dev,
9204 			       struct netlink_ext_ack *extack,
9205 			       struct bpf_xdp_link *link)
9206 {
9207 	enum bpf_xdp_mode mode;
9208 	bpf_op_t bpf_op;
9209 
9210 	ASSERT_RTNL();
9211 
9212 	mode = dev_xdp_mode(dev, link->flags);
9213 	if (dev_xdp_link(dev, mode) != link)
9214 		return -EINVAL;
9215 
9216 	bpf_op = dev_xdp_bpf_op(dev, mode);
9217 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9218 	dev_xdp_set_link(dev, mode, NULL);
9219 	return 0;
9220 }
9221 
9222 static void bpf_xdp_link_release(struct bpf_link *link)
9223 {
9224 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9225 
9226 	rtnl_lock();
9227 
9228 	/* if racing with net_device's tear down, xdp_link->dev might be
9229 	/* if racing with net_device's tear down, xdp_link->dev might
9230 	 * already be NULL, in which case the link was already auto-detached
9231 	if (xdp_link->dev) {
9232 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9233 		xdp_link->dev = NULL;
9234 	}
9235 
9236 	rtnl_unlock();
9237 }
9238 
9239 static int bpf_xdp_link_detach(struct bpf_link *link)
9240 {
9241 	bpf_xdp_link_release(link);
9242 	return 0;
9243 }
9244 
9245 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9246 {
9247 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9248 
9249 	kfree(xdp_link);
9250 }
9251 
9252 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9253 				     struct seq_file *seq)
9254 {
9255 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9256 	u32 ifindex = 0;
9257 
9258 	rtnl_lock();
9259 	if (xdp_link->dev)
9260 		ifindex = xdp_link->dev->ifindex;
9261 	rtnl_unlock();
9262 
9263 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
9264 }
9265 
9266 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9267 				       struct bpf_link_info *info)
9268 {
9269 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9270 	u32 ifindex = 0;
9271 
9272 	rtnl_lock();
9273 	if (xdp_link->dev)
9274 		ifindex = xdp_link->dev->ifindex;
9275 	rtnl_unlock();
9276 
9277 	info->xdp.ifindex = ifindex;
9278 	return 0;
9279 }
9280 
9281 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9282 			       struct bpf_prog *old_prog)
9283 {
9284 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9285 	enum bpf_xdp_mode mode;
9286 	bpf_op_t bpf_op;
9287 	int err = 0;
9288 
9289 	rtnl_lock();
9290 
9291 	/* link might have been auto-released already, so fail */
9292 	if (!xdp_link->dev) {
9293 		err = -ENOLINK;
9294 		goto out_unlock;
9295 	}
9296 
9297 	if (old_prog && link->prog != old_prog) {
9298 		err = -EPERM;
9299 		goto out_unlock;
9300 	}
9301 	old_prog = link->prog;
9302 	if (old_prog->type != new_prog->type ||
9303 	    old_prog->expected_attach_type != new_prog->expected_attach_type) {
9304 		err = -EINVAL;
9305 		goto out_unlock;
9306 	}
9307 
9308 	if (old_prog == new_prog) {
9309 		/* no-op, don't disturb drivers */
9310 		bpf_prog_put(new_prog);
9311 		goto out_unlock;
9312 	}
9313 
9314 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9315 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9316 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9317 			      xdp_link->flags, new_prog);
9318 	if (err)
9319 		goto out_unlock;
9320 
9321 	old_prog = xchg(&link->prog, new_prog);
9322 	bpf_prog_put(old_prog);
9323 
9324 out_unlock:
9325 	rtnl_unlock();
9326 	return err;
9327 }
9328 
9329 static const struct bpf_link_ops bpf_xdp_link_lops = {
9330 	.release = bpf_xdp_link_release,
9331 	.dealloc = bpf_xdp_link_dealloc,
9332 	.detach = bpf_xdp_link_detach,
9333 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
9334 	.fill_link_info = bpf_xdp_link_fill_link_info,
9335 	.update_prog = bpf_xdp_link_update,
9336 };
9337 
9338 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9339 {
9340 	struct net *net = current->nsproxy->net_ns;
9341 	struct bpf_link_primer link_primer;
9342 	struct bpf_xdp_link *link;
9343 	struct net_device *dev;
9344 	int err, fd;
9345 
9346 	rtnl_lock();
9347 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9348 	if (!dev) {
9349 		rtnl_unlock();
9350 		return -EINVAL;
9351 	}
9352 
9353 	link = kzalloc(sizeof(*link), GFP_USER);
9354 	if (!link) {
9355 		err = -ENOMEM;
9356 		goto unlock;
9357 	}
9358 
9359 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9360 	link->dev = dev;
9361 	link->flags = attr->link_create.flags;
9362 
9363 	err = bpf_link_prime(&link->link, &link_primer);
9364 	if (err) {
9365 		kfree(link);
9366 		goto unlock;
9367 	}
9368 
9369 	err = dev_xdp_attach_link(dev, NULL, link);
9370 	rtnl_unlock();
9371 
9372 	if (err) {
9373 		link->dev = NULL;
9374 		bpf_link_cleanup(&link_primer);
9375 		goto out_put_dev;
9376 	}
9377 
9378 	fd = bpf_link_settle(&link_primer);
9379 	/* the link itself doesn't hold dev's refcnt, so as not to complicate shutdown */
9380 	dev_put(dev);
9381 	return fd;
9382 
9383 unlock:
9384 	rtnl_unlock();
9385 
9386 out_put_dev:
9387 	dev_put(dev);
9388 	return err;
9389 }
9390 
9391 /**
9392  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
9393  *	@dev: device
9394  *	@extack: netlink extended ack
9395  *	@fd: new program fd or negative value to clear
9396  *	@expected_fd: old program fd that userspace expects to replace or clear
9397  *	@flags: xdp-related flags
9398  *
9399  *	Set or clear a bpf program for a device
9400  */
9401 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9402 		      int fd, int expected_fd, u32 flags)
9403 {
9404 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9405 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9406 	int err;
9407 
9408 	ASSERT_RTNL();
9409 
9410 	if (fd >= 0) {
9411 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9412 						 mode != XDP_MODE_SKB);
9413 		if (IS_ERR(new_prog))
9414 			return PTR_ERR(new_prog);
9415 	}
9416 
9417 	if (expected_fd >= 0) {
9418 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9419 						 mode != XDP_MODE_SKB);
9420 		if (IS_ERR(old_prog)) {
9421 			err = PTR_ERR(old_prog);
9422 			old_prog = NULL;
9423 			goto err_out;
9424 		}
9425 	}
9426 
9427 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9428 
9429 err_out:
9430 	if (err && new_prog)
9431 		bpf_prog_put(new_prog);
9432 	if (old_prog)
9433 		bpf_prog_put(old_prog);
9434 	return err;
9435 }
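
/* Illustrative sketch of a dev_change_xdp_fd() caller.  It mirrors what the
 * rtnetlink IFLA_XDP handler does; it is not built here (hence the #if 0) and
 * example_install_and_clear_xdp is a hypothetical name.  The fds are assumed
 * to reference loaded BPF_PROG_TYPE_XDP programs.
 */
#if 0
static int example_install_and_clear_xdp(struct net_device *dev,
					 struct netlink_ext_ack *extack,
					 int prog_fd)
{
	int err;

	ASSERT_RTNL();

	/* Install in generic (skb) mode; expected_fd < 0 means we do not
	 * insist on what is currently attached.
	 */
	err = dev_change_xdp_fd(dev, extack, prog_fd, -1, XDP_FLAGS_SKB_MODE);
	if (err)
		return err;

	/* A negative fd clears the program again. */
	return dev_change_xdp_fd(dev, extack, -1, -1, XDP_FLAGS_SKB_MODE);
}
#endif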
9436 
9437 /**
9438  *	dev_new_index	-	allocate an ifindex
9439  *	@net: the applicable net namespace
9440  *
9441  *	Returns a suitable unique value for a new device interface
9442  *	number.  The caller must hold the rtnl semaphore or the
9443  *	dev_base_lock to be sure it remains unique.
9444  */
9445 static int dev_new_index(struct net *net)
9446 {
9447 	int ifindex = net->ifindex;
9448 
9449 	for (;;) {
9450 		if (++ifindex <= 0)
9451 			ifindex = 1;
9452 		if (!__dev_get_by_index(net, ifindex))
9453 			return net->ifindex = ifindex;
9454 	}
9455 }
9456 
9457 /* Delayed registration/unregisteration */
9458 LIST_HEAD(net_todo_list);
9459 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9460 
9461 static void net_set_todo(struct net_device *dev)
9462 {
9463 	list_add_tail(&dev->todo_list, &net_todo_list);
9464 	atomic_inc(&dev_net(dev)->dev_unreg_count);
9465 }
9466 
9467 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9468 	struct net_device *upper, netdev_features_t features)
9469 {
9470 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9471 	netdev_features_t feature;
9472 	int feature_bit;
9473 
9474 	for_each_netdev_feature(upper_disables, feature_bit) {
9475 		feature = __NETIF_F_BIT(feature_bit);
9476 		if (!(upper->wanted_features & feature) &&
9477 		    (features & feature)) {
9478 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9479 				   &feature, upper->name);
9480 			features &= ~feature;
9481 		}
9482 	}
9483 
9484 	return features;
9485 }
9486 
9487 static void netdev_sync_lower_features(struct net_device *upper,
9488 	struct net_device *lower, netdev_features_t features)
9489 {
9490 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9491 	netdev_features_t feature;
9492 	int feature_bit;
9493 
9494 	for_each_netdev_feature(upper_disables, feature_bit) {
9495 		feature = __NETIF_F_BIT(feature_bit);
9496 		if (!(features & feature) && (lower->features & feature)) {
9497 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9498 				   &feature, lower->name);
9499 			lower->wanted_features &= ~feature;
9500 			__netdev_update_features(lower);
9501 
9502 			if (unlikely(lower->features & feature))
9503 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9504 					    &feature, lower->name);
9505 			else
9506 				netdev_features_change(lower);
9507 		}
9508 	}
9509 }
9510 
9511 static netdev_features_t netdev_fix_features(struct net_device *dev,
9512 	netdev_features_t features)
9513 {
9514 	/* Fix illegal checksum combinations */
9515 	if ((features & NETIF_F_HW_CSUM) &&
9516 	    (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
9517 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9518 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9519 	}
9520 
9521 	/* TSO requires that SG is present as well. */
9522 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9523 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9524 		features &= ~NETIF_F_ALL_TSO;
9525 	}
9526 
9527 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9528 					!(features & NETIF_F_IP_CSUM)) {
9529 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9530 		features &= ~NETIF_F_TSO;
9531 		features &= ~NETIF_F_TSO_ECN;
9532 	}
9533 
9534 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9535 					 !(features & NETIF_F_IPV6_CSUM)) {
9536 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9537 		features &= ~NETIF_F_TSO6;
9538 	}
9539 
9540 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9541 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9542 		features &= ~NETIF_F_TSO_MANGLEID;
9543 
9544 	/* TSO ECN requires that TSO is present as well. */
9545 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9546 		features &= ~NETIF_F_TSO_ECN;
9547 
9548 	/* Software GSO depends on SG. */
9549 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9550 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9551 		features &= ~NETIF_F_GSO;
9552 	}
9553 
9554 	/* GSO partial features require GSO partial be set */
9555 	if ((features & dev->gso_partial_features) &&
9556 	    !(features & NETIF_F_GSO_PARTIAL)) {
9557 		netdev_dbg(dev,
9558 			   "Dropping partially supported GSO features since no GSO partial.\n");
9559 		features &= ~dev->gso_partial_features;
9560 	}
9561 
9562 	if (!(features & NETIF_F_RXCSUM)) {
9563 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9564 		 * successfully merged by hardware must also have the
9565 		 * checksum verified by hardware.  If the user does not
9566 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
9567 		 */
9568 		if (features & NETIF_F_GRO_HW) {
9569 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9570 			features &= ~NETIF_F_GRO_HW;
9571 		}
9572 	}
9573 
9574 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
9575 	if (features & NETIF_F_RXFCS) {
9576 		if (features & NETIF_F_LRO) {
9577 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9578 			features &= ~NETIF_F_LRO;
9579 		}
9580 
9581 		if (features & NETIF_F_GRO_HW) {
9582 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9583 			features &= ~NETIF_F_GRO_HW;
9584 		}
9585 	}
9586 
9587 	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9588 		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9589 		features &= ~NETIF_F_LRO;
9590 	}
9591 
9592 	if (features & NETIF_F_HW_TLS_TX) {
9593 		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9594 			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9595 		bool hw_csum = features & NETIF_F_HW_CSUM;
9596 
9597 		if (!ip_csum && !hw_csum) {
9598 			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9599 			features &= ~NETIF_F_HW_TLS_TX;
9600 		}
9601 	}
9602 
9603 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9604 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9605 		features &= ~NETIF_F_HW_TLS_RX;
9606 	}
9607 
9608 	return features;
9609 }
9610 
9611 int __netdev_update_features(struct net_device *dev)
9612 {
9613 	struct net_device *upper, *lower;
9614 	netdev_features_t features;
9615 	struct list_head *iter;
9616 	int err = -1;
9617 
9618 	ASSERT_RTNL();
9619 
9620 	features = netdev_get_wanted_features(dev);
9621 
9622 	if (dev->netdev_ops->ndo_fix_features)
9623 		features = dev->netdev_ops->ndo_fix_features(dev, features);
9624 
9625 	/* driver might be less strict about feature dependencies */
9626 	features = netdev_fix_features(dev, features);
9627 
9628 	/* some features can't be enabled if they're off on an upper device */
9629 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9630 		features = netdev_sync_upper_features(dev, upper, features);
9631 
9632 	if (dev->features == features)
9633 		goto sync_lower;
9634 
9635 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9636 		&dev->features, &features);
9637 
9638 	if (dev->netdev_ops->ndo_set_features)
9639 		err = dev->netdev_ops->ndo_set_features(dev, features);
9640 	else
9641 		err = 0;
9642 
9643 	if (unlikely(err < 0)) {
9644 		netdev_err(dev,
9645 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9646 			err, &features, &dev->features);
9647 		/* return non-0 since some features might have changed and
9648 		 * it's better to fire a spurious notification than miss it
9649 		 */
9650 		return -1;
9651 	}
9652 
9653 sync_lower:
9654 	/* some features must be disabled on lower devices when disabled
9655 	 * on an upper device (think: bonding master or bridge)
9656 	 */
9657 	netdev_for_each_lower_dev(dev, lower, iter)
9658 		netdev_sync_lower_features(dev, lower, features);
9659 
9660 	if (!err) {
9661 		netdev_features_t diff = features ^ dev->features;
9662 
9663 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9664 			/* udp_tunnel_{get,drop}_rx_info both need
9665 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9666 			 * device, or they won't do anything.
9667 			 * Thus we need to update dev->features
9668 			 * *before* calling udp_tunnel_get_rx_info,
9669 			 * but *after* calling udp_tunnel_drop_rx_info.
9670 			 */
9671 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9672 				dev->features = features;
9673 				udp_tunnel_get_rx_info(dev);
9674 			} else {
9675 				udp_tunnel_drop_rx_info(dev);
9676 			}
9677 		}
9678 
9679 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9680 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9681 				dev->features = features;
9682 				err |= vlan_get_rx_ctag_filter_info(dev);
9683 			} else {
9684 				vlan_drop_rx_ctag_filter_info(dev);
9685 			}
9686 		}
9687 
9688 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9689 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9690 				dev->features = features;
9691 				err |= vlan_get_rx_stag_filter_info(dev);
9692 			} else {
9693 				vlan_drop_rx_stag_filter_info(dev);
9694 			}
9695 		}
9696 
9697 		dev->features = features;
9698 	}
9699 
9700 	return err < 0 ? 0 : 1;
9701 }
9702 
9703 /**
9704  *	netdev_update_features - recalculate device features
9705  *	@dev: the device to check
9706  *
9707  *	Recalculate dev->features set and send notifications if it
9708  *	has changed. Should be called after driver or hardware dependent
9709  *	conditions might have changed that influence the features.
9710  */
9711 void netdev_update_features(struct net_device *dev)
9712 {
9713 	if (__netdev_update_features(dev))
9714 		netdev_features_change(dev);
9715 }
9716 EXPORT_SYMBOL(netdev_update_features);
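
/* Illustrative sketch (not built, hypothetical names): a driver whose offload
 * capabilities depend on runtime state re-runs the feature machinery above
 * from a context that already holds rtnl, e.g. its ndo_change_mtu().
 */
#if 0
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

	/* Re-consults ndo_fix_features()/netdev_fix_features() and sends
	 * a feature-change notification only if the result differs.
	 */
	netdev_update_features(dev);
	return 0;
}
#endif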
9717 
9718 /**
9719  *	netdev_change_features - recalculate device features
9720  *	@dev: the device to check
9721  *
9722  *	Recalculate dev->features set and send notifications even
9723  *	if they have not changed. Should be called instead of
9724  *	netdev_update_features() if also dev->vlan_features might
9725  *	have changed to allow the changes to be propagated to stacked
9726  *	VLAN devices.
9727  */
9728 void netdev_change_features(struct net_device *dev)
9729 {
9730 	__netdev_update_features(dev);
9731 	netdev_features_change(dev);
9732 }
9733 EXPORT_SYMBOL(netdev_change_features);
9734 
9735 /**
9736  *	netif_stacked_transfer_operstate -	transfer operstate
9737  *	@rootdev: the root or lower level device to transfer state from
9738  *	@dev: the device to transfer operstate to
9739  *
9740  *	Transfer operational state from root to device. This is normally
9741  *	called when a stacking relationship exists between the root
9742  *	device and the device(a leaf device).
9743  */
9744 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9745 					struct net_device *dev)
9746 {
9747 	if (rootdev->operstate == IF_OPER_DORMANT)
9748 		netif_dormant_on(dev);
9749 	else
9750 		netif_dormant_off(dev);
9751 
9752 	if (rootdev->operstate == IF_OPER_TESTING)
9753 		netif_testing_on(dev);
9754 	else
9755 		netif_testing_off(dev);
9756 
9757 	if (netif_carrier_ok(rootdev))
9758 		netif_carrier_on(dev);
9759 	else
9760 		netif_carrier_off(dev);
9761 }
9762 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
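
/* Illustrative sketch (not built): a VLAN-like stacked driver typically calls
 * netif_stacked_transfer_operstate() from its netdevice notifier when the
 * lower device changes.  example_lookup_upper() is a hypothetical helper.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper = example_lookup_upper(lower);

	if (upper && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}
#endif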
9763 
9764 static int netif_alloc_rx_queues(struct net_device *dev)
9765 {
9766 	unsigned int i, count = dev->num_rx_queues;
9767 	struct netdev_rx_queue *rx;
9768 	size_t sz = count * sizeof(*rx);
9769 	int err = 0;
9770 
9771 	BUG_ON(count < 1);
9772 
9773 	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9774 	if (!rx)
9775 		return -ENOMEM;
9776 
9777 	dev->_rx = rx;
9778 
9779 	for (i = 0; i < count; i++) {
9780 		rx[i].dev = dev;
9781 
9782 		/* XDP RX-queue setup */
9783 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9784 		if (err < 0)
9785 			goto err_rxq_info;
9786 	}
9787 	return 0;
9788 
9789 err_rxq_info:
9790 	/* Roll back successful registrations and free other resources */
9791 	while (i--)
9792 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9793 	kvfree(dev->_rx);
9794 	dev->_rx = NULL;
9795 	return err;
9796 }
9797 
9798 static void netif_free_rx_queues(struct net_device *dev)
9799 {
9800 	unsigned int i, count = dev->num_rx_queues;
9801 
9802 	/* netif_alloc_rx_queues() failed; resources have already been unregistered */
9803 	if (!dev->_rx)
9804 		return;
9805 
9806 	for (i = 0; i < count; i++)
9807 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9808 
9809 	kvfree(dev->_rx);
9810 }
9811 
9812 static void netdev_init_one_queue(struct net_device *dev,
9813 				  struct netdev_queue *queue, void *_unused)
9814 {
9815 	/* Initialize queue lock */
9816 	spin_lock_init(&queue->_xmit_lock);
9817 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
9818 	queue->xmit_lock_owner = -1;
9819 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
9820 	queue->dev = dev;
9821 #ifdef CONFIG_BQL
9822 	dql_init(&queue->dql, HZ);
9823 #endif
9824 }
9825 
9826 static void netif_free_tx_queues(struct net_device *dev)
9827 {
9828 	kvfree(dev->_tx);
9829 }
9830 
9831 static int netif_alloc_netdev_queues(struct net_device *dev)
9832 {
9833 	unsigned int count = dev->num_tx_queues;
9834 	struct netdev_queue *tx;
9835 	size_t sz = count * sizeof(*tx);
9836 
9837 	if (count < 1 || count > 0xffff)
9838 		return -EINVAL;
9839 
9840 	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9841 	if (!tx)
9842 		return -ENOMEM;
9843 
9844 	dev->_tx = tx;
9845 
9846 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9847 	spin_lock_init(&dev->tx_global_lock);
9848 
9849 	return 0;
9850 }
9851 
9852 void netif_tx_stop_all_queues(struct net_device *dev)
9853 {
9854 	unsigned int i;
9855 
9856 	for (i = 0; i < dev->num_tx_queues; i++) {
9857 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
9858 
9859 		netif_tx_stop_queue(txq);
9860 	}
9861 }
9862 EXPORT_SYMBOL(netif_tx_stop_all_queues);
9863 
9864 /**
9865  *	register_netdevice	- register a network device
9866  *	@dev: device to register
9867  *
9868  *	Take a completed network device structure and add it to the kernel
9869  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9870  *	chain. 0 is returned on success. A negative errno code is returned
9871  *	on a failure to set up the device, or if the name is a duplicate.
9872  *
9873  *	Callers must hold the rtnl semaphore. You may want
9874  *	register_netdev() instead of this.
9875  *
9876  *	BUGS:
9877  *	The locking appears insufficient to guarantee two parallel registers
9878  *	will not get the same name.
9879  */
9880 
9881 int register_netdevice(struct net_device *dev)
9882 {
9883 	int ret;
9884 	struct net *net = dev_net(dev);
9885 
9886 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9887 		     NETDEV_FEATURE_COUNT);
9888 	BUG_ON(dev_boot_phase);
9889 	ASSERT_RTNL();
9890 
9891 	might_sleep();
9892 
9893 	/* When net_devices are persistent, this will be fatal. */
9894 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
9895 	BUG_ON(!net);
9896 
9897 	ret = ethtool_check_ops(dev->ethtool_ops);
9898 	if (ret)
9899 		return ret;
9900 
9901 	spin_lock_init(&dev->addr_list_lock);
9902 	netdev_set_addr_lockdep_class(dev);
9903 
9904 	ret = dev_get_valid_name(net, dev, dev->name);
9905 	if (ret < 0)
9906 		goto out;
9907 
9908 	ret = -ENOMEM;
9909 	dev->name_node = netdev_name_node_head_alloc(dev);
9910 	if (!dev->name_node)
9911 		goto out;
9912 
9913 	/* Init, if this function is available */
9914 	if (dev->netdev_ops->ndo_init) {
9915 		ret = dev->netdev_ops->ndo_init(dev);
9916 		if (ret) {
9917 			if (ret > 0)
9918 				ret = -EIO;
9919 			goto err_free_name;
9920 		}
9921 	}
9922 
9923 	if (((dev->hw_features | dev->features) &
9924 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
9925 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
9926 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
9927 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
9928 		ret = -EINVAL;
9929 		goto err_uninit;
9930 	}
9931 
9932 	ret = -EBUSY;
9933 	if (!dev->ifindex)
9934 		dev->ifindex = dev_new_index(net);
9935 	else if (__dev_get_by_index(net, dev->ifindex))
9936 		goto err_uninit;
9937 
9938 	/* Transfer changeable features to wanted_features and enable
9939 	 * software offloads (GSO and GRO).
9940 	 */
9941 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
9942 	dev->features |= NETIF_F_SOFT_FEATURES;
9943 
9944 	if (dev->udp_tunnel_nic_info) {
9945 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9946 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9947 	}
9948 
9949 	dev->wanted_features = dev->features & dev->hw_features;
9950 
9951 	if (!(dev->flags & IFF_LOOPBACK))
9952 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
9953 
9954 	/* If IPv4 TCP segmentation offload is supported we should also
9955 	 * allow the device to enable segmenting the frame with the option
9956 	 * of ignoring a static IP ID value.  This doesn't enable the
9957 	 * feature itself but allows the user to enable it later.
9958 	 */
9959 	if (dev->hw_features & NETIF_F_TSO)
9960 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
9961 	if (dev->vlan_features & NETIF_F_TSO)
9962 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
9963 	if (dev->mpls_features & NETIF_F_TSO)
9964 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
9965 	if (dev->hw_enc_features & NETIF_F_TSO)
9966 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9967 
9968 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
9969 	 */
9970 	dev->vlan_features |= NETIF_F_HIGHDMA;
9971 
9972 	/* Make NETIF_F_SG inheritable to tunnel devices.
9973 	 */
9974 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
9975 
9976 	/* Make NETIF_F_SG inheritable to MPLS.
9977 	 */
9978 	dev->mpls_features |= NETIF_F_SG;
9979 
9980 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
9981 	ret = notifier_to_errno(ret);
9982 	if (ret)
9983 		goto err_uninit;
9984 
9985 	ret = netdev_register_kobject(dev);
9986 	if (ret) {
9987 		dev->reg_state = NETREG_UNREGISTERED;
9988 		goto err_uninit;
9989 	}
9990 	dev->reg_state = NETREG_REGISTERED;
9991 
9992 	__netdev_update_features(dev);
9993 
9994 	/*
9995 	 *	Default initial state at registry is that the
9996 	 *	device is present.
9997 	 */
9998 
9999 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10000 
10001 	linkwatch_init_dev(dev);
10002 
10003 	dev_init_scheduler(dev);
10004 
10005 	dev_hold_track(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10006 	list_netdevice(dev);
10007 
10008 	add_device_randomness(dev->dev_addr, dev->addr_len);
10009 
10010 	/* If the device has a permanent device address, the driver should
10011 	 * set dev_addr, and addr_assign_type should be set to
10012 	 * NET_ADDR_PERM (default value).
10013 	 */
10014 	if (dev->addr_assign_type == NET_ADDR_PERM)
10015 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10016 
10017 	/* Notify protocols that a new device appeared. */
10018 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10019 	ret = notifier_to_errno(ret);
10020 	if (ret) {
10021 		/* Expect explicit free_netdev() on failure */
10022 		dev->needs_free_netdev = false;
10023 		unregister_netdevice_queue(dev, NULL);
10024 		goto out;
10025 	}
10026 	/*
10027 	 *	Prevent userspace races by waiting until the network
10028 	 *	device is fully set up before sending notifications.
10029 	 */
10030 	if (!dev->rtnl_link_ops ||
10031 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10032 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10033 
10034 out:
10035 	return ret;
10036 
10037 err_uninit:
10038 	if (dev->netdev_ops->ndo_uninit)
10039 		dev->netdev_ops->ndo_uninit(dev);
10040 	if (dev->priv_destructor)
10041 		dev->priv_destructor(dev);
10042 err_free_name:
10043 	netdev_name_node_free(dev->name_node);
10044 	goto out;
10045 }
10046 EXPORT_SYMBOL(register_netdevice);
10047 
10048 /**
10049  *	init_dummy_netdev	- init a dummy network device for NAPI
10050  *	@dev: device to init
10051  *
10052  *	This takes a network device structure and initializes the minimum
10053  *	number of fields so it can be used to schedule NAPI polls without
10054  *	registering a full blown interface. This is to be used by drivers
10055  *	that need to tie several hardware interfaces to a single NAPI
10056  *	poll scheduler due to HW limitations.
10057  */
10058 int init_dummy_netdev(struct net_device *dev)
10059 {
10060 	/* Clear everything. Note we don't initialize spinlocks
10061 	 * as they aren't supposed to be taken by any of the
10062 	 * NAPI code and this dummy netdev is supposed to be
10063 	 * only ever used for NAPI polls
10064 	 */
10065 	memset(dev, 0, sizeof(struct net_device));
10066 
10067 	/* make sure we BUG if trying to hit standard
10068 	 * register/unregister code path
10069 	 */
10070 	dev->reg_state = NETREG_DUMMY;
10071 
10072 	/* NAPI wants this */
10073 	INIT_LIST_HEAD(&dev->napi_list);
10074 
10075 	/* a dummy interface is started by default */
10076 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10077 	set_bit(__LINK_STATE_START, &dev->state);
10078 
10079 	/* napi_busy_loop stats accounting wants this */
10080 	dev_net_set(dev, &init_net);
10081 
10082 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10083 	 * because users of this 'device' don't need to change
10084 	 * its refcount.
10085 	 */
10086 
10087 	return 0;
10088 }
10089 EXPORT_SYMBOL_GPL(init_dummy_netdev);
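
/* Illustrative sketch (not built, hypothetical names): tying one NAPI context
 * to several hardware channels through a dummy netdev, as described above.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget);

struct example_hw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static void example_hw_napi_init(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&hw->napi);
}
#endif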
10090 
10091 
10092 /**
10093  *	register_netdev	- register a network device
10094  *	@dev: device to register
10095  *
10096  *	Take a completed network device structure and add it to the kernel
10097  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10098  *	chain. 0 is returned on success. A negative errno code is returned
10099  *	on a failure to set up the device, or if the name is a duplicate.
10100  *
10101  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
10102  *	and expands the device name if you passed a format string to
10103  *	alloc_netdev.
10104  */
10105 int register_netdev(struct net_device *dev)
10106 {
10107 	int err;
10108 
10109 	if (rtnl_lock_killable())
10110 		return -EINTR;
10111 	err = register_netdevice(dev);
10112 	rtnl_unlock();
10113 	return err;
10114 }
10115 EXPORT_SYMBOL(register_netdev);
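
/* Illustrative sketch (not built, hypothetical names): the usual driver
 * lifecycle around register_netdev()/unregister_netdev().
 */
#if 0
static int example_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;
	SET_NETDEV_DEV(dev, parent);

	err = register_netdev(dev);	/* takes rtnl, resolves "eth%d" */
	if (err)
		free_netdev(dev);
	return err;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}
#endif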
10116 
10117 int netdev_refcnt_read(const struct net_device *dev)
10118 {
10119 #ifdef CONFIG_PCPU_DEV_REFCNT
10120 	int i, refcnt = 0;
10121 
10122 	for_each_possible_cpu(i)
10123 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10124 	return refcnt;
10125 #else
10126 	return refcount_read(&dev->dev_refcnt);
10127 #endif
10128 }
10129 EXPORT_SYMBOL(netdev_refcnt_read);
10130 
10131 int netdev_unregister_timeout_secs __read_mostly = 10;
10132 
10133 #define WAIT_REFS_MIN_MSECS 1
10134 #define WAIT_REFS_MAX_MSECS 250
10135 /**
10136  * netdev_wait_allrefs_any - wait until all references are gone.
10137  * @list: list of net_devices to wait on
10138  *
10139  * This is called when unregistering network devices.
10140  *
10141  * Any protocol or device that holds a reference should register
10142  * for netdevice notification, and cleanup and put back the
10143  * reference if they receive an UNREGISTER event.
10144  * We can get stuck here if buggy protocols don't correctly
10145  * call dev_put.
10146  */
10147 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10148 {
10149 	unsigned long rebroadcast_time, warning_time;
10150 	struct net_device *dev;
10151 	int wait = 0;
10152 
10153 	rebroadcast_time = warning_time = jiffies;
10154 
10155 	list_for_each_entry(dev, list, todo_list)
10156 		if (netdev_refcnt_read(dev) == 1)
10157 			return dev;
10158 
10159 	while (true) {
10160 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10161 			rtnl_lock();
10162 
10163 			/* Rebroadcast unregister notification */
10164 			list_for_each_entry(dev, list, todo_list)
10165 				call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10166 
10167 			__rtnl_unlock();
10168 			rcu_barrier();
10169 			rtnl_lock();
10170 
10171 			list_for_each_entry(dev, list, todo_list)
10172 				if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10173 					     &dev->state)) {
10174 					/* We must not have linkwatch events
10175 					 * pending on unregister. If this
10176 					 * happens, we simply run the queue
10177 					 * unscheduled, resulting in a noop
10178 					 * for this device.
10179 					 */
10180 					linkwatch_run_queue();
10181 					break;
10182 				}
10183 
10184 			__rtnl_unlock();
10185 
10186 			rebroadcast_time = jiffies;
10187 		}
10188 
10189 		if (!wait) {
10190 			rcu_barrier();
10191 			wait = WAIT_REFS_MIN_MSECS;
10192 		} else {
10193 			msleep(wait);
10194 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10195 		}
10196 
10197 		list_for_each_entry(dev, list, todo_list)
10198 			if (netdev_refcnt_read(dev) == 1)
10199 				return dev;
10200 
10201 		if (time_after(jiffies, warning_time +
10202 			       netdev_unregister_timeout_secs * HZ)) {
10203 			list_for_each_entry(dev, list, todo_list) {
10204 				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10205 					 dev->name, netdev_refcnt_read(dev));
10206 				ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10207 			}
10208 
10209 			warning_time = jiffies;
10210 		}
10211 	}
10212 }
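
/* Illustrative sketch (not built, hypothetical names): what the comment above
 * expects from long-lived reference holders -- release the reference when the
 * NETDEV_UNREGISTER notification arrives, so this wait loop can terminate.
 */
#if 0
static int example_unregister_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && READ_ONCE(example_cached_dev) == dev) {
		WRITE_ONCE(example_cached_dev, NULL);
		dev_put(dev);	/* drop the reference taken with dev_hold() */
	}
	return NOTIFY_DONE;
}
#endif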
10213 
10214 /* The sequence is:
10215  *
10216  *	rtnl_lock();
10217  *	...
10218  *	register_netdevice(x1);
10219  *	register_netdevice(x2);
10220  *	...
10221  *	unregister_netdevice(y1);
10222  *	unregister_netdevice(y2);
10223  *      ...
10224  *	rtnl_unlock();
10225  *	free_netdev(y1);
10226  *	free_netdev(y2);
10227  *
10228  * We are invoked by rtnl_unlock().
10229  * This allows us to deal with problems:
10230  * 1) We can delete sysfs objects which invoke hotplug
10231  *    without deadlocking with linkwatch via keventd.
10232  * 2) Since we run with the RTNL semaphore not held, we can sleep
10233  *    safely in order to wait for the netdev refcnt to drop to zero.
10234  *
10235  * We must not return until all unregister events added during
10236  * the interval the lock was held have been completed.
10237  */
10238 void netdev_run_todo(void)
10239 {
10240 	struct net_device *dev, *tmp;
10241 	struct list_head list;
10242 #ifdef CONFIG_LOCKDEP
10243 	struct list_head unlink_list;
10244 
10245 	list_replace_init(&net_unlink_list, &unlink_list);
10246 
10247 	while (!list_empty(&unlink_list)) {
10248 		struct net_device *dev = list_first_entry(&unlink_list,
10249 							  struct net_device,
10250 							  unlink_list);
10251 		list_del_init(&dev->unlink_list);
10252 		dev->nested_level = dev->lower_level - 1;
10253 	}
10254 #endif
10255 
10256 	/* Snapshot list, allow later requests */
10257 	list_replace_init(&net_todo_list, &list);
10258 
10259 	__rtnl_unlock();
10260 
10261 	/* Wait for rcu callbacks to finish before next phase */
10262 	if (!list_empty(&list))
10263 		rcu_barrier();
10264 
10265 	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10266 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10267 			netdev_WARN(dev, "run_todo but not unregistering\n");
10268 			list_del(&dev->todo_list);
10269 			continue;
10270 		}
10271 
10272 		dev->reg_state = NETREG_UNREGISTERED;
10273 		linkwatch_forget_dev(dev);
10274 	}
10275 
10276 	while (!list_empty(&list)) {
10277 		dev = netdev_wait_allrefs_any(&list);
10278 		list_del(&dev->todo_list);
10279 
10280 		/* paranoia */
10281 		BUG_ON(netdev_refcnt_read(dev) != 1);
10282 		BUG_ON(!list_empty(&dev->ptype_all));
10283 		BUG_ON(!list_empty(&dev->ptype_specific));
10284 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
10285 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10286 #if IS_ENABLED(CONFIG_DECNET)
10287 		WARN_ON(dev->dn_ptr);
10288 #endif
10289 		if (dev->priv_destructor)
10290 			dev->priv_destructor(dev);
10291 		if (dev->needs_free_netdev)
10292 			free_netdev(dev);
10293 
10294 		if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10295 			wake_up(&netdev_unregistering_wq);
10296 
10297 		/* Free network device */
10298 		kobject_put(&dev->dev.kobj);
10299 	}
10300 }
10301 
10302 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10303  * all the same fields in the same order as net_device_stats, with only
10304  * the type differing, but rtnl_link_stats64 may have additional fields
10305  * at the end for newer counters.
10306  */
10307 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10308 			     const struct net_device_stats *netdev_stats)
10309 {
10310 #if BITS_PER_LONG == 64
10311 	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
10312 	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
10313 	/* zero out counters that only exist in rtnl_link_stats64 */
10314 	memset((char *)stats64 + sizeof(*netdev_stats), 0,
10315 	       sizeof(*stats64) - sizeof(*netdev_stats));
10316 #else
10317 	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
10318 	const unsigned long *src = (const unsigned long *)netdev_stats;
10319 	u64 *dst = (u64 *)stats64;
10320 
10321 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10322 	for (i = 0; i < n; i++)
10323 		dst[i] = src[i];
10324 	/* zero out counters that only exist in rtnl_link_stats64 */
10325 	memset((char *)stats64 + n * sizeof(u64), 0,
10326 	       sizeof(*stats64) - n * sizeof(u64));
10327 #endif
10328 }
10329 EXPORT_SYMBOL(netdev_stats_to_stats64);
10330 
10331 struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
10332 {
10333 	struct net_device_core_stats __percpu *p;
10334 
10335 	p = alloc_percpu_gfp(struct net_device_core_stats,
10336 			     GFP_ATOMIC | __GFP_NOWARN);
10337 
10338 	if (p && cmpxchg(&dev->core_stats, NULL, p))
10339 		free_percpu(p);
10340 
10341 	/* This READ_ONCE() pairs with the cmpxchg() above */
10342 	p = READ_ONCE(dev->core_stats);
10343 	if (!p)
10344 		return NULL;
10345 
10346 	return this_cpu_ptr(p);
10347 }
10348 EXPORT_SYMBOL(netdev_core_stats_alloc);
10349 
10350 /**
10351  *	dev_get_stats	- get network device statistics
10352  *	@dev: device to get statistics from
10353  *	@storage: place to store stats
10354  *
10355  *	Get network statistics from device. Return @storage.
10356  *	The device driver may provide its own method by setting
10357  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10358  *	otherwise the internal statistics structure is used.
10359  */
10360 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10361 					struct rtnl_link_stats64 *storage)
10362 {
10363 	const struct net_device_ops *ops = dev->netdev_ops;
10364 	const struct net_device_core_stats __percpu *p;
10365 
10366 	if (ops->ndo_get_stats64) {
10367 		memset(storage, 0, sizeof(*storage));
10368 		ops->ndo_get_stats64(dev, storage);
10369 	} else if (ops->ndo_get_stats) {
10370 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10371 	} else {
10372 		netdev_stats_to_stats64(storage, &dev->stats);
10373 	}
10374 
10375 	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10376 	p = READ_ONCE(dev->core_stats);
10377 	if (p) {
10378 		const struct net_device_core_stats *core_stats;
10379 		int i;
10380 
10381 		for_each_possible_cpu(i) {
10382 			core_stats = per_cpu_ptr(p, i);
10383 			storage->rx_dropped += local_read(&core_stats->rx_dropped);
10384 			storage->tx_dropped += local_read(&core_stats->tx_dropped);
10385 			storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
10386 			storage->rx_otherhost_dropped += local_read(&core_stats->rx_otherhost_dropped);
10387 		}
10388 	}
10389 	return storage;
10390 }
10391 EXPORT_SYMBOL(dev_get_stats);
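
/* Illustrative sketch (not built, hypothetical name): pulling a consolidated
 * snapshot, much as the rtnetlink stats code does.
 */
#if 0
static void example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx %llu pkts, tx %llu pkts\n",
		    (unsigned long long)stats.rx_packets,
		    (unsigned long long)stats.tx_packets);
}
#endif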
10392 
10393 /**
10394  *	dev_fetch_sw_netstats - get per-cpu network device statistics
10395  *	@s: place to store stats
10396  *	@netstats: per-cpu network stats to read from
10397  *
10398  *	Read per-cpu network statistics and populate the related fields in @s.
10399  */
10400 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10401 			   const struct pcpu_sw_netstats __percpu *netstats)
10402 {
10403 	int cpu;
10404 
10405 	for_each_possible_cpu(cpu) {
10406 		const struct pcpu_sw_netstats *stats;
10407 		struct pcpu_sw_netstats tmp;
10408 		unsigned int start;
10409 
10410 		stats = per_cpu_ptr(netstats, cpu);
10411 		do {
10412 			start = u64_stats_fetch_begin_irq(&stats->syncp);
10413 			tmp.rx_packets = stats->rx_packets;
10414 			tmp.rx_bytes   = stats->rx_bytes;
10415 			tmp.tx_packets = stats->tx_packets;
10416 			tmp.tx_bytes   = stats->tx_bytes;
10417 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
10418 
10419 		s->rx_packets += tmp.rx_packets;
10420 		s->rx_bytes   += tmp.rx_bytes;
10421 		s->tx_packets += tmp.tx_packets;
10422 		s->tx_bytes   += tmp.tx_bytes;
10423 	}
10424 }
10425 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10426 
10427 /**
10428  *	dev_get_tstats64 - ndo_get_stats64 implementation
10429  *	@dev: device to get statistics from
10430  *	@s: place to store stats
10431  *
10432  *	Populate @s from dev->stats and dev->tstats. Can be used as
10433  *	ndo_get_stats64() callback.
10434  */
10435 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10436 {
10437 	netdev_stats_to_stats64(s, &dev->stats);
10438 	dev_fetch_sw_netstats(s, dev->tstats);
10439 }
10440 EXPORT_SYMBOL_GPL(dev_get_tstats64);
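
/* Illustrative sketch (not built, hypothetical names): a driver wiring up
 * dev_get_tstats64() as its ndo_get_stats64, assuming it allocates
 * dev->tstats itself, e.g. from ndo_init().
 */
#if 0
static int example_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_init	 = example_init,
	.ndo_get_stats64 = dev_get_tstats64,
};
#endif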
10441 
10442 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10443 {
10444 	struct netdev_queue *queue = dev_ingress_queue(dev);
10445 
10446 #ifdef CONFIG_NET_CLS_ACT
10447 	if (queue)
10448 		return queue;
10449 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10450 	if (!queue)
10451 		return NULL;
10452 	netdev_init_one_queue(dev, queue, NULL);
10453 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10454 	queue->qdisc_sleeping = &noop_qdisc;
10455 	rcu_assign_pointer(dev->ingress_queue, queue);
10456 #endif
10457 	return queue;
10458 }
10459 
10460 static const struct ethtool_ops default_ethtool_ops;
10461 
10462 void netdev_set_default_ethtool_ops(struct net_device *dev,
10463 				    const struct ethtool_ops *ops)
10464 {
10465 	if (dev->ethtool_ops == &default_ethtool_ops)
10466 		dev->ethtool_ops = ops;
10467 }
10468 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10469 
10470 void netdev_freemem(struct net_device *dev)
10471 {
10472 	char *addr = (char *)dev - dev->padded;
10473 
10474 	kvfree(addr);
10475 }
10476 
10477 /**
10478  * alloc_netdev_mqs - allocate network device
10479  * @sizeof_priv: size of private data to allocate space for
10480  * @name: device name format string
10481  * @name_assign_type: origin of device name
10482  * @setup: callback to initialize device
10483  * @txqs: the number of TX subqueues to allocate
10484  * @rxqs: the number of RX subqueues to allocate
10485  *
10486  * Allocates a struct net_device with private data area for driver use
10487  * and performs basic initialization.  Also allocates subqueue structs
10488  * for each queue on the device.
10489  */
10490 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10491 		unsigned char name_assign_type,
10492 		void (*setup)(struct net_device *),
10493 		unsigned int txqs, unsigned int rxqs)
10494 {
10495 	struct net_device *dev;
10496 	unsigned int alloc_size;
10497 	struct net_device *p;
10498 
10499 	BUG_ON(strlen(name) >= sizeof(dev->name));
10500 
10501 	if (txqs < 1) {
10502 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10503 		return NULL;
10504 	}
10505 
10506 	if (rxqs < 1) {
10507 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10508 		return NULL;
10509 	}
10510 
10511 	alloc_size = sizeof(struct net_device);
10512 	if (sizeof_priv) {
10513 		/* ensure 32-byte alignment of private area */
10514 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10515 		alloc_size += sizeof_priv;
10516 	}
10517 	/* ensure 32-byte alignment of whole construct */
10518 	alloc_size += NETDEV_ALIGN - 1;
10519 
10520 	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10521 	if (!p)
10522 		return NULL;
10523 
10524 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
10525 	dev->padded = (char *)dev - (char *)p;
10526 
10527 	ref_tracker_dir_init(&dev->refcnt_tracker, 128);
10528 #ifdef CONFIG_PCPU_DEV_REFCNT
10529 	dev->pcpu_refcnt = alloc_percpu(int);
10530 	if (!dev->pcpu_refcnt)
10531 		goto free_dev;
10532 	__dev_hold(dev);
10533 #else
10534 	refcount_set(&dev->dev_refcnt, 1);
10535 #endif
10536 
10537 	if (dev_addr_init(dev))
10538 		goto free_pcpu;
10539 
10540 	dev_mc_init(dev);
10541 	dev_uc_init(dev);
10542 
10543 	dev_net_set(dev, &init_net);
10544 
10545 	dev->gso_max_size = GSO_MAX_SIZE;
10546 	dev->gso_max_segs = GSO_MAX_SEGS;
10547 	dev->gro_max_size = GRO_MAX_SIZE;
10548 	dev->upper_level = 1;
10549 	dev->lower_level = 1;
10550 #ifdef CONFIG_LOCKDEP
10551 	dev->nested_level = 0;
10552 	INIT_LIST_HEAD(&dev->unlink_list);
10553 #endif
10554 
10555 	INIT_LIST_HEAD(&dev->napi_list);
10556 	INIT_LIST_HEAD(&dev->unreg_list);
10557 	INIT_LIST_HEAD(&dev->close_list);
10558 	INIT_LIST_HEAD(&dev->link_watch_list);
10559 	INIT_LIST_HEAD(&dev->adj_list.upper);
10560 	INIT_LIST_HEAD(&dev->adj_list.lower);
10561 	INIT_LIST_HEAD(&dev->ptype_all);
10562 	INIT_LIST_HEAD(&dev->ptype_specific);
10563 	INIT_LIST_HEAD(&dev->net_notifier_list);
10564 #ifdef CONFIG_NET_SCHED
10565 	hash_init(dev->qdisc_hash);
10566 #endif
10567 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10568 	setup(dev);
10569 
10570 	if (!dev->tx_queue_len) {
10571 		dev->priv_flags |= IFF_NO_QUEUE;
10572 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10573 	}
10574 
10575 	dev->num_tx_queues = txqs;
10576 	dev->real_num_tx_queues = txqs;
10577 	if (netif_alloc_netdev_queues(dev))
10578 		goto free_all;
10579 
10580 	dev->num_rx_queues = rxqs;
10581 	dev->real_num_rx_queues = rxqs;
10582 	if (netif_alloc_rx_queues(dev))
10583 		goto free_all;
10584 
10585 	strcpy(dev->name, name);
10586 	dev->name_assign_type = name_assign_type;
10587 	dev->group = INIT_NETDEV_GROUP;
10588 	if (!dev->ethtool_ops)
10589 		dev->ethtool_ops = &default_ethtool_ops;
10590 
10591 	nf_hook_netdev_init(dev);
10592 
10593 	return dev;
10594 
10595 free_all:
10596 	free_netdev(dev);
10597 	return NULL;
10598 
10599 free_pcpu:
10600 #ifdef CONFIG_PCPU_DEV_REFCNT
10601 	free_percpu(dev->pcpu_refcnt);
10602 free_dev:
10603 #endif
10604 	netdev_freemem(dev);
10605 	return NULL;
10606 }
10607 EXPORT_SYMBOL(alloc_netdev_mqs);
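
/* Illustrative sketch (not built, hypothetical names): allocating an
 * 8-TX/8-RX-queue ethernet-style device; the queue counts are arbitrary
 * example values and ether_setup() is the stock setup callback.
 */
#if 0
static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
				NET_NAME_UNKNOWN, ether_setup, 8, 8);
}
#endif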
10608 
10609 /**
10610  * free_netdev - free network device
10611  * @dev: device
10612  *
10613  * This function does the last stage of destroying an allocated device
10614  * interface. The reference to the device object is released. If this
10615  * is the last reference then it will be freed. Must be called in
10616  * process context.
10617  */
10618 void free_netdev(struct net_device *dev)
10619 {
10620 	struct napi_struct *p, *n;
10621 
10622 	might_sleep();
10623 
10624 	/* When called immediately after register_netdevice() failed, the unwind
10625 	 * handling may still be dismantling the device. Handle that case by
10626 	 * deferring the free.
10627 	 */
10628 	if (dev->reg_state == NETREG_UNREGISTERING) {
10629 		ASSERT_RTNL();
10630 		dev->needs_free_netdev = true;
10631 		return;
10632 	}
10633 
10634 	netif_free_tx_queues(dev);
10635 	netif_free_rx_queues(dev);
10636 
10637 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10638 
10639 	/* Flush device addresses */
10640 	dev_addr_flush(dev);
10641 
10642 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10643 		netif_napi_del(p);
10644 
10645 	ref_tracker_dir_exit(&dev->refcnt_tracker);
10646 #ifdef CONFIG_PCPU_DEV_REFCNT
10647 	free_percpu(dev->pcpu_refcnt);
10648 	dev->pcpu_refcnt = NULL;
10649 #endif
10650 	free_percpu(dev->core_stats);
10651 	dev->core_stats = NULL;
10652 	free_percpu(dev->xdp_bulkq);
10653 	dev->xdp_bulkq = NULL;
10654 
10655 	/*  Compatibility with error handling in drivers */
10656 	if (dev->reg_state == NETREG_UNINITIALIZED) {
10657 		netdev_freemem(dev);
10658 		return;
10659 	}
10660 
10661 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10662 	dev->reg_state = NETREG_RELEASED;
10663 
10664 	/* will free via device release */
10665 	put_device(&dev->dev);
10666 }
10667 EXPORT_SYMBOL(free_netdev);
10668 
10669 /**
10670  *	synchronize_net -  Synchronize with packet receive processing
10671  *
10672  *	Wait for packets currently being received to be done.
10673  *	Does not block later packets from starting.
10674  */
10675 void synchronize_net(void)
10676 {
10677 	might_sleep();
10678 	if (rtnl_is_locked())
10679 		synchronize_rcu_expedited();
10680 	else
10681 		synchronize_rcu();
10682 }
10683 EXPORT_SYMBOL(synchronize_net);
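
/* Illustrative sketch (not built, hypothetical names): the unpublish-then-free
 * pattern synchronize_net() exists for -- hide the object from the receive
 * path, wait out in-flight readers, then free.
 */
#if 0
static struct example_handler __rcu *example_handler_ptr;

static void example_unregister_handler(void)
{
	struct example_handler *old = rtnl_dereference(example_handler_ptr);

	RCU_INIT_POINTER(example_handler_ptr, NULL);
	synchronize_net();
	kfree(old);
}
#endif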
10684 
10685 /**
10686  *	unregister_netdevice_queue - remove device from the kernel
10687  *	@dev: device
10688  *	@head: list
10689  *
10690  *	This function shuts down a device interface and removes it
10691  *	from the kernel tables.
10692  *	If @head is not NULL, the device is queued to be unregistered later.
10693  *
10694  *	Callers must hold the rtnl semaphore.  You may want
10695  *	unregister_netdev() instead of this.
10696  */
10697 
10698 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10699 {
10700 	ASSERT_RTNL();
10701 
10702 	if (head) {
10703 		list_move_tail(&dev->unreg_list, head);
10704 	} else {
10705 		LIST_HEAD(single);
10706 
10707 		list_add(&dev->unreg_list, &single);
10708 		unregister_netdevice_many(&single);
10709 	}
10710 }
10711 EXPORT_SYMBOL(unregister_netdevice_queue);
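
/* Illustrative sketch (not built, hypothetical name): batching several
 * teardowns under a single rtnl hold, which is what @head enables.
 */
#if 0
static void example_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* also list_del()s the head */
	rtnl_unlock();
}
#endif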
10712 
10713 /**
10714  *	unregister_netdevice_many - unregister many devices
10715  *	@head: list of devices
10716  *
10717  *  Note: As most callers use a stack-allocated list_head,
10718  *  we force a list_del() to make sure the stack won't be corrupted later.
10719  */
10720 void unregister_netdevice_many(struct list_head *head)
10721 {
10722 	struct net_device *dev, *tmp;
10723 	LIST_HEAD(close_head);
10724 
10725 	BUG_ON(dev_boot_phase);
10726 	ASSERT_RTNL();
10727 
10728 	if (list_empty(head))
10729 		return;
10730 
10731 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10732 		/* Some devices call this without ever having registered,
10733 		 * as part of initialization unwind. Remove those
10734 		 * devices and proceed with the remaining.
10735 		 */
10736 		if (dev->reg_state == NETREG_UNINITIALIZED) {
10737 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10738 				 dev->name, dev);
10739 
10740 			WARN_ON(1);
10741 			list_del(&dev->unreg_list);
10742 			continue;
10743 		}
10744 		dev->dismantle = true;
10745 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
10746 	}
10747 
10748 	/* If device is running, close it first. */
10749 	list_for_each_entry(dev, head, unreg_list)
10750 		list_add_tail(&dev->close_list, &close_head);
10751 	dev_close_many(&close_head, true);
10752 
10753 	list_for_each_entry(dev, head, unreg_list) {
10754 		/* And unlink it from device chain. */
10755 		unlist_netdevice(dev);
10756 
10757 		dev->reg_state = NETREG_UNREGISTERING;
10758 	}
10759 	flush_all_backlogs();
10760 
10761 	synchronize_net();
10762 
10763 	list_for_each_entry(dev, head, unreg_list) {
10764 		struct sk_buff *skb = NULL;
10765 
10766 		/* Shutdown queueing discipline. */
10767 		dev_shutdown(dev);
10768 
10769 		dev_xdp_uninstall(dev);
10770 
10771 		netdev_offload_xstats_disable_all(dev);
10772 
10773 		/* Notify protocols that we are about to destroy
10774 		 * this device. They should clean up all of their state.
10775 		 */
10776 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10777 
10778 		if (!dev->rtnl_link_ops ||
10779 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10780 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10781 						     GFP_KERNEL, NULL, 0);
10782 
10783 		/*
10784 		 *	Flush the unicast and multicast chains
10785 		 */
10786 		dev_uc_flush(dev);
10787 		dev_mc_flush(dev);
10788 
10789 		netdev_name_node_alt_flush(dev);
10790 		netdev_name_node_free(dev->name_node);
10791 
10792 		if (dev->netdev_ops->ndo_uninit)
10793 			dev->netdev_ops->ndo_uninit(dev);
10794 
10795 		if (skb)
10796 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
10797 
10798 		/* Notifier chain MUST detach us from all upper devices. */
10799 		WARN_ON(netdev_has_any_upper_dev(dev));
10800 		WARN_ON(netdev_has_any_lower_dev(dev));
10801 
10802 		/* Remove entries from kobject tree */
10803 		netdev_unregister_kobject(dev);
10804 #ifdef CONFIG_XPS
10805 		/* Remove XPS queueing entries */
10806 		netif_reset_xps_queues_gt(dev, 0);
10807 #endif
10808 	}
10809 
10810 	synchronize_net();
10811 
10812 	list_for_each_entry(dev, head, unreg_list) {
10813 		dev_put_track(dev, &dev->dev_registered_tracker);
10814 		net_set_todo(dev);
10815 	}
10816 
10817 	list_del(head);
10818 }
10819 EXPORT_SYMBOL(unregister_netdevice_many);
10820 
10821 /**
10822  *	unregister_netdev - remove device from the kernel
10823  *	@dev: device
10824  *
10825  *	This function shuts down a device interface and removes it
10826  *	from the kernel tables.
10827  *
10828  *	This is just a wrapper for unregister_netdevice that takes
10829  *	the rtnl semaphore.  In general you want to use this and not
10830  *	unregister_netdevice.
10831  */
10832 void unregister_netdev(struct net_device *dev)
10833 {
10834 	rtnl_lock();
10835 	unregister_netdevice(dev);
10836 	rtnl_unlock();
10837 }
10838 EXPORT_SYMBOL(unregister_netdev);
10839 
10840 /**
10841  *	__dev_change_net_namespace - move device to a different network namespace
10842  *	@dev: device
10843  *	@net: network namespace
10844  *	@pat: If not NULL name pattern to try if the current device name
10845  *	      is already taken in the destination network namespace.
10846  *	@new_ifindex: If not zero, specifies device index in the target
10847  *	              namespace.
10848  *
10849  *	This function shuts down a device interface and moves it
10850  *	to a new network namespace. On success 0 is returned, on
10851  *	a failure a negative errno code is returned.
10852  *
10853  *	Callers must hold the rtnl semaphore.
10854  */
10855 
10856 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10857 			       const char *pat, int new_ifindex)
10858 {
10859 	struct net *net_old = dev_net(dev);
10860 	int err, new_nsid;
10861 
10862 	ASSERT_RTNL();
10863 
10864 	/* Don't allow namespace local devices to be moved. */
10865 	err = -EINVAL;
10866 	if (dev->features & NETIF_F_NETNS_LOCAL)
10867 		goto out;
10868 
10869 	/* Ensure the device has been registered */
10870 	if (dev->reg_state != NETREG_REGISTERED)
10871 		goto out;
10872 
10873 	/* Get out if there is nothing to do */
10874 	err = 0;
10875 	if (net_eq(net_old, net))
10876 		goto out;
10877 
10878 	/* Pick the destination device name, and ensure
10879 	 * we can use it in the destination network namespace.
10880 	 */
10881 	err = -EEXIST;
10882 	if (netdev_name_in_use(net, dev->name)) {
10883 		/* We get here if we can't use the current device name */
10884 		if (!pat)
10885 			goto out;
10886 		err = dev_get_valid_name(net, dev, pat);
10887 		if (err < 0)
10888 			goto out;
10889 	}
10890 
10891 	/* Check that new_ifindex isn't used yet. */
10892 	err = -EBUSY;
10893 	if (new_ifindex && __dev_get_by_index(net, new_ifindex))
10894 		goto out;
10895 
10896 	/*
10897 	 * And now a mini version of register_netdevice and unregister_netdevice.
10898 	 */
10899 
10900 	/* If device is running, close it first. */
10901 	dev_close(dev);
10902 
10903 	/* And unlink it from device chain */
10904 	unlist_netdevice(dev);
10905 
10906 	synchronize_net();
10907 
10908 	/* Shutdown queueing discipline. */
10909 	dev_shutdown(dev);
10910 
10911 	/* Notify protocols that we are about to destroy
10912 	 * this device. They should clean up all of their state.
10913 	 *
10914 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
10915 	 * This is wanted because this way 8021q and macvlan know
10916 	 * the device is just moving and can keep their slaves up.
10917 	 */
10918 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10919 	rcu_barrier();
10920 
10921 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
10922 	/* If there is an ifindex conflict, assign a new one */
10923 	if (!new_ifindex) {
10924 		if (__dev_get_by_index(net, dev->ifindex))
10925 			new_ifindex = dev_new_index(net);
10926 		else
10927 			new_ifindex = dev->ifindex;
10928 	}
10929 
10930 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
10931 			    new_ifindex);
10932 
10933 	/*
10934 	 *	Flush the unicast and multicast chains
10935 	 */
10936 	dev_uc_flush(dev);
10937 	dev_mc_flush(dev);
10938 
10939 	/* Send a netdev-removed uevent to the old namespace */
10940 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
10941 	netdev_adjacent_del_links(dev);
10942 
10943 	/* Move per-net netdevice notifiers that are following the netdevice */
10944 	move_netdevice_notifiers_dev_net(dev, net);
10945 
10946 	/* Actually switch the network namespace */
10947 	dev_net_set(dev, net);
10948 	dev->ifindex = new_ifindex;
10949 
10950 	/* Send a netdev-add uevent to the new namespace */
10951 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
10952 	netdev_adjacent_add_links(dev);
10953 
10954 	/* Fixup kobjects */
10955 	err = device_rename(&dev->dev, dev->name);
10956 	WARN_ON(err);
10957 
10958 	/* Adapt owner in case owning user namespace of target network
10959 	 * namespace is different from the original one.
10960 	 */
10961 	err = netdev_change_owner(dev, net_old, net);
10962 	WARN_ON(err);
10963 
10964 	/* Add the device back in the hashes */
10965 	list_netdevice(dev);
10966 
10967 	/* Notify protocols that a new device appeared. */
10968 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
10969 
10970 	/*
10971 	 *	Prevent userspace races by waiting until the network
10972 	 *	device is fully set up before sending notifications.
10973 	 */
10974 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10975 
10976 	synchronize_net();
10977 	err = 0;
10978 out:
10979 	return err;
10980 }
10981 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
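
/* Illustrative sketch (not built, hypothetical name): returning a device to
 * init_net through the dev_change_net_namespace() wrapper, with the same
 * "dev%d" fallback pattern default_device_exit_net() below uses.
 */
#if 0
static int example_return_to_init_net(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, &init_net, "dev%d");
	rtnl_unlock();
	return err;
}
#endif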
10982 
10983 static int dev_cpu_dead(unsigned int oldcpu)
10984 {
10985 	struct sk_buff **list_skb;
10986 	struct sk_buff *skb;
10987 	unsigned int cpu;
10988 	struct softnet_data *sd, *oldsd, *remsd = NULL;
10989 
10990 	local_irq_disable();
10991 	cpu = smp_processor_id();
10992 	sd = &per_cpu(softnet_data, cpu);
10993 	oldsd = &per_cpu(softnet_data, oldcpu);
10994 
10995 	/* Find end of our completion_queue. */
10996 	list_skb = &sd->completion_queue;
10997 	while (*list_skb)
10998 		list_skb = &(*list_skb)->next;
10999 	/* Append completion queue from offline CPU. */
11000 	*list_skb = oldsd->completion_queue;
11001 	oldsd->completion_queue = NULL;
11002 
11003 	/* Append output queue from offline CPU. */
11004 	if (oldsd->output_queue) {
11005 		*sd->output_queue_tailp = oldsd->output_queue;
11006 		sd->output_queue_tailp = oldsd->output_queue_tailp;
11007 		oldsd->output_queue = NULL;
11008 		oldsd->output_queue_tailp = &oldsd->output_queue;
11009 	}
11010 	/* Append NAPI poll list from offline CPU, with one exception:
11011 	 * process_backlog() must be called by the CPU owning the percpu backlog.
11012 	 * We properly handle process_queue & input_pkt_queue later.
11013 	 */
11014 	while (!list_empty(&oldsd->poll_list)) {
11015 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11016 							    struct napi_struct,
11017 							    poll_list);
11018 
11019 		list_del_init(&napi->poll_list);
11020 		if (napi->poll == process_backlog)
11021 			napi->state = 0;
11022 		else
11023 			____napi_schedule(sd, napi);
11024 	}
11025 
11026 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
11027 	local_irq_enable();
11028 
11029 #ifdef CONFIG_RPS
11030 	remsd = oldsd->rps_ipi_list;
11031 	oldsd->rps_ipi_list = NULL;
11032 #endif
11033 	/* send out pending IPIs on offline CPU */
11034 	net_rps_send_ipi(remsd);
11035 
11036 	/* Process offline CPU's input_pkt_queue */
11037 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11038 		netif_rx(skb);
11039 		input_queue_head_incr(oldsd);
11040 	}
11041 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11042 		netif_rx(skb);
11043 		input_queue_head_incr(oldsd);
11044 	}
11045 
11046 	return 0;
11047 }
11048 
11049 /**
11050  *	netdev_increment_features - increment feature set by one
11051  *	@all: current feature set
11052  *	@one: new feature set
11053  *	@mask: mask feature set
11054  *
11055  *	Computes a new feature set after adding a device with feature set
11056  *	@one to the master device with current feature set @all.  Will not
11057  *	enable anything that is off in @mask. Returns the new feature set.
11058  */
11059 netdev_features_t netdev_increment_features(netdev_features_t all,
11060 	netdev_features_t one, netdev_features_t mask)
11061 {
11062 	if (mask & NETIF_F_HW_CSUM)
11063 		mask |= NETIF_F_CSUM_MASK;
11064 	mask |= NETIF_F_VLAN_CHALLENGED;
11065 
11066 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11067 	all &= one | ~NETIF_F_ALL_FOR_ALL;
11068 
11069 	/* If one device supports hw checksumming, set for all. */
11070 	if (all & NETIF_F_HW_CSUM)
11071 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11072 
11073 	return all;
11074 }
11075 EXPORT_SYMBOL(netdev_increment_features);
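
/* Illustrative sketch (not built, hypothetical structures): how an
 * aggregating driver such as bonding folds slave feature sets together,
 * simplified from what bond_compute_features() does.
 */
#if 0
static netdev_features_t example_master_features(struct example_master *m)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	struct example_slave *s;

	list_for_each_entry(s, &m->slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     m->feature_mask);
	return features;
}
#endif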
11076 
11077 static struct hlist_head * __net_init netdev_create_hash(void)
11078 {
11079 	int i;
11080 	struct hlist_head *hash;
11081 
11082 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11083 	if (hash != NULL)
11084 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
11085 			INIT_HLIST_HEAD(&hash[i]);
11086 
11087 	return hash;
11088 }
11089 
11090 /* Initialize per network namespace state */
11091 static int __net_init netdev_init(struct net *net)
11092 {
11093 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
11094 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
11095 
11096 	INIT_LIST_HEAD(&net->dev_base_head);
11097 
11098 	net->dev_name_head = netdev_create_hash();
11099 	if (net->dev_name_head == NULL)
11100 		goto err_name;
11101 
11102 	net->dev_index_head = netdev_create_hash();
11103 	if (net->dev_index_head == NULL)
11104 		goto err_idx;
11105 
11106 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11107 
11108 	return 0;
11109 
11110 err_idx:
11111 	kfree(net->dev_name_head);
11112 err_name:
11113 	return -ENOMEM;
11114 }
11115 
11116 /**
11117  *	netdev_drivername - network driver for the device
11118  *	@dev: network device
11119  *
11120  *	Determine network driver for device.
11121  */
11122 const char *netdev_drivername(const struct net_device *dev)
11123 {
11124 	const struct device_driver *driver;
11125 	const struct device *parent;
11126 	const char *empty = "";
11127 
11128 	parent = dev->dev.parent;
11129 	if (!parent)
11130 		return empty;
11131 
11132 	driver = parent->driver;
11133 	if (driver && driver->name)
11134 		return driver->name;
11135 	return empty;
11136 }
11137 
11138 static void __netdev_printk(const char *level, const struct net_device *dev,
11139 			    struct va_format *vaf)
11140 {
11141 	if (dev && dev->dev.parent) {
11142 		dev_printk_emit(level[1] - '0',
11143 				dev->dev.parent,
11144 				"%s %s %s%s: %pV",
11145 				dev_driver_string(dev->dev.parent),
11146 				dev_name(dev->dev.parent),
11147 				netdev_name(dev), netdev_reg_state(dev),
11148 				vaf);
11149 	} else if (dev) {
11150 		printk("%s%s%s: %pV",
11151 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
11152 	} else {
11153 		printk("%s(NULL net_device): %pV", level, vaf);
11154 	}
11155 }
11156 
11157 void netdev_printk(const char *level, const struct net_device *dev,
11158 		   const char *format, ...)
11159 {
11160 	struct va_format vaf;
11161 	va_list args;
11162 
11163 	va_start(args, format);
11164 
11165 	vaf.fmt = format;
11166 	vaf.va = &args;
11167 
11168 	__netdev_printk(level, dev, &vaf);
11169 
11170 	va_end(args);
11171 }
11172 EXPORT_SYMBOL(netdev_printk);
11173 
11174 #define define_netdev_printk_level(func, level)			\
11175 void func(const struct net_device *dev, const char *fmt, ...)	\
11176 {								\
11177 	struct va_format vaf;					\
11178 	va_list args;						\
11179 								\
11180 	va_start(args, fmt);					\
11181 								\
11182 	vaf.fmt = fmt;						\
11183 	vaf.va = &args;						\
11184 								\
11185 	__netdev_printk(level, dev, &vaf);			\
11186 								\
11187 	va_end(args);						\
11188 }								\
11189 EXPORT_SYMBOL(func);
11190 
11191 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11192 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11193 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11194 define_netdev_printk_level(netdev_err, KERN_ERR);
11195 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11196 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11197 define_netdev_printk_level(netdev_info, KERN_INFO);
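
/* Each expansion above generates one printf-style helper bound to a log
 * level.  A minimal usage sketch ("err" is a hypothetical local):
 *
 *	netdev_err(dev, "failed to restart TX queue: %d\n", err);
 *
 * __netdev_printk() prefixes the message with the driver, bus and
 * netdev name whenever a parent device is known.
 */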
11198 
11199 static void __net_exit netdev_exit(struct net *net)
11200 {
11201 	kfree(net->dev_name_head);
11202 	kfree(net->dev_index_head);
11203 	if (net != &init_net)
11204 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11205 }
11206 
11207 static struct pernet_operations __net_initdata netdev_net_ops = {
11208 	.init = netdev_init,
11209 	.exit = netdev_exit,
11210 };
11211 
11212 static void __net_exit default_device_exit_net(struct net *net)
11213 {
11214 	struct net_device *dev, *aux;
11215 	/*
11216 	 * Push all migratable network devices back to the
11217 	 * initial network namespace
11218 	 */
11219 	ASSERT_RTNL();
11220 	for_each_netdev_safe(net, dev, aux) {
11221 		int err;
11222 		char fb_name[IFNAMSIZ];
11223 
11224 		/* Ignore unmovable devices (e.g. loopback) */
11225 		if (dev->features & NETIF_F_NETNS_LOCAL)
11226 			continue;
11227 
11228 		/* Leave virtual devices for the generic cleanup */
11229 		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11230 			continue;
11231 
11232 		/* Push remaining network devices to init_net */
11233 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11234 		if (netdev_name_in_use(&init_net, fb_name))
11235 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
11236 		err = dev_change_net_namespace(dev, &init_net, fb_name);
11237 		if (err) {
11238 			pr_emerg("%s: failed to move %s to init_net: %d\n",
11239 				 __func__, dev->name, err);
11240 			BUG();
11241 		}
11242 	}
11243 }
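
/* Illustrative fallback naming from the loop above: a device with
 * ifindex 42 whose name clashes in init_net is moved there as "dev42";
 * if "dev42" is also taken, the "dev%d" template lets
 * dev_change_net_namespace() pick the first free "devN".
 */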
11244 
11245 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11246 {
11247 	/* At exit all network devices must be removed from a network
11248 	 * namespace.  Do this in the reverse order of registration.
11249 	 * Do this across as many network namespaces as possible to
11250 	 * improve batching efficiency.
11251 	 */
11252 	struct net_device *dev;
11253 	struct net *net;
11254 	LIST_HEAD(dev_kill_list);
11255 
11256 	rtnl_lock();
11257 	list_for_each_entry(net, net_list, exit_list) {
11258 		default_device_exit_net(net);
11259 		cond_resched();
11260 	}
11261 
11262 	list_for_each_entry(net, net_list, exit_list) {
11263 		for_each_netdev_reverse(net, dev) {
11264 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11265 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11266 			else
11267 				unregister_netdevice_queue(dev, &dev_kill_list);
11268 		}
11269 	}
11270 	unregister_netdevice_many(&dev_kill_list);
11271 	rtnl_unlock();
11272 }
11273 
11274 static struct pernet_operations __net_initdata default_device_ops = {
11275 	.exit_batch = default_device_exit_batch,
11276 };
11277 
11278 /*
11279  *	Initialize the DEV module. At boot time this walks the device list and
11280  *	unhooks any devices that fail to initialize (normally hardware not
11281  *	present) and leaves us with a valid list of present and active devices.
11282  *
11283  */
11284 
11285 /*
11286  * This is called single-threaded during boot, so there is no need
11287  * to take the rtnl semaphore.
11288  */
11289 static int __init net_dev_init(void)
11290 {
11291 	int i, rc = -ENOMEM;
11292 
11293 	BUG_ON(!dev_boot_phase);
11294 
11295 	if (dev_proc_init())
11296 		goto out;
11297 
11298 	if (netdev_kobject_init())
11299 		goto out;
11300 
11301 	INIT_LIST_HEAD(&ptype_all);
11302 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
11303 		INIT_LIST_HEAD(&ptype_base[i]);
11304 
11305 	if (register_pernet_subsys(&netdev_net_ops))
11306 		goto out;
11307 
11308 	/*
11309 	 *	Initialise the packet receive queues.
11310 	 */
11311 
11312 	for_each_possible_cpu(i) {
11313 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11314 		struct softnet_data *sd = &per_cpu(softnet_data, i);
11315 
11316 		INIT_WORK(flush, flush_backlog);
11317 
11318 		skb_queue_head_init(&sd->input_pkt_queue);
11319 		skb_queue_head_init(&sd->process_queue);
11320 #ifdef CONFIG_XFRM_OFFLOAD
11321 		skb_queue_head_init(&sd->xfrm_backlog);
11322 #endif
11323 		INIT_LIST_HEAD(&sd->poll_list);
11324 		sd->output_queue_tailp = &sd->output_queue;
11325 #ifdef CONFIG_RPS
11326 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11327 		sd->cpu = i;
11328 #endif
11329 
11330 		init_gro_hash(&sd->backlog);
11331 		sd->backlog.poll = process_backlog;
11332 		sd->backlog.weight = weight_p;
11333 	}
11334 
11335 	dev_boot_phase = 0;
11336 
11337 	/* The loopback device is special: if any other network device
11338 	 * is present in a network namespace, the loopback device must
11339 	 * be present too. Since we now dynamically allocate and free
11340 	 * the loopback device, maintain this invariant by keeping the
11341 	 * loopback device as the first device on the list of network
11342 	 * devices. This ensures that the loopback device is the first
11343 	 * device that appears and the last network device that
11344 	 * disappears.
11345 	 */
11346 	if (register_pernet_device(&loopback_net_ops))
11347 		goto out;
11348 
11349 	if (register_pernet_device(&default_device_ops))
11350 		goto out;
11351 
11352 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11353 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11354 
11355 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11356 				       NULL, dev_cpu_dead);
11357 	WARN_ON(rc < 0);
11358 	rc = 0;
11359 out:
11360 	return rc;
11361 }
11362 
11363 subsys_initcall(net_dev_init);
11364