xref: /linux/net/core/dev.c (revision 9ecd05794b8da1f6cfca4c3721a3b0fed2e21a82)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      NET3    Protocol independent device support routines.
4  *
5  *	Derived from the non IP parts of dev.c 1.0.19
6  *              Authors:	Ross Biro
7  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *	Additional Authors:
11  *		Florian la Roche <rzsfl@rz.uni-sb.de>
12  *		Alan Cox <gw4pts@gw4pts.ampr.org>
13  *		David Hinds <dahinds@users.sourceforge.net>
14  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15  *		Adam Sulmicki <adam@cfar.umd.edu>
16  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
17  *
18  *	Changes:
19  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20  *                                      to 2 if register_netdev gets called
21  *                                      before net_dev_init & also removed a
22  *                                      few lines of code in the process.
23  *		Alan Cox	:	device private ioctl copies fields back.
24  *		Alan Cox	:	Transmit queue code does relevant
25  *					stunts to keep the queue safe.
26  *		Alan Cox	:	Fixed double lock.
27  *		Alan Cox	:	Fixed promisc NULL pointer trap
28  *		????????	:	Support the full private ioctl range
29  *		Alan Cox	:	Moved ioctl permission check into
30  *					drivers
31  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
32  *		Alan Cox	:	100 backlog just doesn't cut it when
33  *					you start doing multicast video 8)
34  *		Alan Cox	:	Rewrote net_bh and list manager.
35  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36  *		Alan Cox	:	Took out transmit every packet pass
37  *					Saved a few bytes in the ioctl handler
38  *		Alan Cox	:	Network driver sets packet type before
39  *					calling netif_rx. Saves a function
40  *					call a packet.
41  *		Alan Cox	:	Hashed net_bh()
42  *		Richard Kooijman:	Timestamp fixes.
43  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
44  *		Alan Cox	:	Device lock protection.
45  *              Alan Cox        :       Fixed nasty side effect of device close
46  *					changes.
47  *		Rudi Cilibrasi	:	Pass the right thing to
48  *					set_mac_address()
49  *		Dave Miller	:	32bit quantity for the device lock to
50  *					make it work out on a Sparc.
51  *		Bjorn Ekwall	:	Added KERNELD hack.
52  *		Alan Cox	:	Cleaned up the backlog initialise.
53  *		Craig Metz	:	SIOCGIFCONF fix if space for under
54  *					1 device.
55  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
56  *					is no device open function.
57  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
58  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
59  *		Cyrus Durgin	:	Cleaned for KMOD
60  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
61  *					A network device unload needs to purge
62  *					the backlog queue.
63  *	Paul Rusty Russell	:	SIOCSIFNAME
64  *              Pekka Riikonen  :	Netdev boot-time settings code
65  *              Andrew Morton   :       Make unregister_netdevice wait
66  *                                      indefinitely on dev->refcnt
67  *              J Hadi Salim    :       - Backlog queue sampling
68  *				        - netif_rx() feedback
69  */
70 
71 #include <linux/uaccess.h>
72 #include <linux/bitops.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/rwsem.h>
83 #include <linux/string.h>
84 #include <linux/mm.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/ethtool.h>
93 #include <linux/skbuff.h>
94 #include <linux/kthread.h>
95 #include <linux/bpf.h>
96 #include <linux/bpf_trace.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
102 #include <net/dsa.h>
103 #include <net/dst.h>
104 #include <net/dst_metadata.h>
105 #include <net/gro.h>
106 #include <net/pkt_sched.h>
107 #include <net/pkt_cls.h>
108 #include <net/checksum.h>
109 #include <net/xfrm.h>
110 #include <linux/highmem.h>
111 #include <linux/init.h>
112 #include <linux/module.h>
113 #include <linux/netpoll.h>
114 #include <linux/rcupdate.h>
115 #include <linux/delay.h>
116 #include <net/iw_handler.h>
117 #include <asm/current.h>
118 #include <linux/audit.h>
119 #include <linux/dmaengine.h>
120 #include <linux/err.h>
121 #include <linux/ctype.h>
122 #include <linux/if_arp.h>
123 #include <linux/if_vlan.h>
124 #include <linux/ip.h>
125 #include <net/ip.h>
126 #include <net/mpls.h>
127 #include <linux/ipv6.h>
128 #include <linux/in.h>
129 #include <linux/jhash.h>
130 #include <linux/random.h>
131 #include <trace/events/napi.h>
132 #include <trace/events/net.h>
133 #include <trace/events/skb.h>
134 #include <trace/events/qdisc.h>
135 #include <linux/inetdevice.h>
136 #include <linux/cpu_rmap.h>
137 #include <linux/static_key.h>
138 #include <linux/hashtable.h>
139 #include <linux/vmalloc.h>
140 #include <linux/if_macvlan.h>
141 #include <linux/errqueue.h>
142 #include <linux/hrtimer.h>
143 #include <linux/netfilter_netdev.h>
144 #include <linux/crash_dump.h>
145 #include <linux/sctp.h>
146 #include <net/udp_tunnel.h>
147 #include <linux/net_namespace.h>
148 #include <linux/indirect_call_wrapper.h>
149 #include <net/devlink.h>
150 #include <linux/pm_runtime.h>
151 #include <linux/prandom.h>
152 #include <linux/once_lite.h>
153 
154 #include "dev.h"
155 #include "net-sysfs.h"
156 
157 
158 static DEFINE_SPINLOCK(ptype_lock);
159 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
160 struct list_head ptype_all __read_mostly;	/* Taps */
161 
162 static int netif_rx_internal(struct sk_buff *skb);
163 static int call_netdevice_notifiers_extack(unsigned long val,
164 					   struct net_device *dev,
165 					   struct netlink_ext_ack *extack);
166 static struct napi_struct *napi_by_id(unsigned int napi_id);
167 
168 /*
169  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
170  * semaphore.
171  *
172  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
173  *
174  * Writers must hold the rtnl semaphore while they loop through the
175  * dev_base_head list, and hold dev_base_lock for writing when they do the
176  * actual updates.  This allows pure readers to access the list even
177  * while a writer is preparing to update it.
178  *
179  * To put it another way, dev_base_lock is held for writing only to
180  * protect against pure readers; the rtnl semaphore provides the
181  * protection against other writers.
182  *
183  * See, for example usages, register_netdevice() and
184  * unregister_netdevice(), which must be called with the rtnl
185  * semaphore held.
186  */
187 DEFINE_RWLOCK(dev_base_lock);
188 EXPORT_SYMBOL(dev_base_lock);
189 
190 static DEFINE_MUTEX(ifalias_mutex);
191 
192 /* protects napi_hash addition/deletion and napi_gen_id */
193 static DEFINE_SPINLOCK(napi_hash_lock);
194 
195 static unsigned int napi_gen_id = NR_CPUS;
196 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
197 
198 static DECLARE_RWSEM(devnet_rename_sem);
199 
200 static inline void dev_base_seq_inc(struct net *net)
201 {
202 	while (++net->dev_base_seq == 0)
203 		;
204 }
205 
206 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
207 {
208 	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
209 
210 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
211 }
212 
213 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
214 {
215 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
216 }
217 
218 static inline void rps_lock_irqsave(struct softnet_data *sd,
219 				    unsigned long *flags)
220 {
221 	if (IS_ENABLED(CONFIG_RPS))
222 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
223 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
224 		local_irq_save(*flags);
225 }
226 
227 static inline void rps_lock_irq_disable(struct softnet_data *sd)
228 {
229 	if (IS_ENABLED(CONFIG_RPS))
230 		spin_lock_irq(&sd->input_pkt_queue.lock);
231 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
232 		local_irq_disable();
233 }
234 
235 static inline void rps_unlock_irq_restore(struct softnet_data *sd,
236 					  unsigned long *flags)
237 {
238 	if (IS_ENABLED(CONFIG_RPS))
239 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
240 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
241 		local_irq_restore(*flags);
242 }
243 
244 static inline void rps_unlock_irq_enable(struct softnet_data *sd)
245 {
246 	if (IS_ENABLED(CONFIG_RPS))
247 		spin_unlock_irq(&sd->input_pkt_queue.lock);
248 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
249 		local_irq_enable();
250 }
251 
252 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
253 						       const char *name)
254 {
255 	struct netdev_name_node *name_node;
256 
257 	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
258 	if (!name_node)
259 		return NULL;
260 	INIT_HLIST_NODE(&name_node->hlist);
261 	name_node->dev = dev;
262 	name_node->name = name;
263 	return name_node;
264 }
265 
266 static struct netdev_name_node *
267 netdev_name_node_head_alloc(struct net_device *dev)
268 {
269 	struct netdev_name_node *name_node;
270 
271 	name_node = netdev_name_node_alloc(dev, dev->name);
272 	if (!name_node)
273 		return NULL;
274 	INIT_LIST_HEAD(&name_node->list);
275 	return name_node;
276 }
277 
278 static void netdev_name_node_free(struct netdev_name_node *name_node)
279 {
280 	kfree(name_node);
281 }
282 
283 static void netdev_name_node_add(struct net *net,
284 				 struct netdev_name_node *name_node)
285 {
286 	hlist_add_head_rcu(&name_node->hlist,
287 			   dev_name_hash(net, name_node->name));
288 }
289 
290 static void netdev_name_node_del(struct netdev_name_node *name_node)
291 {
292 	hlist_del_rcu(&name_node->hlist);
293 }
294 
295 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
296 							const char *name)
297 {
298 	struct hlist_head *head = dev_name_hash(net, name);
299 	struct netdev_name_node *name_node;
300 
301 	hlist_for_each_entry(name_node, head, hlist)
302 		if (!strcmp(name_node->name, name))
303 			return name_node;
304 	return NULL;
305 }
306 
307 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
308 							    const char *name)
309 {
310 	struct hlist_head *head = dev_name_hash(net, name);
311 	struct netdev_name_node *name_node;
312 
313 	hlist_for_each_entry_rcu(name_node, head, hlist)
314 		if (!strcmp(name_node->name, name))
315 			return name_node;
316 	return NULL;
317 }
318 
319 bool netdev_name_in_use(struct net *net, const char *name)
320 {
321 	return netdev_name_node_lookup(net, name);
322 }
323 EXPORT_SYMBOL(netdev_name_in_use);
324 
325 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
326 {
327 	struct netdev_name_node *name_node;
328 	struct net *net = dev_net(dev);
329 
330 	name_node = netdev_name_node_lookup(net, name);
331 	if (name_node)
332 		return -EEXIST;
333 	name_node = netdev_name_node_alloc(dev, name);
334 	if (!name_node)
335 		return -ENOMEM;
336 	netdev_name_node_add(net, name_node);
337 	/* The node that holds dev->name acts as a head of per-device list. */
338 	list_add_tail(&name_node->list, &dev->name_node->list);
339 
340 	return 0;
341 }
342 
343 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
344 {
345 	list_del(&name_node->list);
346 	netdev_name_node_del(name_node);
347 	kfree(name_node->name);
348 	netdev_name_node_free(name_node);
349 }
350 
351 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
352 {
353 	struct netdev_name_node *name_node;
354 	struct net *net = dev_net(dev);
355 
356 	name_node = netdev_name_node_lookup(net, name);
357 	if (!name_node)
358 		return -ENOENT;
359 	/* lookup might have found our primary name or a name belonging
360 	 * to another device.
361 	 */
362 	if (name_node == dev->name_node || name_node->dev != dev)
363 		return -EINVAL;
364 
365 	__netdev_name_node_alt_destroy(name_node);
366 
367 	return 0;
368 }
369 
370 static void netdev_name_node_alt_flush(struct net_device *dev)
371 {
372 	struct netdev_name_node *name_node, *tmp;
373 
374 	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
375 		__netdev_name_node_alt_destroy(name_node);
376 }
377 
378 /* Device list insertion */
379 static void list_netdevice(struct net_device *dev)
380 {
381 	struct net *net = dev_net(dev);
382 
383 	ASSERT_RTNL();
384 
385 	write_lock(&dev_base_lock);
386 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
387 	netdev_name_node_add(net, dev->name_node);
388 	hlist_add_head_rcu(&dev->index_hlist,
389 			   dev_index_hash(net, dev->ifindex));
390 	write_unlock(&dev_base_lock);
391 
392 	dev_base_seq_inc(net);
393 }
394 
395 /* Device list removal
396  * caller must respect a RCU grace period before freeing/reusing dev
397  */
398 static void unlist_netdevice(struct net_device *dev, bool lock)
399 {
400 	ASSERT_RTNL();
401 
402 	/* Unlink dev from the device chain */
403 	if (lock)
404 		write_lock(&dev_base_lock);
405 	list_del_rcu(&dev->dev_list);
406 	netdev_name_node_del(dev->name_node);
407 	hlist_del_rcu(&dev->index_hlist);
408 	if (lock)
409 		write_unlock(&dev_base_lock);
410 
411 	dev_base_seq_inc(dev_net(dev));
412 }
413 
414 /*
415  *	Our notifier list
416  */
417 
418 static RAW_NOTIFIER_HEAD(netdev_chain);
419 
420 /*
421  *	Device drivers call our routines to queue packets here. We empty the
422  *	queue in the local softnet handler.
423  */
424 
425 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
426 EXPORT_PER_CPU_SYMBOL(softnet_data);
427 
428 #ifdef CONFIG_LOCKDEP
429 /*
430  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
431  * according to dev->type
432  */
433 static const unsigned short netdev_lock_type[] = {
434 	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
435 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
436 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
437 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
438 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
439 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
440 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
441 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
442 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
443 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
444 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
445 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
446 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
447 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
448 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
449 
450 static const char *const netdev_lock_name[] = {
451 	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
452 	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
453 	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
454 	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
455 	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
456 	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
457 	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
458 	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
459 	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
460 	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
461 	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
462 	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
463 	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
464 	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
465 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
466 
467 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
468 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
469 
470 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
471 {
472 	int i;
473 
474 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
475 		if (netdev_lock_type[i] == dev_type)
476 			return i;
477 	/* the last key is used by default */
478 	return ARRAY_SIZE(netdev_lock_type) - 1;
479 }
480 
481 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
482 						 unsigned short dev_type)
483 {
484 	int i;
485 
486 	i = netdev_lock_pos(dev_type);
487 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
488 				   netdev_lock_name[i]);
489 }
490 
491 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
492 {
493 	int i;
494 
495 	i = netdev_lock_pos(dev->type);
496 	lockdep_set_class_and_name(&dev->addr_list_lock,
497 				   &netdev_addr_lock_key[i],
498 				   netdev_lock_name[i]);
499 }
500 #else
501 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
502 						 unsigned short dev_type)
503 {
504 }
505 
506 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
507 {
508 }
509 #endif
510 
511 /*******************************************************************************
512  *
513  *		Protocol management and registration routines
514  *
515  *******************************************************************************/
516 
517 
518 /*
519  *	Add a protocol ID to the list. Now that the input handler is
520  *	smarter we can dispense with all the messy stuff that used to be
521  *	here.
522  *
523  *	BEWARE!!! Protocol handlers, mangling input packets,
524  *	MUST BE last in hash buckets and checking protocol handlers
525  *	MUST start from promiscuous ptype_all chain in net_bh.
526  *	It is true now, do not change it.
527  *	Explanation follows: if protocol handler, mangling packet, will
528  *	be the first on list, it is not able to sense, that packet
529  *	is cloned and should be copied-on-write, so that it will
530  *	change it and subsequent readers will get broken packet.
531  *							--ANK (980803)
532  */
533 
534 static inline struct list_head *ptype_head(const struct packet_type *pt)
535 {
536 	if (pt->type == htons(ETH_P_ALL))
537 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
538 	else
539 		return pt->dev ? &pt->dev->ptype_specific :
540 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
541 }
542 
543 /**
544  *	dev_add_pack - add packet handler
545  *	@pt: packet type declaration
546  *
547  *	Add a protocol handler to the networking stack. The passed &packet_type
548  *	is linked into kernel lists and may not be freed until it has been
549  *	removed from the kernel lists.
550  *
551  *	This call does not sleep therefore it can not
552  *	guarantee all CPU's that are in middle of receiving packets
553  *	will see the new packet type (until the next received packet).
554  */
555 
556 void dev_add_pack(struct packet_type *pt)
557 {
558 	struct list_head *head = ptype_head(pt);
559 
560 	spin_lock(&ptype_lock);
561 	list_add_rcu(&pt->list, head);
562 	spin_unlock(&ptype_lock);
563 }
564 EXPORT_SYMBOL(dev_add_pack);
565 
566 /**
567  *	__dev_remove_pack	 - remove packet handler
568  *	@pt: packet type declaration
569  *
570  *	Remove a protocol handler that was previously added to the kernel
571  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
572  *	from the kernel lists and can be freed or reused once this function
573  *	returns.
574  *
575  *      The packet type might still be in use by receivers
576  *	and must not be freed until after all the CPU's have gone
577  *	through a quiescent state.
578  */
579 void __dev_remove_pack(struct packet_type *pt)
580 {
581 	struct list_head *head = ptype_head(pt);
582 	struct packet_type *pt1;
583 
584 	spin_lock(&ptype_lock);
585 
586 	list_for_each_entry(pt1, head, list) {
587 		if (pt == pt1) {
588 			list_del_rcu(&pt->list);
589 			goto out;
590 		}
591 	}
592 
593 	pr_warn("dev_remove_pack: %p not found\n", pt);
594 out:
595 	spin_unlock(&ptype_lock);
596 }
597 EXPORT_SYMBOL(__dev_remove_pack);
598 
599 /**
600  *	dev_remove_pack	 - remove packet handler
601  *	@pt: packet type declaration
602  *
603  *	Remove a protocol handler that was previously added to the kernel
604  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
605  *	from the kernel lists and can be freed or reused once this function
606  *	returns.
607  *
608  *	This call sleeps to guarantee that no CPU is looking at the packet
609  *	type after return.
610  */
611 void dev_remove_pack(struct packet_type *pt)
612 {
613 	__dev_remove_pack(pt);
614 
615 	synchronize_net();
616 }
617 EXPORT_SYMBOL(dev_remove_pack);
618 
619 
620 /*******************************************************************************
621  *
622  *			    Device Interface Subroutines
623  *
624  *******************************************************************************/
625 
626 /**
627  *	dev_get_iflink	- get 'iflink' value of a interface
628  *	@dev: targeted interface
629  *
630  *	Indicates the ifindex the interface is linked to.
631  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
632  */
633 
634 int dev_get_iflink(const struct net_device *dev)
635 {
636 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
637 		return dev->netdev_ops->ndo_get_iflink(dev);
638 
639 	return dev->ifindex;
640 }
641 EXPORT_SYMBOL(dev_get_iflink);
642 
643 /**
644  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
645  *	@dev: targeted interface
646  *	@skb: The packet.
647  *
648  *	For better visibility of tunnel traffic OVS needs to retrieve
649  *	egress tunnel information for a packet. Following API allows
650  *	user to get this info.
651  */
652 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
653 {
654 	struct ip_tunnel_info *info;
655 
656 	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
657 		return -EINVAL;
658 
659 	info = skb_tunnel_info_unclone(skb);
660 	if (!info)
661 		return -ENOMEM;
662 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
663 		return -EINVAL;
664 
665 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
666 }
667 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
668 
669 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
670 {
671 	int k = stack->num_paths++;
672 
673 	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
674 		return NULL;
675 
676 	return &stack->path[k];
677 }
678 
679 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
680 			  struct net_device_path_stack *stack)
681 {
682 	const struct net_device *last_dev;
683 	struct net_device_path_ctx ctx = {
684 		.dev	= dev,
685 	};
686 	struct net_device_path *path;
687 	int ret = 0;
688 
689 	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
690 	stack->num_paths = 0;
691 	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
692 		last_dev = ctx.dev;
693 		path = dev_fwd_path(stack);
694 		if (!path)
695 			return -1;
696 
697 		memset(path, 0, sizeof(struct net_device_path));
698 		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
699 		if (ret < 0)
700 			return -1;
701 
702 		if (WARN_ON_ONCE(last_dev == ctx.dev))
703 			return -1;
704 	}
705 
706 	if (!ctx.dev)
707 		return ret;
708 
709 	path = dev_fwd_path(stack);
710 	if (!path)
711 		return -1;
712 	path->type = DEV_PATH_ETHERNET;
713 	path->dev = ctx.dev;
714 
715 	return ret;
716 }
717 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
718 
719 /**
720  *	__dev_get_by_name	- find a device by its name
721  *	@net: the applicable net namespace
722  *	@name: name to find
723  *
724  *	Find an interface by name. Must be called under RTNL semaphore
725  *	or @dev_base_lock. If the name is found a pointer to the device
726  *	is returned. If the name is not found then %NULL is returned. The
727  *	reference counters are not incremented so the caller must be
728  *	careful with locks.
729  */
730 
731 struct net_device *__dev_get_by_name(struct net *net, const char *name)
732 {
733 	struct netdev_name_node *node_name;
734 
735 	node_name = netdev_name_node_lookup(net, name);
736 	return node_name ? node_name->dev : NULL;
737 }
738 EXPORT_SYMBOL(__dev_get_by_name);
739 
740 /**
741  * dev_get_by_name_rcu	- find a device by its name
742  * @net: the applicable net namespace
743  * @name: name to find
744  *
745  * Find an interface by name.
746  * If the name is found a pointer to the device is returned.
747  * If the name is not found then %NULL is returned.
748  * The reference counters are not incremented so the caller must be
749  * careful with locks. The caller must hold RCU lock.
750  */
751 
752 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
753 {
754 	struct netdev_name_node *node_name;
755 
756 	node_name = netdev_name_node_lookup_rcu(net, name);
757 	return node_name ? node_name->dev : NULL;
758 }
759 EXPORT_SYMBOL(dev_get_by_name_rcu);
760 
761 /**
762  *	dev_get_by_name		- find a device by its name
763  *	@net: the applicable net namespace
764  *	@name: name to find
765  *
766  *	Find an interface by name. This can be called from any
767  *	context and does its own locking. The returned handle has
768  *	the usage count incremented and the caller must use dev_put() to
769  *	release it when it is no longer needed. %NULL is returned if no
770  *	matching device is found.
771  */
772 
773 struct net_device *dev_get_by_name(struct net *net, const char *name)
774 {
775 	struct net_device *dev;
776 
777 	rcu_read_lock();
778 	dev = dev_get_by_name_rcu(net, name);
779 	dev_hold(dev);
780 	rcu_read_unlock();
781 	return dev;
782 }
783 EXPORT_SYMBOL(dev_get_by_name);
784 
785 /**
786  *	__dev_get_by_index - find a device by its ifindex
787  *	@net: the applicable net namespace
788  *	@ifindex: index of device
789  *
790  *	Search for an interface by index. Returns %NULL if the device
791  *	is not found or a pointer to the device. The device has not
792  *	had its reference counter increased so the caller must be careful
793  *	about locking. The caller must hold either the RTNL semaphore
794  *	or @dev_base_lock.
795  */
796 
797 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
798 {
799 	struct net_device *dev;
800 	struct hlist_head *head = dev_index_hash(net, ifindex);
801 
802 	hlist_for_each_entry(dev, head, index_hlist)
803 		if (dev->ifindex == ifindex)
804 			return dev;
805 
806 	return NULL;
807 }
808 EXPORT_SYMBOL(__dev_get_by_index);
809 
810 /**
811  *	dev_get_by_index_rcu - find a device by its ifindex
812  *	@net: the applicable net namespace
813  *	@ifindex: index of device
814  *
815  *	Search for an interface by index. Returns %NULL if the device
816  *	is not found or a pointer to the device. The device has not
817  *	had its reference counter increased so the caller must be careful
818  *	about locking. The caller must hold RCU lock.
819  */
820 
821 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
822 {
823 	struct net_device *dev;
824 	struct hlist_head *head = dev_index_hash(net, ifindex);
825 
826 	hlist_for_each_entry_rcu(dev, head, index_hlist)
827 		if (dev->ifindex == ifindex)
828 			return dev;
829 
830 	return NULL;
831 }
832 EXPORT_SYMBOL(dev_get_by_index_rcu);
833 
834 
835 /**
836  *	dev_get_by_index - find a device by its ifindex
837  *	@net: the applicable net namespace
838  *	@ifindex: index of device
839  *
840  *	Search for an interface by index. Returns NULL if the device
841  *	is not found or a pointer to the device. The device returned has
842  *	had a reference added and the pointer is safe until the user calls
843  *	dev_put to indicate they have finished with it.
844  */
845 
846 struct net_device *dev_get_by_index(struct net *net, int ifindex)
847 {
848 	struct net_device *dev;
849 
850 	rcu_read_lock();
851 	dev = dev_get_by_index_rcu(net, ifindex);
852 	dev_hold(dev);
853 	rcu_read_unlock();
854 	return dev;
855 }
856 EXPORT_SYMBOL(dev_get_by_index);
857 
858 /**
859  *	dev_get_by_napi_id - find a device by napi_id
860  *	@napi_id: ID of the NAPI struct
861  *
862  *	Search for an interface by NAPI ID. Returns %NULL if the device
863  *	is not found or a pointer to the device. The device has not had
864  *	its reference counter increased so the caller must be careful
865  *	about locking. The caller must hold RCU lock.
866  */
867 
868 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
869 {
870 	struct napi_struct *napi;
871 
872 	WARN_ON_ONCE(!rcu_read_lock_held());
873 
874 	if (napi_id < MIN_NAPI_ID)
875 		return NULL;
876 
877 	napi = napi_by_id(napi_id);
878 
879 	return napi ? napi->dev : NULL;
880 }
881 EXPORT_SYMBOL(dev_get_by_napi_id);
882 
883 /**
884  *	netdev_get_name - get a netdevice name, knowing its ifindex.
885  *	@net: network namespace
886  *	@name: a pointer to the buffer where the name will be stored.
887  *	@ifindex: the ifindex of the interface to get the name from.
888  */
889 int netdev_get_name(struct net *net, char *name, int ifindex)
890 {
891 	struct net_device *dev;
892 	int ret;
893 
894 	down_read(&devnet_rename_sem);
895 	rcu_read_lock();
896 
897 	dev = dev_get_by_index_rcu(net, ifindex);
898 	if (!dev) {
899 		ret = -ENODEV;
900 		goto out;
901 	}
902 
903 	strcpy(name, dev->name);
904 
905 	ret = 0;
906 out:
907 	rcu_read_unlock();
908 	up_read(&devnet_rename_sem);
909 	return ret;
910 }
911 
912 /**
913  *	dev_getbyhwaddr_rcu - find a device by its hardware address
914  *	@net: the applicable net namespace
915  *	@type: media type of device
916  *	@ha: hardware address
917  *
918  *	Search for an interface by MAC address. Returns NULL if the device
919  *	is not found or a pointer to the device.
920  *	The caller must hold RCU or RTNL.
921  *	The returned device has not had its ref count increased
922  *	and the caller must therefore be careful about locking
923  *
924  */
925 
926 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
927 				       const char *ha)
928 {
929 	struct net_device *dev;
930 
931 	for_each_netdev_rcu(net, dev)
932 		if (dev->type == type &&
933 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
934 			return dev;
935 
936 	return NULL;
937 }
938 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
939 
940 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
941 {
942 	struct net_device *dev, *ret = NULL;
943 
944 	rcu_read_lock();
945 	for_each_netdev_rcu(net, dev)
946 		if (dev->type == type) {
947 			dev_hold(dev);
948 			ret = dev;
949 			break;
950 		}
951 	rcu_read_unlock();
952 	return ret;
953 }
954 EXPORT_SYMBOL(dev_getfirstbyhwtype);
955 
956 /**
957  *	__dev_get_by_flags - find any device with given flags
958  *	@net: the applicable net namespace
959  *	@if_flags: IFF_* values
960  *	@mask: bitmask of bits in if_flags to check
961  *
962  *	Search for any interface with the given flags. Returns NULL if a device
963  *	is not found or a pointer to the device. Must be called inside
964  *	rtnl_lock(), and result refcount is unchanged.
965  */
966 
967 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
968 				      unsigned short mask)
969 {
970 	struct net_device *dev, *ret;
971 
972 	ASSERT_RTNL();
973 
974 	ret = NULL;
975 	for_each_netdev(net, dev) {
976 		if (((dev->flags ^ if_flags) & mask) == 0) {
977 			ret = dev;
978 			break;
979 		}
980 	}
981 	return ret;
982 }
983 EXPORT_SYMBOL(__dev_get_by_flags);
984 
985 /**
986  *	dev_valid_name - check if name is okay for network device
987  *	@name: name string
988  *
989  *	Network device names need to be valid file names to
990  *	allow sysfs to work.  We also disallow any kind of
991  *	whitespace.
992  */
993 bool dev_valid_name(const char *name)
994 {
995 	if (*name == '\0')
996 		return false;
997 	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
998 		return false;
999 	if (!strcmp(name, ".") || !strcmp(name, ".."))
1000 		return false;
1001 
1002 	while (*name) {
1003 		if (*name == '/' || *name == ':' || isspace(*name))
1004 			return false;
1005 		name++;
1006 	}
1007 	return true;
1008 }
1009 EXPORT_SYMBOL(dev_valid_name);
1010 
1011 /**
1012  *	__dev_alloc_name - allocate a name for a device
1013  *	@net: network namespace to allocate the device name in
1014  *	@name: name format string
1015  *	@buf:  scratch buffer and result name string
1016  *
1017  *	Passed a format string - eg "lt%d" it will try and find a suitable
1018  *	id. It scans list of devices to build up a free map, then chooses
1019  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1020  *	while allocating the name and adding the device in order to avoid
1021  *	duplicates.
1022  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1023  *	Returns the number of the unit assigned or a negative errno code.
1024  */
1025 
1026 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1027 {
1028 	int i = 0;
1029 	const char *p;
1030 	const int max_netdevices = 8*PAGE_SIZE;
1031 	unsigned long *inuse;
1032 	struct net_device *d;
1033 
1034 	if (!dev_valid_name(name))
1035 		return -EINVAL;
1036 
1037 	p = strchr(name, '%');
1038 	if (p) {
1039 		/*
1040 		 * Verify the string as this thing may have come from
1041 		 * the user.  There must be either one "%d" and no other "%"
1042 		 * characters.
1043 		 */
1044 		if (p[1] != 'd' || strchr(p + 2, '%'))
1045 			return -EINVAL;
1046 
1047 		/* Use one page as a bit array of possible slots */
1048 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1049 		if (!inuse)
1050 			return -ENOMEM;
1051 
1052 		for_each_netdev(net, d) {
1053 			struct netdev_name_node *name_node;
1054 			list_for_each_entry(name_node, &d->name_node->list, list) {
1055 				if (!sscanf(name_node->name, name, &i))
1056 					continue;
1057 				if (i < 0 || i >= max_netdevices)
1058 					continue;
1059 
1060 				/*  avoid cases where sscanf is not exact inverse of printf */
1061 				snprintf(buf, IFNAMSIZ, name, i);
1062 				if (!strncmp(buf, name_node->name, IFNAMSIZ))
1063 					__set_bit(i, inuse);
1064 			}
1065 			if (!sscanf(d->name, name, &i))
1066 				continue;
1067 			if (i < 0 || i >= max_netdevices)
1068 				continue;
1069 
1070 			/*  avoid cases where sscanf is not exact inverse of printf */
1071 			snprintf(buf, IFNAMSIZ, name, i);
1072 			if (!strncmp(buf, d->name, IFNAMSIZ))
1073 				__set_bit(i, inuse);
1074 		}
1075 
1076 		i = find_first_zero_bit(inuse, max_netdevices);
1077 		free_page((unsigned long) inuse);
1078 	}
1079 
1080 	snprintf(buf, IFNAMSIZ, name, i);
1081 	if (!netdev_name_in_use(net, buf))
1082 		return i;
1083 
1084 	/* It is possible to run out of possible slots
1085 	 * when the name is long and there isn't enough space left
1086 	 * for the digits, or if all bits are used.
1087 	 */
1088 	return -ENFILE;
1089 }
1090 
1091 static int dev_alloc_name_ns(struct net *net,
1092 			     struct net_device *dev,
1093 			     const char *name)
1094 {
1095 	char buf[IFNAMSIZ];
1096 	int ret;
1097 
1098 	BUG_ON(!net);
1099 	ret = __dev_alloc_name(net, name, buf);
1100 	if (ret >= 0)
1101 		strscpy(dev->name, buf, IFNAMSIZ);
1102 	return ret;
1103 }
1104 
1105 /**
1106  *	dev_alloc_name - allocate a name for a device
1107  *	@dev: device
1108  *	@name: name format string
1109  *
1110  *	Passed a format string - eg "lt%d" it will try and find a suitable
1111  *	id. It scans list of devices to build up a free map, then chooses
1112  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1113  *	while allocating the name and adding the device in order to avoid
1114  *	duplicates.
1115  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1116  *	Returns the number of the unit assigned or a negative errno code.
1117  */
1118 
1119 int dev_alloc_name(struct net_device *dev, const char *name)
1120 {
1121 	return dev_alloc_name_ns(dev_net(dev), dev, name);
1122 }
1123 EXPORT_SYMBOL(dev_alloc_name);
1124 
1125 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1126 			      const char *name)
1127 {
1128 	BUG_ON(!net);
1129 
1130 	if (!dev_valid_name(name))
1131 		return -EINVAL;
1132 
1133 	if (strchr(name, '%'))
1134 		return dev_alloc_name_ns(net, dev, name);
1135 	else if (netdev_name_in_use(net, name))
1136 		return -EEXIST;
1137 	else if (dev->name != name)
1138 		strscpy(dev->name, name, IFNAMSIZ);
1139 
1140 	return 0;
1141 }
1142 
1143 /**
1144  *	dev_change_name - change name of a device
1145  *	@dev: device
1146  *	@newname: name (or format string) must be at least IFNAMSIZ
1147  *
1148  *	Change name of a device, can pass format strings "eth%d".
1149  *	for wildcarding.
1150  */
1151 int dev_change_name(struct net_device *dev, const char *newname)
1152 {
1153 	unsigned char old_assign_type;
1154 	char oldname[IFNAMSIZ];
1155 	int err = 0;
1156 	int ret;
1157 	struct net *net;
1158 
1159 	ASSERT_RTNL();
1160 	BUG_ON(!dev_net(dev));
1161 
1162 	net = dev_net(dev);
1163 
1164 	down_write(&devnet_rename_sem);
1165 
1166 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1167 		up_write(&devnet_rename_sem);
1168 		return 0;
1169 	}
1170 
1171 	memcpy(oldname, dev->name, IFNAMSIZ);
1172 
1173 	err = dev_get_valid_name(net, dev, newname);
1174 	if (err < 0) {
1175 		up_write(&devnet_rename_sem);
1176 		return err;
1177 	}
1178 
1179 	if (oldname[0] && !strchr(oldname, '%'))
1180 		netdev_info(dev, "renamed from %s%s\n", oldname,
1181 			    dev->flags & IFF_UP ? " (while UP)" : "");
1182 
1183 	old_assign_type = dev->name_assign_type;
1184 	dev->name_assign_type = NET_NAME_RENAMED;
1185 
1186 rollback:
1187 	ret = device_rename(&dev->dev, dev->name);
1188 	if (ret) {
1189 		memcpy(dev->name, oldname, IFNAMSIZ);
1190 		dev->name_assign_type = old_assign_type;
1191 		up_write(&devnet_rename_sem);
1192 		return ret;
1193 	}
1194 
1195 	up_write(&devnet_rename_sem);
1196 
1197 	netdev_adjacent_rename_links(dev, oldname);
1198 
1199 	write_lock(&dev_base_lock);
1200 	netdev_name_node_del(dev->name_node);
1201 	write_unlock(&dev_base_lock);
1202 
1203 	synchronize_rcu();
1204 
1205 	write_lock(&dev_base_lock);
1206 	netdev_name_node_add(net, dev->name_node);
1207 	write_unlock(&dev_base_lock);
1208 
1209 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1210 	ret = notifier_to_errno(ret);
1211 
1212 	if (ret) {
1213 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1214 		if (err >= 0) {
1215 			err = ret;
1216 			down_write(&devnet_rename_sem);
1217 			memcpy(dev->name, oldname, IFNAMSIZ);
1218 			memcpy(oldname, newname, IFNAMSIZ);
1219 			dev->name_assign_type = old_assign_type;
1220 			old_assign_type = NET_NAME_RENAMED;
1221 			goto rollback;
1222 		} else {
1223 			netdev_err(dev, "name change rollback failed: %d\n",
1224 				   ret);
1225 		}
1226 	}
1227 
1228 	return err;
1229 }
1230 
1231 /**
1232  *	dev_set_alias - change ifalias of a device
1233  *	@dev: device
1234  *	@alias: name up to IFALIASZ
1235  *	@len: limit of bytes to copy from info
1236  *
1237  *	Set ifalias for a device,
1238  */
1239 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1240 {
1241 	struct dev_ifalias *new_alias = NULL;
1242 
1243 	if (len >= IFALIASZ)
1244 		return -EINVAL;
1245 
1246 	if (len) {
1247 		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1248 		if (!new_alias)
1249 			return -ENOMEM;
1250 
1251 		memcpy(new_alias->ifalias, alias, len);
1252 		new_alias->ifalias[len] = 0;
1253 	}
1254 
1255 	mutex_lock(&ifalias_mutex);
1256 	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1257 					mutex_is_locked(&ifalias_mutex));
1258 	mutex_unlock(&ifalias_mutex);
1259 
1260 	if (new_alias)
1261 		kfree_rcu(new_alias, rcuhead);
1262 
1263 	return len;
1264 }
1265 EXPORT_SYMBOL(dev_set_alias);
1266 
1267 /**
1268  *	dev_get_alias - get ifalias of a device
1269  *	@dev: device
1270  *	@name: buffer to store name of ifalias
1271  *	@len: size of buffer
1272  *
1273  *	get ifalias for a device.  Caller must make sure dev cannot go
1274  *	away,  e.g. rcu read lock or own a reference count to device.
1275  */
1276 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1277 {
1278 	const struct dev_ifalias *alias;
1279 	int ret = 0;
1280 
1281 	rcu_read_lock();
1282 	alias = rcu_dereference(dev->ifalias);
1283 	if (alias)
1284 		ret = snprintf(name, len, "%s", alias->ifalias);
1285 	rcu_read_unlock();
1286 
1287 	return ret;
1288 }
1289 
1290 /**
1291  *	netdev_features_change - device changes features
1292  *	@dev: device to cause notification
1293  *
1294  *	Called to indicate a device has changed features.
1295  */
1296 void netdev_features_change(struct net_device *dev)
1297 {
1298 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1299 }
1300 EXPORT_SYMBOL(netdev_features_change);
1301 
1302 /**
1303  *	netdev_state_change - device changes state
1304  *	@dev: device to cause notification
1305  *
1306  *	Called to indicate a device has changed state. This function calls
1307  *	the notifier chains for netdev_chain and sends a NEWLINK message
1308  *	to the routing socket.
1309  */
1310 void netdev_state_change(struct net_device *dev)
1311 {
1312 	if (dev->flags & IFF_UP) {
1313 		struct netdev_notifier_change_info change_info = {
1314 			.info.dev = dev,
1315 		};
1316 
1317 		call_netdevice_notifiers_info(NETDEV_CHANGE,
1318 					      &change_info.info);
1319 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1320 	}
1321 }
1322 EXPORT_SYMBOL(netdev_state_change);
1323 
1324 /**
1325  * __netdev_notify_peers - notify network peers about existence of @dev,
1326  * to be called when rtnl lock is already held.
1327  * @dev: network device
1328  *
1329  * Generate traffic such that interested network peers are aware of
1330  * @dev, such as by generating a gratuitous ARP. This may be used when
1331  * a device wants to inform the rest of the network about some sort of
1332  * reconfiguration such as a failover event or virtual machine
1333  * migration.
1334  */
1335 void __netdev_notify_peers(struct net_device *dev)
1336 {
1337 	ASSERT_RTNL();
1338 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1339 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1340 }
1341 EXPORT_SYMBOL(__netdev_notify_peers);
1342 
1343 /**
1344  * netdev_notify_peers - notify network peers about existence of @dev
1345  * @dev: network device
1346  *
1347  * Generate traffic such that interested network peers are aware of
1348  * @dev, such as by generating a gratuitous ARP. This may be used when
1349  * a device wants to inform the rest of the network about some sort of
1350  * reconfiguration such as a failover event or virtual machine
1351  * migration.
1352  */
1353 void netdev_notify_peers(struct net_device *dev)
1354 {
1355 	rtnl_lock();
1356 	__netdev_notify_peers(dev);
1357 	rtnl_unlock();
1358 }
1359 EXPORT_SYMBOL(netdev_notify_peers);
1360 
1361 static int napi_threaded_poll(void *data);
1362 
1363 static int napi_kthread_create(struct napi_struct *n)
1364 {
1365 	int err = 0;
1366 
1367 	/* Create and wake up the kthread once to put it in
1368 	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1369 	 * warning and work with loadavg.
1370 	 */
1371 	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1372 				n->dev->name, n->napi_id);
1373 	if (IS_ERR(n->thread)) {
1374 		err = PTR_ERR(n->thread);
1375 		pr_err("kthread_run failed with err %d\n", err);
1376 		n->thread = NULL;
1377 	}
1378 
1379 	return err;
1380 }
1381 
1382 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1383 {
1384 	const struct net_device_ops *ops = dev->netdev_ops;
1385 	int ret;
1386 
1387 	ASSERT_RTNL();
1388 	dev_addr_check(dev);
1389 
1390 	if (!netif_device_present(dev)) {
1391 		/* may be detached because parent is runtime-suspended */
1392 		if (dev->dev.parent)
1393 			pm_runtime_resume(dev->dev.parent);
1394 		if (!netif_device_present(dev))
1395 			return -ENODEV;
1396 	}
1397 
1398 	/* Block netpoll from trying to do any rx path servicing.
1399 	 * If we don't do this there is a chance ndo_poll_controller
1400 	 * or ndo_poll may be running while we open the device
1401 	 */
1402 	netpoll_poll_disable(dev);
1403 
1404 	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1405 	ret = notifier_to_errno(ret);
1406 	if (ret)
1407 		return ret;
1408 
1409 	set_bit(__LINK_STATE_START, &dev->state);
1410 
1411 	if (ops->ndo_validate_addr)
1412 		ret = ops->ndo_validate_addr(dev);
1413 
1414 	if (!ret && ops->ndo_open)
1415 		ret = ops->ndo_open(dev);
1416 
1417 	netpoll_poll_enable(dev);
1418 
1419 	if (ret)
1420 		clear_bit(__LINK_STATE_START, &dev->state);
1421 	else {
1422 		dev->flags |= IFF_UP;
1423 		dev_set_rx_mode(dev);
1424 		dev_activate(dev);
1425 		add_device_randomness(dev->dev_addr, dev->addr_len);
1426 	}
1427 
1428 	return ret;
1429 }
1430 
1431 /**
1432  *	dev_open	- prepare an interface for use.
1433  *	@dev: device to open
1434  *	@extack: netlink extended ack
1435  *
1436  *	Takes a device from down to up state. The device's private open
1437  *	function is invoked and then the multicast lists are loaded. Finally
1438  *	the device is moved into the up state and a %NETDEV_UP message is
1439  *	sent to the netdev notifier chain.
1440  *
1441  *	Calling this function on an active interface is a nop. On a failure
1442  *	a negative errno code is returned.
1443  */
1444 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1445 {
1446 	int ret;
1447 
1448 	if (dev->flags & IFF_UP)
1449 		return 0;
1450 
1451 	ret = __dev_open(dev, extack);
1452 	if (ret < 0)
1453 		return ret;
1454 
1455 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1456 	call_netdevice_notifiers(NETDEV_UP, dev);
1457 
1458 	return ret;
1459 }
1460 EXPORT_SYMBOL(dev_open);
1461 
1462 static void __dev_close_many(struct list_head *head)
1463 {
1464 	struct net_device *dev;
1465 
1466 	ASSERT_RTNL();
1467 	might_sleep();
1468 
1469 	list_for_each_entry(dev, head, close_list) {
1470 		/* Temporarily disable netpoll until the interface is down */
1471 		netpoll_poll_disable(dev);
1472 
1473 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1474 
1475 		clear_bit(__LINK_STATE_START, &dev->state);
1476 
1477 		/* Synchronize to scheduled poll. We cannot touch poll list, it
1478 		 * can be even on different cpu. So just clear netif_running().
1479 		 *
1480 		 * dev->stop() will invoke napi_disable() on all of it's
1481 		 * napi_struct instances on this device.
1482 		 */
1483 		smp_mb__after_atomic(); /* Commit netif_running(). */
1484 	}
1485 
1486 	dev_deactivate_many(head);
1487 
1488 	list_for_each_entry(dev, head, close_list) {
1489 		const struct net_device_ops *ops = dev->netdev_ops;
1490 
1491 		/*
1492 		 *	Call the device specific close. This cannot fail.
1493 		 *	Only if device is UP
1494 		 *
1495 		 *	We allow it to be called even after a DETACH hot-plug
1496 		 *	event.
1497 		 */
1498 		if (ops->ndo_stop)
1499 			ops->ndo_stop(dev);
1500 
1501 		dev->flags &= ~IFF_UP;
1502 		netpoll_poll_enable(dev);
1503 	}
1504 }
1505 
1506 static void __dev_close(struct net_device *dev)
1507 {
1508 	LIST_HEAD(single);
1509 
1510 	list_add(&dev->close_list, &single);
1511 	__dev_close_many(&single);
1512 	list_del(&single);
1513 }
1514 
1515 void dev_close_many(struct list_head *head, bool unlink)
1516 {
1517 	struct net_device *dev, *tmp;
1518 
1519 	/* Remove the devices that don't need to be closed */
1520 	list_for_each_entry_safe(dev, tmp, head, close_list)
1521 		if (!(dev->flags & IFF_UP))
1522 			list_del_init(&dev->close_list);
1523 
1524 	__dev_close_many(head);
1525 
1526 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1527 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1528 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1529 		if (unlink)
1530 			list_del_init(&dev->close_list);
1531 	}
1532 }
1533 EXPORT_SYMBOL(dev_close_many);
1534 
1535 /**
1536  *	dev_close - shutdown an interface.
1537  *	@dev: device to shutdown
1538  *
1539  *	This function moves an active device into down state. A
1540  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1541  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1542  *	chain.
1543  */
1544 void dev_close(struct net_device *dev)
1545 {
1546 	if (dev->flags & IFF_UP) {
1547 		LIST_HEAD(single);
1548 
1549 		list_add(&dev->close_list, &single);
1550 		dev_close_many(&single, true);
1551 		list_del(&single);
1552 	}
1553 }
1554 EXPORT_SYMBOL(dev_close);
1555 
1556 
1557 /**
1558  *	dev_disable_lro - disable Large Receive Offload on a device
1559  *	@dev: device
1560  *
1561  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1562  *	called under RTNL.  This is needed if received packets may be
1563  *	forwarded to another interface.
1564  */
1565 void dev_disable_lro(struct net_device *dev)
1566 {
1567 	struct net_device *lower_dev;
1568 	struct list_head *iter;
1569 
1570 	dev->wanted_features &= ~NETIF_F_LRO;
1571 	netdev_update_features(dev);
1572 
1573 	if (unlikely(dev->features & NETIF_F_LRO))
1574 		netdev_WARN(dev, "failed to disable LRO!\n");
1575 
1576 	netdev_for_each_lower_dev(dev, lower_dev, iter)
1577 		dev_disable_lro(lower_dev);
1578 }
1579 EXPORT_SYMBOL(dev_disable_lro);
1580 
1581 /**
1582  *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1583  *	@dev: device
1584  *
1585  *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1586  *	called under RTNL.  This is needed if Generic XDP is installed on
1587  *	the device.
1588  */
1589 static void dev_disable_gro_hw(struct net_device *dev)
1590 {
1591 	dev->wanted_features &= ~NETIF_F_GRO_HW;
1592 	netdev_update_features(dev);
1593 
1594 	if (unlikely(dev->features & NETIF_F_GRO_HW))
1595 		netdev_WARN(dev, "failed to disable GRO_HW!\n");
1596 }
1597 
1598 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1599 {
1600 #define N(val) 						\
1601 	case NETDEV_##val:				\
1602 		return "NETDEV_" __stringify(val);
1603 	switch (cmd) {
1604 	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1605 	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1606 	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1607 	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
1608 	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
1609 	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
1610 	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1611 	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1612 	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1613 	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1614 	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1615 	N(XDP_FEAT_CHANGE)
1616 	}
1617 #undef N
1618 	return "UNKNOWN_NETDEV_EVENT";
1619 }
1620 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1621 
1622 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1623 				   struct net_device *dev)
1624 {
1625 	struct netdev_notifier_info info = {
1626 		.dev = dev,
1627 	};
1628 
1629 	return nb->notifier_call(nb, val, &info);
1630 }
1631 
1632 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1633 					     struct net_device *dev)
1634 {
1635 	int err;
1636 
1637 	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1638 	err = notifier_to_errno(err);
1639 	if (err)
1640 		return err;
1641 
1642 	if (!(dev->flags & IFF_UP))
1643 		return 0;
1644 
1645 	call_netdevice_notifier(nb, NETDEV_UP, dev);
1646 	return 0;
1647 }
1648 
1649 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1650 						struct net_device *dev)
1651 {
1652 	if (dev->flags & IFF_UP) {
1653 		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1654 					dev);
1655 		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1656 	}
1657 	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1658 }
1659 
1660 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1661 						 struct net *net)
1662 {
1663 	struct net_device *dev;
1664 	int err;
1665 
1666 	for_each_netdev(net, dev) {
1667 		err = call_netdevice_register_notifiers(nb, dev);
1668 		if (err)
1669 			goto rollback;
1670 	}
1671 	return 0;
1672 
1673 rollback:
1674 	for_each_netdev_continue_reverse(net, dev)
1675 		call_netdevice_unregister_notifiers(nb, dev);
1676 	return err;
1677 }
1678 
1679 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1680 						    struct net *net)
1681 {
1682 	struct net_device *dev;
1683 
1684 	for_each_netdev(net, dev)
1685 		call_netdevice_unregister_notifiers(nb, dev);
1686 }
1687 
1688 static int dev_boot_phase = 1;
1689 
1690 /**
1691  * register_netdevice_notifier - register a network notifier block
1692  * @nb: notifier
1693  *
1694  * Register a notifier to be called when network device events occur.
1695  * The notifier passed is linked into the kernel structures and must
1696  * not be reused until it has been unregistered. A negative errno code
1697  * is returned on a failure.
1698  *
1699  * When registered all registration and up events are replayed
1700  * to the new notifier to allow device to have a race free
1701  * view of the network device list.
1702  */
1703 
1704 int register_netdevice_notifier(struct notifier_block *nb)
1705 {
1706 	struct net *net;
1707 	int err;
1708 
1709 	/* Close race with setup_net() and cleanup_net() */
1710 	down_write(&pernet_ops_rwsem);
1711 	rtnl_lock();
1712 	err = raw_notifier_chain_register(&netdev_chain, nb);
1713 	if (err)
1714 		goto unlock;
1715 	if (dev_boot_phase)
1716 		goto unlock;
1717 	for_each_net(net) {
1718 		err = call_netdevice_register_net_notifiers(nb, net);
1719 		if (err)
1720 			goto rollback;
1721 	}
1722 
1723 unlock:
1724 	rtnl_unlock();
1725 	up_write(&pernet_ops_rwsem);
1726 	return err;
1727 
1728 rollback:
1729 	for_each_net_continue_reverse(net)
1730 		call_netdevice_unregister_net_notifiers(nb, net);
1731 
1732 	raw_notifier_chain_unregister(&netdev_chain, nb);
1733 	goto unlock;
1734 }
1735 EXPORT_SYMBOL(register_netdevice_notifier);
1736 
1737 /**
1738  * unregister_netdevice_notifier - unregister a network notifier block
1739  * @nb: notifier
1740  *
1741  * Unregister a notifier previously registered by
1742  * register_netdevice_notifier(). The notifier is unlinked into the
1743  * kernel structures and may then be reused. A negative errno code
1744  * is returned on a failure.
1745  *
1746  * After unregistering unregister and down device events are synthesized
1747  * for all devices on the device list to the removed notifier to remove
1748  * the need for special case cleanup code.
1749  */
1750 
1751 int unregister_netdevice_notifier(struct notifier_block *nb)
1752 {
1753 	struct net *net;
1754 	int err;
1755 
1756 	/* Close race with setup_net() and cleanup_net() */
1757 	down_write(&pernet_ops_rwsem);
1758 	rtnl_lock();
1759 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1760 	if (err)
1761 		goto unlock;
1762 
1763 	for_each_net(net)
1764 		call_netdevice_unregister_net_notifiers(nb, net);
1765 
1766 unlock:
1767 	rtnl_unlock();
1768 	up_write(&pernet_ops_rwsem);
1769 	return err;
1770 }
1771 EXPORT_SYMBOL(unregister_netdevice_notifier);
1772 
1773 static int __register_netdevice_notifier_net(struct net *net,
1774 					     struct notifier_block *nb,
1775 					     bool ignore_call_fail)
1776 {
1777 	int err;
1778 
1779 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1780 	if (err)
1781 		return err;
1782 	if (dev_boot_phase)
1783 		return 0;
1784 
1785 	err = call_netdevice_register_net_notifiers(nb, net);
1786 	if (err && !ignore_call_fail)
1787 		goto chain_unregister;
1788 
1789 	return 0;
1790 
1791 chain_unregister:
1792 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1793 	return err;
1794 }
1795 
1796 static int __unregister_netdevice_notifier_net(struct net *net,
1797 					       struct notifier_block *nb)
1798 {
1799 	int err;
1800 
1801 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1802 	if (err)
1803 		return err;
1804 
1805 	call_netdevice_unregister_net_notifiers(nb, net);
1806 	return 0;
1807 }
1808 
1809 /**
1810  * register_netdevice_notifier_net - register a per-netns network notifier block
1811  * @net: network namespace
1812  * @nb: notifier
1813  *
1814  * Register a notifier to be called when network device events occur.
1815  * The notifier passed is linked into the kernel structures and must
1816  * not be reused until it has been unregistered. A negative errno code
1817  * is returned on a failure.
1818  *
1819  * When registered all registration and up events are replayed
1820  * to the new notifier to allow device to have a race free
1821  * view of the network device list.
1822  */
1823 
1824 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1825 {
1826 	int err;
1827 
1828 	rtnl_lock();
1829 	err = __register_netdevice_notifier_net(net, nb, false);
1830 	rtnl_unlock();
1831 	return err;
1832 }
1833 EXPORT_SYMBOL(register_netdevice_notifier_net);
1834 
1835 /**
1836  * unregister_netdevice_notifier_net - unregister a per-netns
1837  *                                     network notifier block
1838  * @net: network namespace
1839  * @nb: notifier
1840  *
1841  * Unregister a notifier previously registered by
1842  * register_netdevice_notifier_net(). The notifier is unlinked from the
1843  * kernel structures and may then be reused. A negative errno code
1844  * is returned on a failure.
1845  *
1846  * After unregistering unregister and down device events are synthesized
1847  * for all devices on the device list to the removed notifier to remove
1848  * the need for special case cleanup code.
1849  */
1850 
1851 int unregister_netdevice_notifier_net(struct net *net,
1852 				      struct notifier_block *nb)
1853 {
1854 	int err;
1855 
1856 	rtnl_lock();
1857 	err = __unregister_netdevice_notifier_net(net, nb);
1858 	rtnl_unlock();
1859 	return err;
1860 }
1861 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1862 
1863 static void __move_netdevice_notifier_net(struct net *src_net,
1864 					  struct net *dst_net,
1865 					  struct notifier_block *nb)
1866 {
1867 	__unregister_netdevice_notifier_net(src_net, nb);
1868 	__register_netdevice_notifier_net(dst_net, nb, true);
1869 }
1870 
1871 int register_netdevice_notifier_dev_net(struct net_device *dev,
1872 					struct notifier_block *nb,
1873 					struct netdev_net_notifier *nn)
1874 {
1875 	int err;
1876 
1877 	rtnl_lock();
1878 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1879 	if (!err) {
1880 		nn->nb = nb;
1881 		list_add(&nn->list, &dev->net_notifier_list);
1882 	}
1883 	rtnl_unlock();
1884 	return err;
1885 }
1886 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1887 
1888 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1889 					  struct notifier_block *nb,
1890 					  struct netdev_net_notifier *nn)
1891 {
1892 	int err;
1893 
1894 	rtnl_lock();
1895 	list_del(&nn->list);
1896 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1897 	rtnl_unlock();
1898 	return err;
1899 }
1900 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1901 
1902 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1903 					     struct net *net)
1904 {
1905 	struct netdev_net_notifier *nn;
1906 
1907 	list_for_each_entry(nn, &dev->net_notifier_list, list)
1908 		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
1909 }
1910 
1911 /**
1912  *	call_netdevice_notifiers_info - call all network notifier blocks
1913  *	@val: value passed unmodified to notifier function
1914  *	@info: notifier information data
1915  *
1916  *	Call all network notifier blocks.  Parameters and return value
1917  *	are as for raw_notifier_call_chain().
1918  */
1919 
1920 int call_netdevice_notifiers_info(unsigned long val,
1921 				  struct netdev_notifier_info *info)
1922 {
1923 	struct net *net = dev_net(info->dev);
1924 	int ret;
1925 
1926 	ASSERT_RTNL();
1927 
1928 	/* Run per-netns notifier block chain first, then run the global one.
1929 	 * Hopefully, one day, the global one is going to be removed after
1930 	 * all notifier block registrators get converted to be per-netns.
1931 	 */
1932 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1933 	if (ret & NOTIFY_STOP_MASK)
1934 		return ret;
1935 	return raw_notifier_call_chain(&netdev_chain, val, info);
1936 }
1937 
1938 /**
1939  *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
1940  *	                                       for and rollback on error
1941  *	@val_up: value passed unmodified to notifier function
1942  *	@val_down: value passed unmodified to the notifier function when
1943  *	           recovering from an error on @val_up
1944  *	@info: notifier information data
1945  *
1946  *	Call all per-netns network notifier blocks, but not notifier blocks on
1947  *	the global notifier chain. Parameters and return value are as for
1948  *	raw_notifier_call_chain_robust().
1949  */
1950 
1951 static int
1952 call_netdevice_notifiers_info_robust(unsigned long val_up,
1953 				     unsigned long val_down,
1954 				     struct netdev_notifier_info *info)
1955 {
1956 	struct net *net = dev_net(info->dev);
1957 
1958 	ASSERT_RTNL();
1959 
1960 	return raw_notifier_call_chain_robust(&net->netdev_chain,
1961 					      val_up, val_down, info);
1962 }
1963 
1964 static int call_netdevice_notifiers_extack(unsigned long val,
1965 					   struct net_device *dev,
1966 					   struct netlink_ext_ack *extack)
1967 {
1968 	struct netdev_notifier_info info = {
1969 		.dev = dev,
1970 		.extack = extack,
1971 	};
1972 
1973 	return call_netdevice_notifiers_info(val, &info);
1974 }
1975 
1976 /**
1977  *	call_netdevice_notifiers - call all network notifier blocks
1978  *      @val: value passed unmodified to notifier function
1979  *      @dev: net_device pointer passed unmodified to notifier function
1980  *
1981  *	Call all network notifier blocks.  Parameters and return value
1982  *	are as for raw_notifier_call_chain().
1983  */
1984 
1985 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1986 {
1987 	return call_netdevice_notifiers_extack(val, dev, NULL);
1988 }
1989 EXPORT_SYMBOL(call_netdevice_notifiers);
1990 
1991 /**
1992  *	call_netdevice_notifiers_mtu - call all network notifier blocks
1993  *	@val: value passed unmodified to notifier function
1994  *	@dev: net_device pointer passed unmodified to notifier function
1995  *	@arg: additional u32 argument passed to the notifier function
1996  *
1997  *	Call all network notifier blocks.  Parameters and return value
1998  *	are as for raw_notifier_call_chain().
1999  */
2000 static int call_netdevice_notifiers_mtu(unsigned long val,
2001 					struct net_device *dev, u32 arg)
2002 {
2003 	struct netdev_notifier_info_ext info = {
2004 		.info.dev = dev,
2005 		.ext.mtu = arg,
2006 	};
2007 
2008 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2009 
2010 	return call_netdevice_notifiers_info(val, &info.info);
2011 }
2012 
2013 #ifdef CONFIG_NET_INGRESS
2014 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2015 
2016 void net_inc_ingress_queue(void)
2017 {
2018 	static_branch_inc(&ingress_needed_key);
2019 }
2020 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2021 
2022 void net_dec_ingress_queue(void)
2023 {
2024 	static_branch_dec(&ingress_needed_key);
2025 }
2026 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2027 #endif
2028 
2029 #ifdef CONFIG_NET_EGRESS
2030 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2031 
2032 void net_inc_egress_queue(void)
2033 {
2034 	static_branch_inc(&egress_needed_key);
2035 }
2036 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2037 
2038 void net_dec_egress_queue(void)
2039 {
2040 	static_branch_dec(&egress_needed_key);
2041 }
2042 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2043 #endif
2044 
2045 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2046 EXPORT_SYMBOL(netstamp_needed_key);
2047 #ifdef CONFIG_JUMP_LABEL
2048 static atomic_t netstamp_needed_deferred;
2049 static atomic_t netstamp_wanted;
2050 static void netstamp_clear(struct work_struct *work)
2051 {
2052 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2053 	int wanted;
2054 
2055 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2056 	if (wanted > 0)
2057 		static_branch_enable(&netstamp_needed_key);
2058 	else
2059 		static_branch_disable(&netstamp_needed_key);
2060 }
2061 static DECLARE_WORK(netstamp_work, netstamp_clear);
2062 #endif
2063 
2064 void net_enable_timestamp(void)
2065 {
2066 #ifdef CONFIG_JUMP_LABEL
2067 	int wanted = atomic_read(&netstamp_wanted);
2068 
2069 	while (wanted > 0) {
2070 		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2071 			return;
2072 	}
2073 	atomic_inc(&netstamp_needed_deferred);
2074 	schedule_work(&netstamp_work);
2075 #else
2076 	static_branch_inc(&netstamp_needed_key);
2077 #endif
2078 }
2079 EXPORT_SYMBOL(net_enable_timestamp);
2080 
2081 void net_disable_timestamp(void)
2082 {
2083 #ifdef CONFIG_JUMP_LABEL
2084 	int wanted = atomic_read(&netstamp_wanted);
2085 
2086 	while (wanted > 1) {
2087 		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2088 			return;
2089 	}
2090 	atomic_dec(&netstamp_needed_deferred);
2091 	schedule_work(&netstamp_work);
2092 #else
2093 	static_branch_dec(&netstamp_needed_key);
2094 #endif
2095 }
2096 EXPORT_SYMBOL(net_disable_timestamp);
2097 
2098 static inline void net_timestamp_set(struct sk_buff *skb)
2099 {
2100 	skb->tstamp = 0;
2101 	skb->mono_delivery_time = 0;
2102 	if (static_branch_unlikely(&netstamp_needed_key))
2103 		skb->tstamp = ktime_get_real();
2104 }
2105 
2106 #define net_timestamp_check(COND, SKB)				\
2107 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2108 		if ((COND) && !(SKB)->tstamp)			\
2109 			(SKB)->tstamp = ktime_get_real();	\
2110 	}							\
2111 
2112 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2113 {
2114 	return __is_skb_forwardable(dev, skb, true);
2115 }
2116 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2117 
2118 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2119 			      bool check_mtu)
2120 {
2121 	int ret = ____dev_forward_skb(dev, skb, check_mtu);
2122 
2123 	if (likely(!ret)) {
2124 		skb->protocol = eth_type_trans(skb, dev);
2125 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2126 	}
2127 
2128 	return ret;
2129 }
2130 
2131 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2132 {
2133 	return __dev_forward_skb2(dev, skb, true);
2134 }
2135 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2136 
2137 /**
2138  * dev_forward_skb - loopback an skb to another netif
2139  *
2140  * @dev: destination network device
2141  * @skb: buffer to forward
2142  *
2143  * return values:
2144  *	NET_RX_SUCCESS	(no congestion)
2145  *	NET_RX_DROP     (packet was dropped, but freed)
2146  *
2147  * dev_forward_skb can be used for injecting an skb from the
2148  * start_xmit function of one device into the receive queue
2149  * of another device.
2150  *
2151  * The receiving device may be in another namespace, so
2152  * we have to clear all information in the skb that could
2153  * impact namespace isolation.
2154  */
2155 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2156 {
2157 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2158 }
2159 EXPORT_SYMBOL_GPL(dev_forward_skb);
2160 
2161 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2162 {
2163 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2164 }
2165 
2166 static inline int deliver_skb(struct sk_buff *skb,
2167 			      struct packet_type *pt_prev,
2168 			      struct net_device *orig_dev)
2169 {
2170 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2171 		return -ENOMEM;
2172 	refcount_inc(&skb->users);
2173 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2174 }
2175 
2176 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2177 					  struct packet_type **pt,
2178 					  struct net_device *orig_dev,
2179 					  __be16 type,
2180 					  struct list_head *ptype_list)
2181 {
2182 	struct packet_type *ptype, *pt_prev = *pt;
2183 
2184 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2185 		if (ptype->type != type)
2186 			continue;
2187 		if (pt_prev)
2188 			deliver_skb(skb, pt_prev, orig_dev);
2189 		pt_prev = ptype;
2190 	}
2191 	*pt = pt_prev;
2192 }
2193 
2194 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2195 {
2196 	if (!ptype->af_packet_priv || !skb->sk)
2197 		return false;
2198 
2199 	if (ptype->id_match)
2200 		return ptype->id_match(ptype, skb->sk);
2201 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2202 		return true;
2203 
2204 	return false;
2205 }
2206 
2207 /**
2208  * dev_nit_active - return true if any network interface taps are in use
2209  *
2210  * @dev: network device to check for the presence of taps
2211  */
2212 bool dev_nit_active(struct net_device *dev)
2213 {
2214 	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2215 }
2216 EXPORT_SYMBOL_GPL(dev_nit_active);
2217 
2218 /*
2219  *	Support routine. Sends outgoing frames to any network
2220  *	taps currently in use.
2221  */
2222 
2223 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2224 {
2225 	struct packet_type *ptype;
2226 	struct sk_buff *skb2 = NULL;
2227 	struct packet_type *pt_prev = NULL;
2228 	struct list_head *ptype_list = &ptype_all;
2229 
2230 	rcu_read_lock();
2231 again:
2232 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2233 		if (ptype->ignore_outgoing)
2234 			continue;
2235 
2236 		/* Never send packets back to the socket
2237 		 * they originated from - MvS (miquels@drinkel.ow.org)
2238 		 */
2239 		if (skb_loop_sk(ptype, skb))
2240 			continue;
2241 
2242 		if (pt_prev) {
2243 			deliver_skb(skb2, pt_prev, skb->dev);
2244 			pt_prev = ptype;
2245 			continue;
2246 		}
2247 
2248 		/* need to clone skb, done only once */
2249 		skb2 = skb_clone(skb, GFP_ATOMIC);
2250 		if (!skb2)
2251 			goto out_unlock;
2252 
2253 		net_timestamp_set(skb2);
2254 
2255 		/* skb->nh should be correctly
2256 		 * set by sender, so that the second statement is
2257 		 * just protection against buggy protocols.
2258 		 */
2259 		skb_reset_mac_header(skb2);
2260 
2261 		if (skb_network_header(skb2) < skb2->data ||
2262 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2263 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2264 					     ntohs(skb2->protocol),
2265 					     dev->name);
2266 			skb_reset_network_header(skb2);
2267 		}
2268 
2269 		skb2->transport_header = skb2->network_header;
2270 		skb2->pkt_type = PACKET_OUTGOING;
2271 		pt_prev = ptype;
2272 	}
2273 
2274 	if (ptype_list == &ptype_all) {
2275 		ptype_list = &dev->ptype_all;
2276 		goto again;
2277 	}
2278 out_unlock:
2279 	if (pt_prev) {
2280 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2281 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2282 		else
2283 			kfree_skb(skb2);
2284 	}
2285 	rcu_read_unlock();
2286 }
2287 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2288 
2289 /**
2290  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2291  * @dev: Network device
2292  * @txq: number of queues available
2293  *
2294  * If real_num_tx_queues is changed the tc mappings may no longer be
2295  * valid. To resolve this verify the tc mapping remains valid and if
2296  * not NULL the mapping. With no priorities mapping to this
2297  * offset/count pair it will no longer be used. In the worst case TC0
2298  * is invalid nothing can be done so disable priority mappings. If is
2299  * expected that drivers will fix this mapping if they can before
2300  * calling netif_set_real_num_tx_queues.
2301  */
2302 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2303 {
2304 	int i;
2305 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2306 
2307 	/* If TC0 is invalidated disable TC mapping */
2308 	if (tc->offset + tc->count > txq) {
2309 		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2310 		dev->num_tc = 0;
2311 		return;
2312 	}
2313 
2314 	/* Invalidated prio to tc mappings set to TC0 */
2315 	for (i = 1; i < TC_BITMASK + 1; i++) {
2316 		int q = netdev_get_prio_tc_map(dev, i);
2317 
2318 		tc = &dev->tc_to_txq[q];
2319 		if (tc->offset + tc->count > txq) {
2320 			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2321 				    i, q);
2322 			netdev_set_prio_tc_map(dev, i, 0);
2323 		}
2324 	}
2325 }
2326 
2327 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2328 {
2329 	if (dev->num_tc) {
2330 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2331 		int i;
2332 
2333 		/* walk through the TCs and see if it falls into any of them */
2334 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2335 			if ((txq - tc->offset) < tc->count)
2336 				return i;
2337 		}
2338 
2339 		/* didn't find it, just return -1 to indicate no match */
2340 		return -1;
2341 	}
2342 
2343 	return 0;
2344 }
2345 EXPORT_SYMBOL(netdev_txq_to_tc);
2346 
2347 #ifdef CONFIG_XPS
2348 static struct static_key xps_needed __read_mostly;
2349 static struct static_key xps_rxqs_needed __read_mostly;
2350 static DEFINE_MUTEX(xps_map_mutex);
2351 #define xmap_dereference(P)		\
2352 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2353 
2354 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2355 			     struct xps_dev_maps *old_maps, int tci, u16 index)
2356 {
2357 	struct xps_map *map = NULL;
2358 	int pos;
2359 
2360 	if (dev_maps)
2361 		map = xmap_dereference(dev_maps->attr_map[tci]);
2362 	if (!map)
2363 		return false;
2364 
2365 	for (pos = map->len; pos--;) {
2366 		if (map->queues[pos] != index)
2367 			continue;
2368 
2369 		if (map->len > 1) {
2370 			map->queues[pos] = map->queues[--map->len];
2371 			break;
2372 		}
2373 
2374 		if (old_maps)
2375 			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2376 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2377 		kfree_rcu(map, rcu);
2378 		return false;
2379 	}
2380 
2381 	return true;
2382 }
2383 
2384 static bool remove_xps_queue_cpu(struct net_device *dev,
2385 				 struct xps_dev_maps *dev_maps,
2386 				 int cpu, u16 offset, u16 count)
2387 {
2388 	int num_tc = dev_maps->num_tc;
2389 	bool active = false;
2390 	int tci;
2391 
2392 	for (tci = cpu * num_tc; num_tc--; tci++) {
2393 		int i, j;
2394 
2395 		for (i = count, j = offset; i--; j++) {
2396 			if (!remove_xps_queue(dev_maps, NULL, tci, j))
2397 				break;
2398 		}
2399 
2400 		active |= i < 0;
2401 	}
2402 
2403 	return active;
2404 }
2405 
2406 static void reset_xps_maps(struct net_device *dev,
2407 			   struct xps_dev_maps *dev_maps,
2408 			   enum xps_map_type type)
2409 {
2410 	static_key_slow_dec_cpuslocked(&xps_needed);
2411 	if (type == XPS_RXQS)
2412 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2413 
2414 	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2415 
2416 	kfree_rcu(dev_maps, rcu);
2417 }
2418 
2419 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2420 			   u16 offset, u16 count)
2421 {
2422 	struct xps_dev_maps *dev_maps;
2423 	bool active = false;
2424 	int i, j;
2425 
2426 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2427 	if (!dev_maps)
2428 		return;
2429 
2430 	for (j = 0; j < dev_maps->nr_ids; j++)
2431 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2432 	if (!active)
2433 		reset_xps_maps(dev, dev_maps, type);
2434 
2435 	if (type == XPS_CPUS) {
2436 		for (i = offset + (count - 1); count--; i--)
2437 			netdev_queue_numa_node_write(
2438 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2439 	}
2440 }
2441 
2442 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2443 				   u16 count)
2444 {
2445 	if (!static_key_false(&xps_needed))
2446 		return;
2447 
2448 	cpus_read_lock();
2449 	mutex_lock(&xps_map_mutex);
2450 
2451 	if (static_key_false(&xps_rxqs_needed))
2452 		clean_xps_maps(dev, XPS_RXQS, offset, count);
2453 
2454 	clean_xps_maps(dev, XPS_CPUS, offset, count);
2455 
2456 	mutex_unlock(&xps_map_mutex);
2457 	cpus_read_unlock();
2458 }
2459 
2460 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2461 {
2462 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2463 }
2464 
2465 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2466 				      u16 index, bool is_rxqs_map)
2467 {
2468 	struct xps_map *new_map;
2469 	int alloc_len = XPS_MIN_MAP_ALLOC;
2470 	int i, pos;
2471 
2472 	for (pos = 0; map && pos < map->len; pos++) {
2473 		if (map->queues[pos] != index)
2474 			continue;
2475 		return map;
2476 	}
2477 
2478 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2479 	if (map) {
2480 		if (pos < map->alloc_len)
2481 			return map;
2482 
2483 		alloc_len = map->alloc_len * 2;
2484 	}
2485 
2486 	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2487 	 *  map
2488 	 */
2489 	if (is_rxqs_map)
2490 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2491 	else
2492 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2493 				       cpu_to_node(attr_index));
2494 	if (!new_map)
2495 		return NULL;
2496 
2497 	for (i = 0; i < pos; i++)
2498 		new_map->queues[i] = map->queues[i];
2499 	new_map->alloc_len = alloc_len;
2500 	new_map->len = pos;
2501 
2502 	return new_map;
2503 }
2504 
2505 /* Copy xps maps at a given index */
2506 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2507 			      struct xps_dev_maps *new_dev_maps, int index,
2508 			      int tc, bool skip_tc)
2509 {
2510 	int i, tci = index * dev_maps->num_tc;
2511 	struct xps_map *map;
2512 
2513 	/* copy maps belonging to foreign traffic classes */
2514 	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2515 		if (i == tc && skip_tc)
2516 			continue;
2517 
2518 		/* fill in the new device map from the old device map */
2519 		map = xmap_dereference(dev_maps->attr_map[tci]);
2520 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2521 	}
2522 }
2523 
2524 /* Must be called under cpus_read_lock */
2525 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2526 			  u16 index, enum xps_map_type type)
2527 {
2528 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2529 	const unsigned long *online_mask = NULL;
2530 	bool active = false, copy = false;
2531 	int i, j, tci, numa_node_id = -2;
2532 	int maps_sz, num_tc = 1, tc = 0;
2533 	struct xps_map *map, *new_map;
2534 	unsigned int nr_ids;
2535 
2536 	WARN_ON_ONCE(index >= dev->num_tx_queues);
2537 
2538 	if (dev->num_tc) {
2539 		/* Do not allow XPS on subordinate device directly */
2540 		num_tc = dev->num_tc;
2541 		if (num_tc < 0)
2542 			return -EINVAL;
2543 
2544 		/* If queue belongs to subordinate dev use its map */
2545 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2546 
2547 		tc = netdev_txq_to_tc(dev, index);
2548 		if (tc < 0)
2549 			return -EINVAL;
2550 	}
2551 
2552 	mutex_lock(&xps_map_mutex);
2553 
2554 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2555 	if (type == XPS_RXQS) {
2556 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2557 		nr_ids = dev->num_rx_queues;
2558 	} else {
2559 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2560 		if (num_possible_cpus() > 1)
2561 			online_mask = cpumask_bits(cpu_online_mask);
2562 		nr_ids = nr_cpu_ids;
2563 	}
2564 
2565 	if (maps_sz < L1_CACHE_BYTES)
2566 		maps_sz = L1_CACHE_BYTES;
2567 
2568 	/* The old dev_maps could be larger or smaller than the one we're
2569 	 * setting up now, as dev->num_tc or nr_ids could have been updated in
2570 	 * between. We could try to be smart, but let's be safe instead and only
2571 	 * copy foreign traffic classes if the two map sizes match.
2572 	 */
2573 	if (dev_maps &&
2574 	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2575 		copy = true;
2576 
2577 	/* allocate memory for queue storage */
2578 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2579 	     j < nr_ids;) {
2580 		if (!new_dev_maps) {
2581 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2582 			if (!new_dev_maps) {
2583 				mutex_unlock(&xps_map_mutex);
2584 				return -ENOMEM;
2585 			}
2586 
2587 			new_dev_maps->nr_ids = nr_ids;
2588 			new_dev_maps->num_tc = num_tc;
2589 		}
2590 
2591 		tci = j * num_tc + tc;
2592 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2593 
2594 		map = expand_xps_map(map, j, index, type == XPS_RXQS);
2595 		if (!map)
2596 			goto error;
2597 
2598 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2599 	}
2600 
2601 	if (!new_dev_maps)
2602 		goto out_no_new_maps;
2603 
2604 	if (!dev_maps) {
2605 		/* Increment static keys at most once per type */
2606 		static_key_slow_inc_cpuslocked(&xps_needed);
2607 		if (type == XPS_RXQS)
2608 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2609 	}
2610 
2611 	for (j = 0; j < nr_ids; j++) {
2612 		bool skip_tc = false;
2613 
2614 		tci = j * num_tc + tc;
2615 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2616 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2617 			/* add tx-queue to CPU/rx-queue maps */
2618 			int pos = 0;
2619 
2620 			skip_tc = true;
2621 
2622 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2623 			while ((pos < map->len) && (map->queues[pos] != index))
2624 				pos++;
2625 
2626 			if (pos == map->len)
2627 				map->queues[map->len++] = index;
2628 #ifdef CONFIG_NUMA
2629 			if (type == XPS_CPUS) {
2630 				if (numa_node_id == -2)
2631 					numa_node_id = cpu_to_node(j);
2632 				else if (numa_node_id != cpu_to_node(j))
2633 					numa_node_id = -1;
2634 			}
2635 #endif
2636 		}
2637 
2638 		if (copy)
2639 			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2640 					  skip_tc);
2641 	}
2642 
2643 	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2644 
2645 	/* Cleanup old maps */
2646 	if (!dev_maps)
2647 		goto out_no_old_maps;
2648 
2649 	for (j = 0; j < dev_maps->nr_ids; j++) {
2650 		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2651 			map = xmap_dereference(dev_maps->attr_map[tci]);
2652 			if (!map)
2653 				continue;
2654 
2655 			if (copy) {
2656 				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2657 				if (map == new_map)
2658 					continue;
2659 			}
2660 
2661 			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2662 			kfree_rcu(map, rcu);
2663 		}
2664 	}
2665 
2666 	old_dev_maps = dev_maps;
2667 
2668 out_no_old_maps:
2669 	dev_maps = new_dev_maps;
2670 	active = true;
2671 
2672 out_no_new_maps:
2673 	if (type == XPS_CPUS)
2674 		/* update Tx queue numa node */
2675 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2676 					     (numa_node_id >= 0) ?
2677 					     numa_node_id : NUMA_NO_NODE);
2678 
2679 	if (!dev_maps)
2680 		goto out_no_maps;
2681 
2682 	/* removes tx-queue from unused CPUs/rx-queues */
2683 	for (j = 0; j < dev_maps->nr_ids; j++) {
2684 		tci = j * dev_maps->num_tc;
2685 
2686 		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2687 			if (i == tc &&
2688 			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2689 			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2690 				continue;
2691 
2692 			active |= remove_xps_queue(dev_maps,
2693 						   copy ? old_dev_maps : NULL,
2694 						   tci, index);
2695 		}
2696 	}
2697 
2698 	if (old_dev_maps)
2699 		kfree_rcu(old_dev_maps, rcu);
2700 
2701 	/* free map if not active */
2702 	if (!active)
2703 		reset_xps_maps(dev, dev_maps, type);
2704 
2705 out_no_maps:
2706 	mutex_unlock(&xps_map_mutex);
2707 
2708 	return 0;
2709 error:
2710 	/* remove any maps that we added */
2711 	for (j = 0; j < nr_ids; j++) {
2712 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2713 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2714 			map = copy ?
2715 			      xmap_dereference(dev_maps->attr_map[tci]) :
2716 			      NULL;
2717 			if (new_map && new_map != map)
2718 				kfree(new_map);
2719 		}
2720 	}
2721 
2722 	mutex_unlock(&xps_map_mutex);
2723 
2724 	kfree(new_dev_maps);
2725 	return -ENOMEM;
2726 }
2727 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2728 
2729 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2730 			u16 index)
2731 {
2732 	int ret;
2733 
2734 	cpus_read_lock();
2735 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2736 	cpus_read_unlock();
2737 
2738 	return ret;
2739 }
2740 EXPORT_SYMBOL(netif_set_xps_queue);
2741 
2742 #endif
2743 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2744 {
2745 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2746 
2747 	/* Unbind any subordinate channels */
2748 	while (txq-- != &dev->_tx[0]) {
2749 		if (txq->sb_dev)
2750 			netdev_unbind_sb_channel(dev, txq->sb_dev);
2751 	}
2752 }
2753 
2754 void netdev_reset_tc(struct net_device *dev)
2755 {
2756 #ifdef CONFIG_XPS
2757 	netif_reset_xps_queues_gt(dev, 0);
2758 #endif
2759 	netdev_unbind_all_sb_channels(dev);
2760 
2761 	/* Reset TC configuration of device */
2762 	dev->num_tc = 0;
2763 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2764 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2765 }
2766 EXPORT_SYMBOL(netdev_reset_tc);
2767 
2768 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2769 {
2770 	if (tc >= dev->num_tc)
2771 		return -EINVAL;
2772 
2773 #ifdef CONFIG_XPS
2774 	netif_reset_xps_queues(dev, offset, count);
2775 #endif
2776 	dev->tc_to_txq[tc].count = count;
2777 	dev->tc_to_txq[tc].offset = offset;
2778 	return 0;
2779 }
2780 EXPORT_SYMBOL(netdev_set_tc_queue);
2781 
2782 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2783 {
2784 	if (num_tc > TC_MAX_QUEUE)
2785 		return -EINVAL;
2786 
2787 #ifdef CONFIG_XPS
2788 	netif_reset_xps_queues_gt(dev, 0);
2789 #endif
2790 	netdev_unbind_all_sb_channels(dev);
2791 
2792 	dev->num_tc = num_tc;
2793 	return 0;
2794 }
2795 EXPORT_SYMBOL(netdev_set_num_tc);
2796 
2797 void netdev_unbind_sb_channel(struct net_device *dev,
2798 			      struct net_device *sb_dev)
2799 {
2800 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2801 
2802 #ifdef CONFIG_XPS
2803 	netif_reset_xps_queues_gt(sb_dev, 0);
2804 #endif
2805 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2806 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2807 
2808 	while (txq-- != &dev->_tx[0]) {
2809 		if (txq->sb_dev == sb_dev)
2810 			txq->sb_dev = NULL;
2811 	}
2812 }
2813 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2814 
2815 int netdev_bind_sb_channel_queue(struct net_device *dev,
2816 				 struct net_device *sb_dev,
2817 				 u8 tc, u16 count, u16 offset)
2818 {
2819 	/* Make certain the sb_dev and dev are already configured */
2820 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2821 		return -EINVAL;
2822 
2823 	/* We cannot hand out queues we don't have */
2824 	if ((offset + count) > dev->real_num_tx_queues)
2825 		return -EINVAL;
2826 
2827 	/* Record the mapping */
2828 	sb_dev->tc_to_txq[tc].count = count;
2829 	sb_dev->tc_to_txq[tc].offset = offset;
2830 
2831 	/* Provide a way for Tx queue to find the tc_to_txq map or
2832 	 * XPS map for itself.
2833 	 */
2834 	while (count--)
2835 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2836 
2837 	return 0;
2838 }
2839 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2840 
2841 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2842 {
2843 	/* Do not use a multiqueue device to represent a subordinate channel */
2844 	if (netif_is_multiqueue(dev))
2845 		return -ENODEV;
2846 
2847 	/* We allow channels 1 - 32767 to be used for subordinate channels.
2848 	 * Channel 0 is meant to be "native" mode and used only to represent
2849 	 * the main root device. We allow writing 0 to reset the device back
2850 	 * to normal mode after being used as a subordinate channel.
2851 	 */
2852 	if (channel > S16_MAX)
2853 		return -EINVAL;
2854 
2855 	dev->num_tc = -channel;
2856 
2857 	return 0;
2858 }
2859 EXPORT_SYMBOL(netdev_set_sb_channel);
2860 
2861 /*
2862  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2863  * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2864  */
2865 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2866 {
2867 	bool disabling;
2868 	int rc;
2869 
2870 	disabling = txq < dev->real_num_tx_queues;
2871 
2872 	if (txq < 1 || txq > dev->num_tx_queues)
2873 		return -EINVAL;
2874 
2875 	if (dev->reg_state == NETREG_REGISTERED ||
2876 	    dev->reg_state == NETREG_UNREGISTERING) {
2877 		ASSERT_RTNL();
2878 
2879 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2880 						  txq);
2881 		if (rc)
2882 			return rc;
2883 
2884 		if (dev->num_tc)
2885 			netif_setup_tc(dev, txq);
2886 
2887 		dev_qdisc_change_real_num_tx(dev, txq);
2888 
2889 		dev->real_num_tx_queues = txq;
2890 
2891 		if (disabling) {
2892 			synchronize_net();
2893 			qdisc_reset_all_tx_gt(dev, txq);
2894 #ifdef CONFIG_XPS
2895 			netif_reset_xps_queues_gt(dev, txq);
2896 #endif
2897 		}
2898 	} else {
2899 		dev->real_num_tx_queues = txq;
2900 	}
2901 
2902 	return 0;
2903 }
2904 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2905 
2906 #ifdef CONFIG_SYSFS
2907 /**
2908  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2909  *	@dev: Network device
2910  *	@rxq: Actual number of RX queues
2911  *
2912  *	This must be called either with the rtnl_lock held or before
2913  *	registration of the net device.  Returns 0 on success, or a
2914  *	negative error code.  If called before registration, it always
2915  *	succeeds.
2916  */
2917 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2918 {
2919 	int rc;
2920 
2921 	if (rxq < 1 || rxq > dev->num_rx_queues)
2922 		return -EINVAL;
2923 
2924 	if (dev->reg_state == NETREG_REGISTERED) {
2925 		ASSERT_RTNL();
2926 
2927 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2928 						  rxq);
2929 		if (rc)
2930 			return rc;
2931 	}
2932 
2933 	dev->real_num_rx_queues = rxq;
2934 	return 0;
2935 }
2936 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2937 #endif
2938 
2939 /**
2940  *	netif_set_real_num_queues - set actual number of RX and TX queues used
2941  *	@dev: Network device
2942  *	@txq: Actual number of TX queues
2943  *	@rxq: Actual number of RX queues
2944  *
2945  *	Set the real number of both TX and RX queues.
2946  *	Does nothing if the number of queues is already correct.
2947  */
2948 int netif_set_real_num_queues(struct net_device *dev,
2949 			      unsigned int txq, unsigned int rxq)
2950 {
2951 	unsigned int old_rxq = dev->real_num_rx_queues;
2952 	int err;
2953 
2954 	if (txq < 1 || txq > dev->num_tx_queues ||
2955 	    rxq < 1 || rxq > dev->num_rx_queues)
2956 		return -EINVAL;
2957 
2958 	/* Start from increases, so the error path only does decreases -
2959 	 * decreases can't fail.
2960 	 */
2961 	if (rxq > dev->real_num_rx_queues) {
2962 		err = netif_set_real_num_rx_queues(dev, rxq);
2963 		if (err)
2964 			return err;
2965 	}
2966 	if (txq > dev->real_num_tx_queues) {
2967 		err = netif_set_real_num_tx_queues(dev, txq);
2968 		if (err)
2969 			goto undo_rx;
2970 	}
2971 	if (rxq < dev->real_num_rx_queues)
2972 		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
2973 	if (txq < dev->real_num_tx_queues)
2974 		WARN_ON(netif_set_real_num_tx_queues(dev, txq));
2975 
2976 	return 0;
2977 undo_rx:
2978 	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
2979 	return err;
2980 }
2981 EXPORT_SYMBOL(netif_set_real_num_queues);
2982 
2983 /**
2984  * netif_set_tso_max_size() - set the max size of TSO frames supported
2985  * @dev:	netdev to update
2986  * @size:	max skb->len of a TSO frame
2987  *
2988  * Set the limit on the size of TSO super-frames the device can handle.
2989  * Unless explicitly set the stack will assume the value of
2990  * %GSO_LEGACY_MAX_SIZE.
2991  */
2992 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
2993 {
2994 	dev->tso_max_size = min(GSO_MAX_SIZE, size);
2995 	if (size < READ_ONCE(dev->gso_max_size))
2996 		netif_set_gso_max_size(dev, size);
2997 	if (size < READ_ONCE(dev->gso_ipv4_max_size))
2998 		netif_set_gso_ipv4_max_size(dev, size);
2999 }
3000 EXPORT_SYMBOL(netif_set_tso_max_size);
3001 
3002 /**
3003  * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3004  * @dev:	netdev to update
3005  * @segs:	max number of TCP segments
3006  *
3007  * Set the limit on the number of TCP segments the device can generate from
3008  * a single TSO super-frame.
3009  * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3010  */
3011 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3012 {
3013 	dev->tso_max_segs = segs;
3014 	if (segs < READ_ONCE(dev->gso_max_segs))
3015 		netif_set_gso_max_segs(dev, segs);
3016 }
3017 EXPORT_SYMBOL(netif_set_tso_max_segs);
3018 
3019 /**
3020  * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3021  * @to:		netdev to update
3022  * @from:	netdev from which to copy the limits
3023  */
3024 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3025 {
3026 	netif_set_tso_max_size(to, from->tso_max_size);
3027 	netif_set_tso_max_segs(to, from->tso_max_segs);
3028 }
3029 EXPORT_SYMBOL(netif_inherit_tso_max);
3030 
3031 /**
3032  * netif_get_num_default_rss_queues - default number of RSS queues
3033  *
3034  * Default value is the number of physical cores if there are only 1 or 2, or
3035  * divided by 2 if there are more.
3036  */
3037 int netif_get_num_default_rss_queues(void)
3038 {
3039 	cpumask_var_t cpus;
3040 	int cpu, count = 0;
3041 
3042 	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3043 		return 1;
3044 
3045 	cpumask_copy(cpus, cpu_online_mask);
3046 	for_each_cpu(cpu, cpus) {
3047 		++count;
3048 		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3049 	}
3050 	free_cpumask_var(cpus);
3051 
3052 	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3053 }
3054 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3055 
3056 static void __netif_reschedule(struct Qdisc *q)
3057 {
3058 	struct softnet_data *sd;
3059 	unsigned long flags;
3060 
3061 	local_irq_save(flags);
3062 	sd = this_cpu_ptr(&softnet_data);
3063 	q->next_sched = NULL;
3064 	*sd->output_queue_tailp = q;
3065 	sd->output_queue_tailp = &q->next_sched;
3066 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3067 	local_irq_restore(flags);
3068 }
3069 
3070 void __netif_schedule(struct Qdisc *q)
3071 {
3072 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3073 		__netif_reschedule(q);
3074 }
3075 EXPORT_SYMBOL(__netif_schedule);
3076 
3077 struct dev_kfree_skb_cb {
3078 	enum skb_drop_reason reason;
3079 };
3080 
3081 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3082 {
3083 	return (struct dev_kfree_skb_cb *)skb->cb;
3084 }
3085 
3086 void netif_schedule_queue(struct netdev_queue *txq)
3087 {
3088 	rcu_read_lock();
3089 	if (!netif_xmit_stopped(txq)) {
3090 		struct Qdisc *q = rcu_dereference(txq->qdisc);
3091 
3092 		__netif_schedule(q);
3093 	}
3094 	rcu_read_unlock();
3095 }
3096 EXPORT_SYMBOL(netif_schedule_queue);
3097 
3098 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3099 {
3100 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3101 		struct Qdisc *q;
3102 
3103 		rcu_read_lock();
3104 		q = rcu_dereference(dev_queue->qdisc);
3105 		__netif_schedule(q);
3106 		rcu_read_unlock();
3107 	}
3108 }
3109 EXPORT_SYMBOL(netif_tx_wake_queue);
3110 
3111 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3112 {
3113 	unsigned long flags;
3114 
3115 	if (unlikely(!skb))
3116 		return;
3117 
3118 	if (likely(refcount_read(&skb->users) == 1)) {
3119 		smp_rmb();
3120 		refcount_set(&skb->users, 0);
3121 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3122 		return;
3123 	}
3124 	get_kfree_skb_cb(skb)->reason = reason;
3125 	local_irq_save(flags);
3126 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3127 	__this_cpu_write(softnet_data.completion_queue, skb);
3128 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3129 	local_irq_restore(flags);
3130 }
3131 EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3132 
3133 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3134 {
3135 	if (in_hardirq() || irqs_disabled())
3136 		dev_kfree_skb_irq_reason(skb, reason);
3137 	else
3138 		kfree_skb_reason(skb, reason);
3139 }
3140 EXPORT_SYMBOL(dev_kfree_skb_any_reason);
3141 
3142 
3143 /**
3144  * netif_device_detach - mark device as removed
3145  * @dev: network device
3146  *
3147  * Mark device as removed from system and therefore no longer available.
3148  */
3149 void netif_device_detach(struct net_device *dev)
3150 {
3151 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3152 	    netif_running(dev)) {
3153 		netif_tx_stop_all_queues(dev);
3154 	}
3155 }
3156 EXPORT_SYMBOL(netif_device_detach);
3157 
3158 /**
3159  * netif_device_attach - mark device as attached
3160  * @dev: network device
3161  *
3162  * Mark device as attached from system and restart if needed.
3163  */
3164 void netif_device_attach(struct net_device *dev)
3165 {
3166 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3167 	    netif_running(dev)) {
3168 		netif_tx_wake_all_queues(dev);
3169 		__netdev_watchdog_up(dev);
3170 	}
3171 }
3172 EXPORT_SYMBOL(netif_device_attach);
3173 
3174 /*
3175  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
3176  * to be used as a distribution range.
3177  */
3178 static u16 skb_tx_hash(const struct net_device *dev,
3179 		       const struct net_device *sb_dev,
3180 		       struct sk_buff *skb)
3181 {
3182 	u32 hash;
3183 	u16 qoffset = 0;
3184 	u16 qcount = dev->real_num_tx_queues;
3185 
3186 	if (dev->num_tc) {
3187 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3188 
3189 		qoffset = sb_dev->tc_to_txq[tc].offset;
3190 		qcount = sb_dev->tc_to_txq[tc].count;
3191 		if (unlikely(!qcount)) {
3192 			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3193 					     sb_dev->name, qoffset, tc);
3194 			qoffset = 0;
3195 			qcount = dev->real_num_tx_queues;
3196 		}
3197 	}
3198 
3199 	if (skb_rx_queue_recorded(skb)) {
3200 		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3201 		hash = skb_get_rx_queue(skb);
3202 		if (hash >= qoffset)
3203 			hash -= qoffset;
3204 		while (unlikely(hash >= qcount))
3205 			hash -= qcount;
3206 		return hash + qoffset;
3207 	}
3208 
3209 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3210 }
3211 
3212 static void skb_warn_bad_offload(const struct sk_buff *skb)
3213 {
3214 	static const netdev_features_t null_features;
3215 	struct net_device *dev = skb->dev;
3216 	const char *name = "";
3217 
3218 	if (!net_ratelimit())
3219 		return;
3220 
3221 	if (dev) {
3222 		if (dev->dev.parent)
3223 			name = dev_driver_string(dev->dev.parent);
3224 		else
3225 			name = netdev_name(dev);
3226 	}
3227 	skb_dump(KERN_WARNING, skb, false);
3228 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3229 	     name, dev ? &dev->features : &null_features,
3230 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3231 }
3232 
3233 /*
3234  * Invalidate hardware checksum when packet is to be mangled, and
3235  * complete checksum manually on outgoing path.
3236  */
3237 int skb_checksum_help(struct sk_buff *skb)
3238 {
3239 	__wsum csum;
3240 	int ret = 0, offset;
3241 
3242 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3243 		goto out_set_summed;
3244 
3245 	if (unlikely(skb_is_gso(skb))) {
3246 		skb_warn_bad_offload(skb);
3247 		return -EINVAL;
3248 	}
3249 
3250 	/* Before computing a checksum, we should make sure no frag could
3251 	 * be modified by an external entity : checksum could be wrong.
3252 	 */
3253 	if (skb_has_shared_frag(skb)) {
3254 		ret = __skb_linearize(skb);
3255 		if (ret)
3256 			goto out;
3257 	}
3258 
3259 	offset = skb_checksum_start_offset(skb);
3260 	ret = -EINVAL;
3261 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3262 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3263 		goto out;
3264 	}
3265 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3266 
3267 	offset += skb->csum_offset;
3268 	if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
3269 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3270 		goto out;
3271 	}
3272 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3273 	if (ret)
3274 		goto out;
3275 
3276 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3277 out_set_summed:
3278 	skb->ip_summed = CHECKSUM_NONE;
3279 out:
3280 	return ret;
3281 }
3282 EXPORT_SYMBOL(skb_checksum_help);
3283 
3284 int skb_crc32c_csum_help(struct sk_buff *skb)
3285 {
3286 	__le32 crc32c_csum;
3287 	int ret = 0, offset, start;
3288 
3289 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3290 		goto out;
3291 
3292 	if (unlikely(skb_is_gso(skb)))
3293 		goto out;
3294 
3295 	/* Before computing a checksum, we should make sure no frag could
3296 	 * be modified by an external entity : checksum could be wrong.
3297 	 */
3298 	if (unlikely(skb_has_shared_frag(skb))) {
3299 		ret = __skb_linearize(skb);
3300 		if (ret)
3301 			goto out;
3302 	}
3303 	start = skb_checksum_start_offset(skb);
3304 	offset = start + offsetof(struct sctphdr, checksum);
3305 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3306 		ret = -EINVAL;
3307 		goto out;
3308 	}
3309 
3310 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3311 	if (ret)
3312 		goto out;
3313 
3314 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3315 						  skb->len - start, ~(__u32)0,
3316 						  crc32c_csum_stub));
3317 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3318 	skb->ip_summed = CHECKSUM_NONE;
3319 	skb->csum_not_inet = 0;
3320 out:
3321 	return ret;
3322 }
3323 
3324 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3325 {
3326 	__be16 type = skb->protocol;
3327 
3328 	/* Tunnel gso handlers can set protocol to ethernet. */
3329 	if (type == htons(ETH_P_TEB)) {
3330 		struct ethhdr *eth;
3331 
3332 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3333 			return 0;
3334 
3335 		eth = (struct ethhdr *)skb->data;
3336 		type = eth->h_proto;
3337 	}
3338 
3339 	return __vlan_get_protocol(skb, type, depth);
3340 }
3341 
3342 /* openvswitch calls this on rx path, so we need a different check.
3343  */
3344 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3345 {
3346 	if (tx_path)
3347 		return skb->ip_summed != CHECKSUM_PARTIAL &&
3348 		       skb->ip_summed != CHECKSUM_UNNECESSARY;
3349 
3350 	return skb->ip_summed == CHECKSUM_NONE;
3351 }
3352 
3353 /**
3354  *	__skb_gso_segment - Perform segmentation on skb.
3355  *	@skb: buffer to segment
3356  *	@features: features for the output path (see dev->features)
3357  *	@tx_path: whether it is called in TX path
3358  *
3359  *	This function segments the given skb and returns a list of segments.
3360  *
3361  *	It may return NULL if the skb requires no segmentation.  This is
3362  *	only possible when GSO is used for verifying header integrity.
3363  *
3364  *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3365  */
3366 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3367 				  netdev_features_t features, bool tx_path)
3368 {
3369 	struct sk_buff *segs;
3370 
3371 	if (unlikely(skb_needs_check(skb, tx_path))) {
3372 		int err;
3373 
3374 		/* We're going to init ->check field in TCP or UDP header */
3375 		err = skb_cow_head(skb, 0);
3376 		if (err < 0)
3377 			return ERR_PTR(err);
3378 	}
3379 
3380 	/* Only report GSO partial support if it will enable us to
3381 	 * support segmentation on this frame without needing additional
3382 	 * work.
3383 	 */
3384 	if (features & NETIF_F_GSO_PARTIAL) {
3385 		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3386 		struct net_device *dev = skb->dev;
3387 
3388 		partial_features |= dev->features & dev->gso_partial_features;
3389 		if (!skb_gso_ok(skb, features | partial_features))
3390 			features &= ~NETIF_F_GSO_PARTIAL;
3391 	}
3392 
3393 	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3394 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3395 
3396 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3397 	SKB_GSO_CB(skb)->encap_level = 0;
3398 
3399 	skb_reset_mac_header(skb);
3400 	skb_reset_mac_len(skb);
3401 
3402 	segs = skb_mac_gso_segment(skb, features);
3403 
3404 	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3405 		skb_warn_bad_offload(skb);
3406 
3407 	return segs;
3408 }
3409 EXPORT_SYMBOL(__skb_gso_segment);
3410 
3411 /* Take action when hardware reception checksum errors are detected. */
3412 #ifdef CONFIG_BUG
3413 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3414 {
3415 	netdev_err(dev, "hw csum failure\n");
3416 	skb_dump(KERN_ERR, skb, true);
3417 	dump_stack();
3418 }
3419 
3420 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3421 {
3422 	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3423 }
3424 EXPORT_SYMBOL(netdev_rx_csum_fault);
3425 #endif
3426 
3427 /* XXX: check that highmem exists at all on the given machine. */
3428 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3429 {
3430 #ifdef CONFIG_HIGHMEM
3431 	int i;
3432 
3433 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3434 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3435 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3436 
3437 			if (PageHighMem(skb_frag_page(frag)))
3438 				return 1;
3439 		}
3440 	}
3441 #endif
3442 	return 0;
3443 }
3444 
3445 /* If MPLS offload request, verify we are testing hardware MPLS features
3446  * instead of standard features for the netdev.
3447  */
3448 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3449 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3450 					   netdev_features_t features,
3451 					   __be16 type)
3452 {
3453 	if (eth_p_mpls(type))
3454 		features &= skb->dev->mpls_features;
3455 
3456 	return features;
3457 }
3458 #else
3459 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3460 					   netdev_features_t features,
3461 					   __be16 type)
3462 {
3463 	return features;
3464 }
3465 #endif
3466 
3467 static netdev_features_t harmonize_features(struct sk_buff *skb,
3468 	netdev_features_t features)
3469 {
3470 	__be16 type;
3471 
3472 	type = skb_network_protocol(skb, NULL);
3473 	features = net_mpls_features(skb, features, type);
3474 
3475 	if (skb->ip_summed != CHECKSUM_NONE &&
3476 	    !can_checksum_protocol(features, type)) {
3477 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3478 	}
3479 	if (illegal_highdma(skb->dev, skb))
3480 		features &= ~NETIF_F_SG;
3481 
3482 	return features;
3483 }
3484 
3485 netdev_features_t passthru_features_check(struct sk_buff *skb,
3486 					  struct net_device *dev,
3487 					  netdev_features_t features)
3488 {
3489 	return features;
3490 }
3491 EXPORT_SYMBOL(passthru_features_check);
3492 
3493 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3494 					     struct net_device *dev,
3495 					     netdev_features_t features)
3496 {
3497 	return vlan_features_check(skb, features);
3498 }
3499 
3500 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3501 					    struct net_device *dev,
3502 					    netdev_features_t features)
3503 {
3504 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3505 
3506 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
3507 		return features & ~NETIF_F_GSO_MASK;
3508 
3509 	if (!skb_shinfo(skb)->gso_type) {
3510 		skb_warn_bad_offload(skb);
3511 		return features & ~NETIF_F_GSO_MASK;
3512 	}
3513 
3514 	/* Support for GSO partial features requires software
3515 	 * intervention before we can actually process the packets
3516 	 * so we need to strip support for any partial features now
3517 	 * and we can pull them back in after we have partially
3518 	 * segmented the frame.
3519 	 */
3520 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3521 		features &= ~dev->gso_partial_features;
3522 
3523 	/* Make sure to clear the IPv4 ID mangling feature if the
3524 	 * IPv4 header has the potential to be fragmented.
3525 	 */
3526 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3527 		struct iphdr *iph = skb->encapsulation ?
3528 				    inner_ip_hdr(skb) : ip_hdr(skb);
3529 
3530 		if (!(iph->frag_off & htons(IP_DF)))
3531 			features &= ~NETIF_F_TSO_MANGLEID;
3532 	}
3533 
3534 	return features;
3535 }
3536 
3537 netdev_features_t netif_skb_features(struct sk_buff *skb)
3538 {
3539 	struct net_device *dev = skb->dev;
3540 	netdev_features_t features = dev->features;
3541 
3542 	if (skb_is_gso(skb))
3543 		features = gso_features_check(skb, dev, features);
3544 
3545 	/* If encapsulation offload request, verify we are testing
3546 	 * hardware encapsulation features instead of standard
3547 	 * features for the netdev
3548 	 */
3549 	if (skb->encapsulation)
3550 		features &= dev->hw_enc_features;
3551 
3552 	if (skb_vlan_tagged(skb))
3553 		features = netdev_intersect_features(features,
3554 						     dev->vlan_features |
3555 						     NETIF_F_HW_VLAN_CTAG_TX |
3556 						     NETIF_F_HW_VLAN_STAG_TX);
3557 
3558 	if (dev->netdev_ops->ndo_features_check)
3559 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3560 								features);
3561 	else
3562 		features &= dflt_features_check(skb, dev, features);
3563 
3564 	return harmonize_features(skb, features);
3565 }
3566 EXPORT_SYMBOL(netif_skb_features);
3567 
3568 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3569 		    struct netdev_queue *txq, bool more)
3570 {
3571 	unsigned int len;
3572 	int rc;
3573 
3574 	if (dev_nit_active(dev))
3575 		dev_queue_xmit_nit(skb, dev);
3576 
3577 	len = skb->len;
3578 	trace_net_dev_start_xmit(skb, dev);
3579 	rc = netdev_start_xmit(skb, dev, txq, more);
3580 	trace_net_dev_xmit(skb, rc, dev, len);
3581 
3582 	return rc;
3583 }
3584 
3585 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3586 				    struct netdev_queue *txq, int *ret)
3587 {
3588 	struct sk_buff *skb = first;
3589 	int rc = NETDEV_TX_OK;
3590 
3591 	while (skb) {
3592 		struct sk_buff *next = skb->next;
3593 
3594 		skb_mark_not_on_list(skb);
3595 		rc = xmit_one(skb, dev, txq, next != NULL);
3596 		if (unlikely(!dev_xmit_complete(rc))) {
3597 			skb->next = next;
3598 			goto out;
3599 		}
3600 
3601 		skb = next;
3602 		if (netif_tx_queue_stopped(txq) && skb) {
3603 			rc = NETDEV_TX_BUSY;
3604 			break;
3605 		}
3606 	}
3607 
3608 out:
3609 	*ret = rc;
3610 	return skb;
3611 }
3612 
3613 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3614 					  netdev_features_t features)
3615 {
3616 	if (skb_vlan_tag_present(skb) &&
3617 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3618 		skb = __vlan_hwaccel_push_inside(skb);
3619 	return skb;
3620 }
3621 
3622 int skb_csum_hwoffload_help(struct sk_buff *skb,
3623 			    const netdev_features_t features)
3624 {
3625 	if (unlikely(skb_csum_is_sctp(skb)))
3626 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3627 			skb_crc32c_csum_help(skb);
3628 
3629 	if (features & NETIF_F_HW_CSUM)
3630 		return 0;
3631 
3632 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3633 		switch (skb->csum_offset) {
3634 		case offsetof(struct tcphdr, check):
3635 		case offsetof(struct udphdr, check):
3636 			return 0;
3637 		}
3638 	}
3639 
3640 	return skb_checksum_help(skb);
3641 }
3642 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3643 
3644 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3645 {
3646 	netdev_features_t features;
3647 
3648 	features = netif_skb_features(skb);
3649 	skb = validate_xmit_vlan(skb, features);
3650 	if (unlikely(!skb))
3651 		goto out_null;
3652 
3653 	skb = sk_validate_xmit_skb(skb, dev);
3654 	if (unlikely(!skb))
3655 		goto out_null;
3656 
3657 	if (netif_needs_gso(skb, features)) {
3658 		struct sk_buff *segs;
3659 
3660 		segs = skb_gso_segment(skb, features);
3661 		if (IS_ERR(segs)) {
3662 			goto out_kfree_skb;
3663 		} else if (segs) {
3664 			consume_skb(skb);
3665 			skb = segs;
3666 		}
3667 	} else {
3668 		if (skb_needs_linearize(skb, features) &&
3669 		    __skb_linearize(skb))
3670 			goto out_kfree_skb;
3671 
3672 		/* If packet is not checksummed and device does not
3673 		 * support checksumming for this protocol, complete
3674 		 * checksumming here.
3675 		 */
3676 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3677 			if (skb->encapsulation)
3678 				skb_set_inner_transport_header(skb,
3679 							       skb_checksum_start_offset(skb));
3680 			else
3681 				skb_set_transport_header(skb,
3682 							 skb_checksum_start_offset(skb));
3683 			if (skb_csum_hwoffload_help(skb, features))
3684 				goto out_kfree_skb;
3685 		}
3686 	}
3687 
3688 	skb = validate_xmit_xfrm(skb, features, again);
3689 
3690 	return skb;
3691 
3692 out_kfree_skb:
3693 	kfree_skb(skb);
3694 out_null:
3695 	dev_core_stats_tx_dropped_inc(dev);
3696 	return NULL;
3697 }
3698 
3699 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3700 {
3701 	struct sk_buff *next, *head = NULL, *tail;
3702 
3703 	for (; skb != NULL; skb = next) {
3704 		next = skb->next;
3705 		skb_mark_not_on_list(skb);
3706 
3707 		/* in case skb wont be segmented, point to itself */
3708 		skb->prev = skb;
3709 
3710 		skb = validate_xmit_skb(skb, dev, again);
3711 		if (!skb)
3712 			continue;
3713 
3714 		if (!head)
3715 			head = skb;
3716 		else
3717 			tail->next = skb;
3718 		/* If skb was segmented, skb->prev points to
3719 		 * the last segment. If not, it still contains skb.
3720 		 */
3721 		tail = skb->prev;
3722 	}
3723 	return head;
3724 }
3725 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3726 
3727 static void qdisc_pkt_len_init(struct sk_buff *skb)
3728 {
3729 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3730 
3731 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3732 
3733 	/* To get more precise estimation of bytes sent on wire,
3734 	 * we add to pkt_len the headers size of all segments
3735 	 */
3736 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3737 		u16 gso_segs = shinfo->gso_segs;
3738 		unsigned int hdr_len;
3739 
3740 		/* mac layer + network layer */
3741 		hdr_len = skb_transport_offset(skb);
3742 
3743 		/* + transport layer */
3744 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3745 			const struct tcphdr *th;
3746 			struct tcphdr _tcphdr;
3747 
3748 			th = skb_header_pointer(skb, hdr_len,
3749 						sizeof(_tcphdr), &_tcphdr);
3750 			if (likely(th))
3751 				hdr_len += __tcp_hdrlen(th);
3752 		} else {
3753 			struct udphdr _udphdr;
3754 
3755 			if (skb_header_pointer(skb, hdr_len,
3756 					       sizeof(_udphdr), &_udphdr))
3757 				hdr_len += sizeof(struct udphdr);
3758 		}
3759 
3760 		if (shinfo->gso_type & SKB_GSO_DODGY)
3761 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3762 						shinfo->gso_size);
3763 
3764 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3765 	}
3766 }
3767 
3768 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3769 			     struct sk_buff **to_free,
3770 			     struct netdev_queue *txq)
3771 {
3772 	int rc;
3773 
3774 	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3775 	if (rc == NET_XMIT_SUCCESS)
3776 		trace_qdisc_enqueue(q, txq, skb);
3777 	return rc;
3778 }
3779 
3780 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3781 				 struct net_device *dev,
3782 				 struct netdev_queue *txq)
3783 {
3784 	spinlock_t *root_lock = qdisc_lock(q);
3785 	struct sk_buff *to_free = NULL;
3786 	bool contended;
3787 	int rc;
3788 
3789 	qdisc_calculate_pkt_len(skb, q);
3790 
3791 	if (q->flags & TCQ_F_NOLOCK) {
3792 		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3793 		    qdisc_run_begin(q)) {
3794 			/* Retest nolock_qdisc_is_empty() within the protection
3795 			 * of q->seqlock to protect from racing with requeuing.
3796 			 */
3797 			if (unlikely(!nolock_qdisc_is_empty(q))) {
3798 				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3799 				__qdisc_run(q);
3800 				qdisc_run_end(q);
3801 
3802 				goto no_lock_out;
3803 			}
3804 
3805 			qdisc_bstats_cpu_update(q, skb);
3806 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3807 			    !nolock_qdisc_is_empty(q))
3808 				__qdisc_run(q);
3809 
3810 			qdisc_run_end(q);
3811 			return NET_XMIT_SUCCESS;
3812 		}
3813 
3814 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3815 		qdisc_run(q);
3816 
3817 no_lock_out:
3818 		if (unlikely(to_free))
3819 			kfree_skb_list_reason(to_free,
3820 					      SKB_DROP_REASON_QDISC_DROP);
3821 		return rc;
3822 	}
3823 
3824 	/*
3825 	 * Heuristic to force contended enqueues to serialize on a
3826 	 * separate lock before trying to get qdisc main lock.
3827 	 * This permits qdisc->running owner to get the lock more
3828 	 * often and dequeue packets faster.
3829 	 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3830 	 * and then other tasks will only enqueue packets. The packets will be
3831 	 * sent after the qdisc owner is scheduled again. To prevent this
3832 	 * scenario the task always serialize on the lock.
3833 	 */
3834 	contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3835 	if (unlikely(contended))
3836 		spin_lock(&q->busylock);
3837 
3838 	spin_lock(root_lock);
3839 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3840 		__qdisc_drop(skb, &to_free);
3841 		rc = NET_XMIT_DROP;
3842 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3843 		   qdisc_run_begin(q)) {
3844 		/*
3845 		 * This is a work-conserving queue; there are no old skbs
3846 		 * waiting to be sent out; and the qdisc is not running -
3847 		 * xmit the skb directly.
3848 		 */
3849 
3850 		qdisc_bstats_update(q, skb);
3851 
3852 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3853 			if (unlikely(contended)) {
3854 				spin_unlock(&q->busylock);
3855 				contended = false;
3856 			}
3857 			__qdisc_run(q);
3858 		}
3859 
3860 		qdisc_run_end(q);
3861 		rc = NET_XMIT_SUCCESS;
3862 	} else {
3863 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3864 		if (qdisc_run_begin(q)) {
3865 			if (unlikely(contended)) {
3866 				spin_unlock(&q->busylock);
3867 				contended = false;
3868 			}
3869 			__qdisc_run(q);
3870 			qdisc_run_end(q);
3871 		}
3872 	}
3873 	spin_unlock(root_lock);
3874 	if (unlikely(to_free))
3875 		kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
3876 	if (unlikely(contended))
3877 		spin_unlock(&q->busylock);
3878 	return rc;
3879 }
3880 
3881 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3882 static void skb_update_prio(struct sk_buff *skb)
3883 {
3884 	const struct netprio_map *map;
3885 	const struct sock *sk;
3886 	unsigned int prioidx;
3887 
3888 	if (skb->priority)
3889 		return;
3890 	map = rcu_dereference_bh(skb->dev->priomap);
3891 	if (!map)
3892 		return;
3893 	sk = skb_to_full_sk(skb);
3894 	if (!sk)
3895 		return;
3896 
3897 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3898 
3899 	if (prioidx < map->priomap_len)
3900 		skb->priority = map->priomap[prioidx];
3901 }
3902 #else
3903 #define skb_update_prio(skb)
3904 #endif
3905 
3906 /**
3907  *	dev_loopback_xmit - loop back @skb
3908  *	@net: network namespace this loopback is happening in
3909  *	@sk:  sk needed to be a netfilter okfn
3910  *	@skb: buffer to transmit
3911  */
3912 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3913 {
3914 	skb_reset_mac_header(skb);
3915 	__skb_pull(skb, skb_network_offset(skb));
3916 	skb->pkt_type = PACKET_LOOPBACK;
3917 	if (skb->ip_summed == CHECKSUM_NONE)
3918 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3919 	DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3920 	skb_dst_force(skb);
3921 	netif_rx(skb);
3922 	return 0;
3923 }
3924 EXPORT_SYMBOL(dev_loopback_xmit);
3925 
3926 #ifdef CONFIG_NET_EGRESS
3927 static struct sk_buff *
3928 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3929 {
3930 #ifdef CONFIG_NET_CLS_ACT
3931 	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3932 	struct tcf_result cl_res;
3933 
3934 	if (!miniq)
3935 		return skb;
3936 
3937 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3938 	tc_skb_cb(skb)->mru = 0;
3939 	tc_skb_cb(skb)->post_ct = false;
3940 	mini_qdisc_bstats_cpu_update(miniq, skb);
3941 
3942 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
3943 	case TC_ACT_OK:
3944 	case TC_ACT_RECLASSIFY:
3945 		skb->tc_index = TC_H_MIN(cl_res.classid);
3946 		break;
3947 	case TC_ACT_SHOT:
3948 		mini_qdisc_qstats_cpu_drop(miniq);
3949 		*ret = NET_XMIT_DROP;
3950 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
3951 		return NULL;
3952 	case TC_ACT_STOLEN:
3953 	case TC_ACT_QUEUED:
3954 	case TC_ACT_TRAP:
3955 		*ret = NET_XMIT_SUCCESS;
3956 		consume_skb(skb);
3957 		return NULL;
3958 	case TC_ACT_REDIRECT:
3959 		/* No need to push/pop skb's mac_header here on egress! */
3960 		skb_do_redirect(skb);
3961 		*ret = NET_XMIT_SUCCESS;
3962 		return NULL;
3963 	default:
3964 		break;
3965 	}
3966 #endif /* CONFIG_NET_CLS_ACT */
3967 
3968 	return skb;
3969 }
3970 
3971 static struct netdev_queue *
3972 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3973 {
3974 	int qm = skb_get_queue_mapping(skb);
3975 
3976 	return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3977 }
3978 
3979 static bool netdev_xmit_txqueue_skipped(void)
3980 {
3981 	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3982 }
3983 
3984 void netdev_xmit_skip_txqueue(bool skip)
3985 {
3986 	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3987 }
3988 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3989 #endif /* CONFIG_NET_EGRESS */
3990 
3991 #ifdef CONFIG_XPS
3992 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3993 			       struct xps_dev_maps *dev_maps, unsigned int tci)
3994 {
3995 	int tc = netdev_get_prio_tc_map(dev, skb->priority);
3996 	struct xps_map *map;
3997 	int queue_index = -1;
3998 
3999 	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4000 		return queue_index;
4001 
4002 	tci *= dev_maps->num_tc;
4003 	tci += tc;
4004 
4005 	map = rcu_dereference(dev_maps->attr_map[tci]);
4006 	if (map) {
4007 		if (map->len == 1)
4008 			queue_index = map->queues[0];
4009 		else
4010 			queue_index = map->queues[reciprocal_scale(
4011 						skb_get_hash(skb), map->len)];
4012 		if (unlikely(queue_index >= dev->real_num_tx_queues))
4013 			queue_index = -1;
4014 	}
4015 	return queue_index;
4016 }
4017 #endif
4018 
4019 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4020 			 struct sk_buff *skb)
4021 {
4022 #ifdef CONFIG_XPS
4023 	struct xps_dev_maps *dev_maps;
4024 	struct sock *sk = skb->sk;
4025 	int queue_index = -1;
4026 
4027 	if (!static_key_false(&xps_needed))
4028 		return -1;
4029 
4030 	rcu_read_lock();
4031 	if (!static_key_false(&xps_rxqs_needed))
4032 		goto get_cpus_map;
4033 
4034 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4035 	if (dev_maps) {
4036 		int tci = sk_rx_queue_get(sk);
4037 
4038 		if (tci >= 0)
4039 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4040 							  tci);
4041 	}
4042 
4043 get_cpus_map:
4044 	if (queue_index < 0) {
4045 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4046 		if (dev_maps) {
4047 			unsigned int tci = skb->sender_cpu - 1;
4048 
4049 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4050 							  tci);
4051 		}
4052 	}
4053 	rcu_read_unlock();
4054 
4055 	return queue_index;
4056 #else
4057 	return -1;
4058 #endif
4059 }
4060 
4061 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4062 		     struct net_device *sb_dev)
4063 {
4064 	return 0;
4065 }
4066 EXPORT_SYMBOL(dev_pick_tx_zero);
4067 
4068 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4069 		       struct net_device *sb_dev)
4070 {
4071 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4072 }
4073 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4074 
4075 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4076 		     struct net_device *sb_dev)
4077 {
4078 	struct sock *sk = skb->sk;
4079 	int queue_index = sk_tx_queue_get(sk);
4080 
4081 	sb_dev = sb_dev ? : dev;
4082 
4083 	if (queue_index < 0 || skb->ooo_okay ||
4084 	    queue_index >= dev->real_num_tx_queues) {
4085 		int new_index = get_xps_queue(dev, sb_dev, skb);
4086 
4087 		if (new_index < 0)
4088 			new_index = skb_tx_hash(dev, sb_dev, skb);
4089 
4090 		if (queue_index != new_index && sk &&
4091 		    sk_fullsock(sk) &&
4092 		    rcu_access_pointer(sk->sk_dst_cache))
4093 			sk_tx_queue_set(sk, new_index);
4094 
4095 		queue_index = new_index;
4096 	}
4097 
4098 	return queue_index;
4099 }
4100 EXPORT_SYMBOL(netdev_pick_tx);
4101 
4102 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4103 					 struct sk_buff *skb,
4104 					 struct net_device *sb_dev)
4105 {
4106 	int queue_index = 0;
4107 
4108 #ifdef CONFIG_XPS
4109 	u32 sender_cpu = skb->sender_cpu - 1;
4110 
4111 	if (sender_cpu >= (u32)NR_CPUS)
4112 		skb->sender_cpu = raw_smp_processor_id() + 1;
4113 #endif
4114 
4115 	if (dev->real_num_tx_queues != 1) {
4116 		const struct net_device_ops *ops = dev->netdev_ops;
4117 
4118 		if (ops->ndo_select_queue)
4119 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4120 		else
4121 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
4122 
4123 		queue_index = netdev_cap_txqueue(dev, queue_index);
4124 	}
4125 
4126 	skb_set_queue_mapping(skb, queue_index);
4127 	return netdev_get_tx_queue(dev, queue_index);
4128 }
4129 
4130 /**
4131  * __dev_queue_xmit() - transmit a buffer
4132  * @skb:	buffer to transmit
4133  * @sb_dev:	suboordinate device used for L2 forwarding offload
4134  *
4135  * Queue a buffer for transmission to a network device. The caller must
4136  * have set the device and priority and built the buffer before calling
4137  * this function. The function can be called from an interrupt.
4138  *
4139  * When calling this method, interrupts MUST be enabled. This is because
4140  * the BH enable code must have IRQs enabled so that it will not deadlock.
4141  *
4142  * Regardless of the return value, the skb is consumed, so it is currently
4143  * difficult to retry a send to this method. (You can bump the ref count
4144  * before sending to hold a reference for retry if you are careful.)
4145  *
4146  * Return:
4147  * * 0				- buffer successfully transmitted
4148  * * positive qdisc return code	- NET_XMIT_DROP etc.
4149  * * negative errno		- other errors
4150  */
4151 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4152 {
4153 	struct net_device *dev = skb->dev;
4154 	struct netdev_queue *txq = NULL;
4155 	struct Qdisc *q;
4156 	int rc = -ENOMEM;
4157 	bool again = false;
4158 
4159 	skb_reset_mac_header(skb);
4160 	skb_assert_len(skb);
4161 
4162 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4163 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4164 
4165 	/* Disable soft irqs for various locks below. Also
4166 	 * stops preemption for RCU.
4167 	 */
4168 	rcu_read_lock_bh();
4169 
4170 	skb_update_prio(skb);
4171 
4172 	qdisc_pkt_len_init(skb);
4173 #ifdef CONFIG_NET_CLS_ACT
4174 	skb->tc_at_ingress = 0;
4175 #endif
4176 #ifdef CONFIG_NET_EGRESS
4177 	if (static_branch_unlikely(&egress_needed_key)) {
4178 		if (nf_hook_egress_active()) {
4179 			skb = nf_hook_egress(skb, &rc, dev);
4180 			if (!skb)
4181 				goto out;
4182 		}
4183 
4184 		netdev_xmit_skip_txqueue(false);
4185 
4186 		nf_skip_egress(skb, true);
4187 		skb = sch_handle_egress(skb, &rc, dev);
4188 		if (!skb)
4189 			goto out;
4190 		nf_skip_egress(skb, false);
4191 
4192 		if (netdev_xmit_txqueue_skipped())
4193 			txq = netdev_tx_queue_mapping(dev, skb);
4194 	}
4195 #endif
4196 	/* If device/qdisc don't need skb->dst, release it right now while
4197 	 * its hot in this cpu cache.
4198 	 */
4199 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4200 		skb_dst_drop(skb);
4201 	else
4202 		skb_dst_force(skb);
4203 
4204 	if (!txq)
4205 		txq = netdev_core_pick_tx(dev, skb, sb_dev);
4206 
4207 	q = rcu_dereference_bh(txq->qdisc);
4208 
4209 	trace_net_dev_queue(skb);
4210 	if (q->enqueue) {
4211 		rc = __dev_xmit_skb(skb, q, dev, txq);
4212 		goto out;
4213 	}
4214 
4215 	/* The device has no queue. Common case for software devices:
4216 	 * loopback, all the sorts of tunnels...
4217 
4218 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4219 	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
4220 	 * counters.)
4221 	 * However, it is possible, that they rely on protection
4222 	 * made by us here.
4223 
4224 	 * Check this and shot the lock. It is not prone from deadlocks.
4225 	 *Either shot noqueue qdisc, it is even simpler 8)
4226 	 */
4227 	if (dev->flags & IFF_UP) {
4228 		int cpu = smp_processor_id(); /* ok because BHs are off */
4229 
4230 		/* Other cpus might concurrently change txq->xmit_lock_owner
4231 		 * to -1 or to their cpu id, but not to our id.
4232 		 */
4233 		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4234 			if (dev_xmit_recursion())
4235 				goto recursion_alert;
4236 
4237 			skb = validate_xmit_skb(skb, dev, &again);
4238 			if (!skb)
4239 				goto out;
4240 
4241 			HARD_TX_LOCK(dev, txq, cpu);
4242 
4243 			if (!netif_xmit_stopped(txq)) {
4244 				dev_xmit_recursion_inc();
4245 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4246 				dev_xmit_recursion_dec();
4247 				if (dev_xmit_complete(rc)) {
4248 					HARD_TX_UNLOCK(dev, txq);
4249 					goto out;
4250 				}
4251 			}
4252 			HARD_TX_UNLOCK(dev, txq);
4253 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4254 					     dev->name);
4255 		} else {
4256 			/* Recursion is detected! It is possible,
4257 			 * unfortunately
4258 			 */
4259 recursion_alert:
4260 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4261 					     dev->name);
4262 		}
4263 	}
4264 
4265 	rc = -ENETDOWN;
4266 	rcu_read_unlock_bh();
4267 
4268 	dev_core_stats_tx_dropped_inc(dev);
4269 	kfree_skb_list(skb);
4270 	return rc;
4271 out:
4272 	rcu_read_unlock_bh();
4273 	return rc;
4274 }
4275 EXPORT_SYMBOL(__dev_queue_xmit);
4276 
4277 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4278 {
4279 	struct net_device *dev = skb->dev;
4280 	struct sk_buff *orig_skb = skb;
4281 	struct netdev_queue *txq;
4282 	int ret = NETDEV_TX_BUSY;
4283 	bool again = false;
4284 
4285 	if (unlikely(!netif_running(dev) ||
4286 		     !netif_carrier_ok(dev)))
4287 		goto drop;
4288 
4289 	skb = validate_xmit_skb_list(skb, dev, &again);
4290 	if (skb != orig_skb)
4291 		goto drop;
4292 
4293 	skb_set_queue_mapping(skb, queue_id);
4294 	txq = skb_get_tx_queue(dev, skb);
4295 
4296 	local_bh_disable();
4297 
4298 	dev_xmit_recursion_inc();
4299 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4300 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4301 		ret = netdev_start_xmit(skb, dev, txq, false);
4302 	HARD_TX_UNLOCK(dev, txq);
4303 	dev_xmit_recursion_dec();
4304 
4305 	local_bh_enable();
4306 	return ret;
4307 drop:
4308 	dev_core_stats_tx_dropped_inc(dev);
4309 	kfree_skb_list(skb);
4310 	return NET_XMIT_DROP;
4311 }
4312 EXPORT_SYMBOL(__dev_direct_xmit);
4313 
4314 /*************************************************************************
4315  *			Receiver routines
4316  *************************************************************************/
4317 
4318 int netdev_max_backlog __read_mostly = 1000;
4319 EXPORT_SYMBOL(netdev_max_backlog);
4320 
4321 int netdev_tstamp_prequeue __read_mostly = 1;
4322 unsigned int sysctl_skb_defer_max __read_mostly = 64;
4323 int netdev_budget __read_mostly = 300;
4324 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
4325 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4326 int weight_p __read_mostly = 64;           /* old backlog weight */
4327 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4328 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4329 int dev_rx_weight __read_mostly = 64;
4330 int dev_tx_weight __read_mostly = 64;
4331 
4332 /* Called with irq disabled */
4333 static inline void ____napi_schedule(struct softnet_data *sd,
4334 				     struct napi_struct *napi)
4335 {
4336 	struct task_struct *thread;
4337 
4338 	lockdep_assert_irqs_disabled();
4339 
4340 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4341 		/* Paired with smp_mb__before_atomic() in
4342 		 * napi_enable()/dev_set_threaded().
4343 		 * Use READ_ONCE() to guarantee a complete
4344 		 * read on napi->thread. Only call
4345 		 * wake_up_process() when it's not NULL.
4346 		 */
4347 		thread = READ_ONCE(napi->thread);
4348 		if (thread) {
4349 			/* Avoid doing set_bit() if the thread is in
4350 			 * INTERRUPTIBLE state, cause napi_thread_wait()
4351 			 * makes sure to proceed with napi polling
4352 			 * if the thread is explicitly woken from here.
4353 			 */
4354 			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4355 				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4356 			wake_up_process(thread);
4357 			return;
4358 		}
4359 	}
4360 
4361 	list_add_tail(&napi->poll_list, &sd->poll_list);
4362 	/* If not called from net_rx_action()
4363 	 * we have to raise NET_RX_SOFTIRQ.
4364 	 */
4365 	if (!sd->in_net_rx_action)
4366 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4367 }
4368 
4369 #ifdef CONFIG_RPS
4370 
4371 /* One global table that all flow-based protocols share. */
4372 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4373 EXPORT_SYMBOL(rps_sock_flow_table);
4374 u32 rps_cpu_mask __read_mostly;
4375 EXPORT_SYMBOL(rps_cpu_mask);
4376 
4377 struct static_key_false rps_needed __read_mostly;
4378 EXPORT_SYMBOL(rps_needed);
4379 struct static_key_false rfs_needed __read_mostly;
4380 EXPORT_SYMBOL(rfs_needed);
4381 
4382 static struct rps_dev_flow *
4383 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4384 	    struct rps_dev_flow *rflow, u16 next_cpu)
4385 {
4386 	if (next_cpu < nr_cpu_ids) {
4387 #ifdef CONFIG_RFS_ACCEL
4388 		struct netdev_rx_queue *rxqueue;
4389 		struct rps_dev_flow_table *flow_table;
4390 		struct rps_dev_flow *old_rflow;
4391 		u32 flow_id;
4392 		u16 rxq_index;
4393 		int rc;
4394 
4395 		/* Should we steer this flow to a different hardware queue? */
4396 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4397 		    !(dev->features & NETIF_F_NTUPLE))
4398 			goto out;
4399 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4400 		if (rxq_index == skb_get_rx_queue(skb))
4401 			goto out;
4402 
4403 		rxqueue = dev->_rx + rxq_index;
4404 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4405 		if (!flow_table)
4406 			goto out;
4407 		flow_id = skb_get_hash(skb) & flow_table->mask;
4408 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4409 							rxq_index, flow_id);
4410 		if (rc < 0)
4411 			goto out;
4412 		old_rflow = rflow;
4413 		rflow = &flow_table->flows[flow_id];
4414 		rflow->filter = rc;
4415 		if (old_rflow->filter == rflow->filter)
4416 			old_rflow->filter = RPS_NO_FILTER;
4417 	out:
4418 #endif
4419 		rflow->last_qtail =
4420 			per_cpu(softnet_data, next_cpu).input_queue_head;
4421 	}
4422 
4423 	rflow->cpu = next_cpu;
4424 	return rflow;
4425 }
4426 
4427 /*
4428  * get_rps_cpu is called from netif_receive_skb and returns the target
4429  * CPU from the RPS map of the receiving queue for a given skb.
4430  * rcu_read_lock must be held on entry.
4431  */
4432 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4433 		       struct rps_dev_flow **rflowp)
4434 {
4435 	const struct rps_sock_flow_table *sock_flow_table;
4436 	struct netdev_rx_queue *rxqueue = dev->_rx;
4437 	struct rps_dev_flow_table *flow_table;
4438 	struct rps_map *map;
4439 	int cpu = -1;
4440 	u32 tcpu;
4441 	u32 hash;
4442 
4443 	if (skb_rx_queue_recorded(skb)) {
4444 		u16 index = skb_get_rx_queue(skb);
4445 
4446 		if (unlikely(index >= dev->real_num_rx_queues)) {
4447 			WARN_ONCE(dev->real_num_rx_queues > 1,
4448 				  "%s received packet on queue %u, but number "
4449 				  "of RX queues is %u\n",
4450 				  dev->name, index, dev->real_num_rx_queues);
4451 			goto done;
4452 		}
4453 		rxqueue += index;
4454 	}
4455 
4456 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4457 
4458 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4459 	map = rcu_dereference(rxqueue->rps_map);
4460 	if (!flow_table && !map)
4461 		goto done;
4462 
4463 	skb_reset_network_header(skb);
4464 	hash = skb_get_hash(skb);
4465 	if (!hash)
4466 		goto done;
4467 
4468 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4469 	if (flow_table && sock_flow_table) {
4470 		struct rps_dev_flow *rflow;
4471 		u32 next_cpu;
4472 		u32 ident;
4473 
4474 		/* First check into global flow table if there is a match */
4475 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4476 		if ((ident ^ hash) & ~rps_cpu_mask)
4477 			goto try_rps;
4478 
4479 		next_cpu = ident & rps_cpu_mask;
4480 
4481 		/* OK, now we know there is a match,
4482 		 * we can look at the local (per receive queue) flow table
4483 		 */
4484 		rflow = &flow_table->flows[hash & flow_table->mask];
4485 		tcpu = rflow->cpu;
4486 
4487 		/*
4488 		 * If the desired CPU (where last recvmsg was done) is
4489 		 * different from current CPU (one in the rx-queue flow
4490 		 * table entry), switch if one of the following holds:
4491 		 *   - Current CPU is unset (>= nr_cpu_ids).
4492 		 *   - Current CPU is offline.
4493 		 *   - The current CPU's queue tail has advanced beyond the
4494 		 *     last packet that was enqueued using this table entry.
4495 		 *     This guarantees that all previous packets for the flow
4496 		 *     have been dequeued, thus preserving in order delivery.
4497 		 */
4498 		if (unlikely(tcpu != next_cpu) &&
4499 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4500 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4501 		      rflow->last_qtail)) >= 0)) {
4502 			tcpu = next_cpu;
4503 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4504 		}
4505 
4506 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4507 			*rflowp = rflow;
4508 			cpu = tcpu;
4509 			goto done;
4510 		}
4511 	}
4512 
4513 try_rps:
4514 
4515 	if (map) {
4516 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4517 		if (cpu_online(tcpu)) {
4518 			cpu = tcpu;
4519 			goto done;
4520 		}
4521 	}
4522 
4523 done:
4524 	return cpu;
4525 }
4526 
4527 #ifdef CONFIG_RFS_ACCEL
4528 
4529 /**
4530  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4531  * @dev: Device on which the filter was set
4532  * @rxq_index: RX queue index
4533  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4534  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4535  *
4536  * Drivers that implement ndo_rx_flow_steer() should periodically call
4537  * this function for each installed filter and remove the filters for
4538  * which it returns %true.
4539  */
4540 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4541 			 u32 flow_id, u16 filter_id)
4542 {
4543 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4544 	struct rps_dev_flow_table *flow_table;
4545 	struct rps_dev_flow *rflow;
4546 	bool expire = true;
4547 	unsigned int cpu;
4548 
4549 	rcu_read_lock();
4550 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4551 	if (flow_table && flow_id <= flow_table->mask) {
4552 		rflow = &flow_table->flows[flow_id];
4553 		cpu = READ_ONCE(rflow->cpu);
4554 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4555 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4556 			   rflow->last_qtail) <
4557 		     (int)(10 * flow_table->mask)))
4558 			expire = false;
4559 	}
4560 	rcu_read_unlock();
4561 	return expire;
4562 }
4563 EXPORT_SYMBOL(rps_may_expire_flow);
4564 
4565 #endif /* CONFIG_RFS_ACCEL */
4566 
4567 /* Called from hardirq (IPI) context */
4568 static void rps_trigger_softirq(void *data)
4569 {
4570 	struct softnet_data *sd = data;
4571 
4572 	____napi_schedule(sd, &sd->backlog);
4573 	sd->received_rps++;
4574 }
4575 
4576 #endif /* CONFIG_RPS */
4577 
4578 /* Called from hardirq (IPI) context */
4579 static void trigger_rx_softirq(void *data)
4580 {
4581 	struct softnet_data *sd = data;
4582 
4583 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4584 	smp_store_release(&sd->defer_ipi_scheduled, 0);
4585 }
4586 
4587 /*
4588  * After we queued a packet into sd->input_pkt_queue,
4589  * we need to make sure this queue is serviced soon.
4590  *
4591  * - If this is another cpu queue, link it to our rps_ipi_list,
4592  *   and make sure we will process rps_ipi_list from net_rx_action().
4593  *
4594  * - If this is our own queue, NAPI schedule our backlog.
4595  *   Note that this also raises NET_RX_SOFTIRQ.
4596  */
4597 static void napi_schedule_rps(struct softnet_data *sd)
4598 {
4599 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4600 
4601 #ifdef CONFIG_RPS
4602 	if (sd != mysd) {
4603 		sd->rps_ipi_next = mysd->rps_ipi_list;
4604 		mysd->rps_ipi_list = sd;
4605 
4606 		/* If not called from net_rx_action()
4607 		 * we have to raise NET_RX_SOFTIRQ.
4608 		 */
4609 		if (!mysd->in_net_rx_action)
4610 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4611 		return;
4612 	}
4613 #endif /* CONFIG_RPS */
4614 	__napi_schedule_irqoff(&mysd->backlog);
4615 }
4616 
4617 #ifdef CONFIG_NET_FLOW_LIMIT
4618 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4619 #endif
4620 
4621 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4622 {
4623 #ifdef CONFIG_NET_FLOW_LIMIT
4624 	struct sd_flow_limit *fl;
4625 	struct softnet_data *sd;
4626 	unsigned int old_flow, new_flow;
4627 
4628 	if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
4629 		return false;
4630 
4631 	sd = this_cpu_ptr(&softnet_data);
4632 
4633 	rcu_read_lock();
4634 	fl = rcu_dereference(sd->flow_limit);
4635 	if (fl) {
4636 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4637 		old_flow = fl->history[fl->history_head];
4638 		fl->history[fl->history_head] = new_flow;
4639 
4640 		fl->history_head++;
4641 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4642 
4643 		if (likely(fl->buckets[old_flow]))
4644 			fl->buckets[old_flow]--;
4645 
4646 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4647 			fl->count++;
4648 			rcu_read_unlock();
4649 			return true;
4650 		}
4651 	}
4652 	rcu_read_unlock();
4653 #endif
4654 	return false;
4655 }
4656 
4657 /*
4658  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4659  * queue (may be a remote CPU queue).
4660  */
4661 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4662 			      unsigned int *qtail)
4663 {
4664 	enum skb_drop_reason reason;
4665 	struct softnet_data *sd;
4666 	unsigned long flags;
4667 	unsigned int qlen;
4668 
4669 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
4670 	sd = &per_cpu(softnet_data, cpu);
4671 
4672 	rps_lock_irqsave(sd, &flags);
4673 	if (!netif_running(skb->dev))
4674 		goto drop;
4675 	qlen = skb_queue_len(&sd->input_pkt_queue);
4676 	if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
4677 		if (qlen) {
4678 enqueue:
4679 			__skb_queue_tail(&sd->input_pkt_queue, skb);
4680 			input_queue_tail_incr_save(sd, qtail);
4681 			rps_unlock_irq_restore(sd, &flags);
4682 			return NET_RX_SUCCESS;
4683 		}
4684 
4685 		/* Schedule NAPI for backlog device
4686 		 * We can use non atomic operation since we own the queue lock
4687 		 */
4688 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4689 			napi_schedule_rps(sd);
4690 		goto enqueue;
4691 	}
4692 	reason = SKB_DROP_REASON_CPU_BACKLOG;
4693 
4694 drop:
4695 	sd->dropped++;
4696 	rps_unlock_irq_restore(sd, &flags);
4697 
4698 	dev_core_stats_rx_dropped_inc(skb->dev);
4699 	kfree_skb_reason(skb, reason);
4700 	return NET_RX_DROP;
4701 }
4702 
4703 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4704 {
4705 	struct net_device *dev = skb->dev;
4706 	struct netdev_rx_queue *rxqueue;
4707 
4708 	rxqueue = dev->_rx;
4709 
4710 	if (skb_rx_queue_recorded(skb)) {
4711 		u16 index = skb_get_rx_queue(skb);
4712 
4713 		if (unlikely(index >= dev->real_num_rx_queues)) {
4714 			WARN_ONCE(dev->real_num_rx_queues > 1,
4715 				  "%s received packet on queue %u, but number "
4716 				  "of RX queues is %u\n",
4717 				  dev->name, index, dev->real_num_rx_queues);
4718 
4719 			return rxqueue; /* Return first rxqueue */
4720 		}
4721 		rxqueue += index;
4722 	}
4723 	return rxqueue;
4724 }
4725 
4726 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4727 			     struct bpf_prog *xdp_prog)
4728 {
4729 	void *orig_data, *orig_data_end, *hard_start;
4730 	struct netdev_rx_queue *rxqueue;
4731 	bool orig_bcast, orig_host;
4732 	u32 mac_len, frame_sz;
4733 	__be16 orig_eth_type;
4734 	struct ethhdr *eth;
4735 	u32 metalen, act;
4736 	int off;
4737 
4738 	/* The XDP program wants to see the packet starting at the MAC
4739 	 * header.
4740 	 */
4741 	mac_len = skb->data - skb_mac_header(skb);
4742 	hard_start = skb->data - skb_headroom(skb);
4743 
4744 	/* SKB "head" area always have tailroom for skb_shared_info */
4745 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4746 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4747 
4748 	rxqueue = netif_get_rxqueue(skb);
4749 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4750 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4751 			 skb_headlen(skb) + mac_len, true);
4752 
4753 	orig_data_end = xdp->data_end;
4754 	orig_data = xdp->data;
4755 	eth = (struct ethhdr *)xdp->data;
4756 	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4757 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4758 	orig_eth_type = eth->h_proto;
4759 
4760 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4761 
4762 	/* check if bpf_xdp_adjust_head was used */
4763 	off = xdp->data - orig_data;
4764 	if (off) {
4765 		if (off > 0)
4766 			__skb_pull(skb, off);
4767 		else if (off < 0)
4768 			__skb_push(skb, -off);
4769 
4770 		skb->mac_header += off;
4771 		skb_reset_network_header(skb);
4772 	}
4773 
4774 	/* check if bpf_xdp_adjust_tail was used */
4775 	off = xdp->data_end - orig_data_end;
4776 	if (off != 0) {
4777 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4778 		skb->len += off; /* positive on grow, negative on shrink */
4779 	}
4780 
4781 	/* check if XDP changed eth hdr such SKB needs update */
4782 	eth = (struct ethhdr *)xdp->data;
4783 	if ((orig_eth_type != eth->h_proto) ||
4784 	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
4785 						  skb->dev->dev_addr)) ||
4786 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4787 		__skb_push(skb, ETH_HLEN);
4788 		skb->pkt_type = PACKET_HOST;
4789 		skb->protocol = eth_type_trans(skb, skb->dev);
4790 	}
4791 
4792 	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4793 	 * before calling us again on redirect path. We do not call do_redirect
4794 	 * as we leave that up to the caller.
4795 	 *
4796 	 * Caller is responsible for managing lifetime of skb (i.e. calling
4797 	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4798 	 */
4799 	switch (act) {
4800 	case XDP_REDIRECT:
4801 	case XDP_TX:
4802 		__skb_push(skb, mac_len);
4803 		break;
4804 	case XDP_PASS:
4805 		metalen = xdp->data - xdp->data_meta;
4806 		if (metalen)
4807 			skb_metadata_set(skb, metalen);
4808 		break;
4809 	}
4810 
4811 	return act;
4812 }
4813 
4814 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4815 				     struct xdp_buff *xdp,
4816 				     struct bpf_prog *xdp_prog)
4817 {
4818 	u32 act = XDP_DROP;
4819 
4820 	/* Reinjected packets coming from act_mirred or similar should
4821 	 * not get XDP generic processing.
4822 	 */
4823 	if (skb_is_redirected(skb))
4824 		return XDP_PASS;
4825 
4826 	/* XDP packets must be linear and must have sufficient headroom
4827 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4828 	 * native XDP provides, thus we need to do it here as well.
4829 	 */
4830 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4831 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4832 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4833 		int troom = skb->tail + skb->data_len - skb->end;
4834 
4835 		/* In case we have to go down the path and also linearize,
4836 		 * then lets do the pskb_expand_head() work just once here.
4837 		 */
4838 		if (pskb_expand_head(skb,
4839 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4840 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4841 			goto do_drop;
4842 		if (skb_linearize(skb))
4843 			goto do_drop;
4844 	}
4845 
4846 	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4847 	switch (act) {
4848 	case XDP_REDIRECT:
4849 	case XDP_TX:
4850 	case XDP_PASS:
4851 		break;
4852 	default:
4853 		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4854 		fallthrough;
4855 	case XDP_ABORTED:
4856 		trace_xdp_exception(skb->dev, xdp_prog, act);
4857 		fallthrough;
4858 	case XDP_DROP:
4859 	do_drop:
4860 		kfree_skb(skb);
4861 		break;
4862 	}
4863 
4864 	return act;
4865 }
4866 
4867 /* When doing generic XDP we have to bypass the qdisc layer and the
4868  * network taps in order to match in-driver-XDP behavior. This also means
4869  * that XDP packets are able to starve other packets going through a qdisc,
4870  * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
4871  * queues, so they do not have this starvation issue.
4872  */
4873 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4874 {
4875 	struct net_device *dev = skb->dev;
4876 	struct netdev_queue *txq;
4877 	bool free_skb = true;
4878 	int cpu, rc;
4879 
4880 	txq = netdev_core_pick_tx(dev, skb, NULL);
4881 	cpu = smp_processor_id();
4882 	HARD_TX_LOCK(dev, txq, cpu);
4883 	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
4884 		rc = netdev_start_xmit(skb, dev, txq, 0);
4885 		if (dev_xmit_complete(rc))
4886 			free_skb = false;
4887 	}
4888 	HARD_TX_UNLOCK(dev, txq);
4889 	if (free_skb) {
4890 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
4891 		dev_core_stats_tx_dropped_inc(dev);
4892 		kfree_skb(skb);
4893 	}
4894 }
4895 
4896 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4897 
4898 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4899 {
4900 	if (xdp_prog) {
4901 		struct xdp_buff xdp;
4902 		u32 act;
4903 		int err;
4904 
4905 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4906 		if (act != XDP_PASS) {
4907 			switch (act) {
4908 			case XDP_REDIRECT:
4909 				err = xdp_do_generic_redirect(skb->dev, skb,
4910 							      &xdp, xdp_prog);
4911 				if (err)
4912 					goto out_redir;
4913 				break;
4914 			case XDP_TX:
4915 				generic_xdp_tx(skb, xdp_prog);
4916 				break;
4917 			}
4918 			return XDP_DROP;
4919 		}
4920 	}
4921 	return XDP_PASS;
4922 out_redir:
4923 	kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
4924 	return XDP_DROP;
4925 }
4926 EXPORT_SYMBOL_GPL(do_xdp_generic);
4927 
4928 static int netif_rx_internal(struct sk_buff *skb)
4929 {
4930 	int ret;
4931 
4932 	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
4933 
4934 	trace_netif_rx(skb);
4935 
4936 #ifdef CONFIG_RPS
4937 	if (static_branch_unlikely(&rps_needed)) {
4938 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4939 		int cpu;
4940 
4941 		rcu_read_lock();
4942 
4943 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4944 		if (cpu < 0)
4945 			cpu = smp_processor_id();
4946 
4947 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4948 
4949 		rcu_read_unlock();
4950 	} else
4951 #endif
4952 	{
4953 		unsigned int qtail;
4954 
4955 		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
4956 	}
4957 	return ret;
4958 }
4959 
4960 /**
4961  *	__netif_rx	-	Slightly optimized version of netif_rx
4962  *	@skb: buffer to post
4963  *
4964  *	This behaves as netif_rx except that it does not disable bottom halves.
4965  *	As a result this function may only be invoked from the interrupt context
4966  *	(either hard or soft interrupt).
4967  */
4968 int __netif_rx(struct sk_buff *skb)
4969 {
4970 	int ret;
4971 
4972 	lockdep_assert_once(hardirq_count() | softirq_count());
4973 
4974 	trace_netif_rx_entry(skb);
4975 	ret = netif_rx_internal(skb);
4976 	trace_netif_rx_exit(ret);
4977 	return ret;
4978 }
4979 EXPORT_SYMBOL(__netif_rx);
4980 
4981 /**
4982  *	netif_rx	-	post buffer to the network code
4983  *	@skb: buffer to post
4984  *
4985  *	This function receives a packet from a device driver and queues it for
4986  *	the upper (protocol) levels to process via the backlog NAPI device. It
4987  *	always succeeds. The buffer may be dropped during processing for
4988  *	congestion control or by the protocol layers.
4989  *	The network buffer is passed via the backlog NAPI device. Modern NIC
4990  *	driver should use NAPI and GRO.
4991  *	This function can used from interrupt and from process context. The
4992  *	caller from process context must not disable interrupts before invoking
4993  *	this function.
4994  *
4995  *	return values:
4996  *	NET_RX_SUCCESS	(no congestion)
4997  *	NET_RX_DROP     (packet was dropped)
4998  *
4999  */
5000 int netif_rx(struct sk_buff *skb)
5001 {
5002 	bool need_bh_off = !(hardirq_count() | softirq_count());
5003 	int ret;
5004 
5005 	if (need_bh_off)
5006 		local_bh_disable();
5007 	trace_netif_rx_entry(skb);
5008 	ret = netif_rx_internal(skb);
5009 	trace_netif_rx_exit(ret);
5010 	if (need_bh_off)
5011 		local_bh_enable();
5012 	return ret;
5013 }
5014 EXPORT_SYMBOL(netif_rx);
5015 
5016 static __latent_entropy void net_tx_action(struct softirq_action *h)
5017 {
5018 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5019 
5020 	if (sd->completion_queue) {
5021 		struct sk_buff *clist;
5022 
5023 		local_irq_disable();
5024 		clist = sd->completion_queue;
5025 		sd->completion_queue = NULL;
5026 		local_irq_enable();
5027 
5028 		while (clist) {
5029 			struct sk_buff *skb = clist;
5030 
5031 			clist = clist->next;
5032 
5033 			WARN_ON(refcount_read(&skb->users));
5034 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5035 				trace_consume_skb(skb, net_tx_action);
5036 			else
5037 				trace_kfree_skb(skb, net_tx_action,
5038 						get_kfree_skb_cb(skb)->reason);
5039 
5040 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5041 				__kfree_skb(skb);
5042 			else
5043 				__kfree_skb_defer(skb);
5044 		}
5045 	}
5046 
5047 	if (sd->output_queue) {
5048 		struct Qdisc *head;
5049 
5050 		local_irq_disable();
5051 		head = sd->output_queue;
5052 		sd->output_queue = NULL;
5053 		sd->output_queue_tailp = &sd->output_queue;
5054 		local_irq_enable();
5055 
5056 		rcu_read_lock();
5057 
5058 		while (head) {
5059 			struct Qdisc *q = head;
5060 			spinlock_t *root_lock = NULL;
5061 
5062 			head = head->next_sched;
5063 
5064 			/* We need to make sure head->next_sched is read
5065 			 * before clearing __QDISC_STATE_SCHED
5066 			 */
5067 			smp_mb__before_atomic();
5068 
5069 			if (!(q->flags & TCQ_F_NOLOCK)) {
5070 				root_lock = qdisc_lock(q);
5071 				spin_lock(root_lock);
5072 			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5073 						     &q->state))) {
5074 				/* There is a synchronize_net() between
5075 				 * STATE_DEACTIVATED flag being set and
5076 				 * qdisc_reset()/some_qdisc_is_busy() in
5077 				 * dev_deactivate(), so we can safely bail out
5078 				 * early here to avoid data race between
5079 				 * qdisc_deactivate() and some_qdisc_is_busy()
5080 				 * for lockless qdisc.
5081 				 */
5082 				clear_bit(__QDISC_STATE_SCHED, &q->state);
5083 				continue;
5084 			}
5085 
5086 			clear_bit(__QDISC_STATE_SCHED, &q->state);
5087 			qdisc_run(q);
5088 			if (root_lock)
5089 				spin_unlock(root_lock);
5090 		}
5091 
5092 		rcu_read_unlock();
5093 	}
5094 
5095 	xfrm_dev_backlog(sd);
5096 }
5097 
5098 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5099 /* This hook is defined here for ATM LANE */
5100 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5101 			     unsigned char *addr) __read_mostly;
5102 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5103 #endif
5104 
5105 static inline struct sk_buff *
5106 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
5107 		   struct net_device *orig_dev, bool *another)
5108 {
5109 #ifdef CONFIG_NET_CLS_ACT
5110 	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
5111 	struct tcf_result cl_res;
5112 
5113 	/* If there's at least one ingress present somewhere (so
5114 	 * we get here via enabled static key), remaining devices
5115 	 * that are not configured with an ingress qdisc will bail
5116 	 * out here.
5117 	 */
5118 	if (!miniq)
5119 		return skb;
5120 
5121 	if (*pt_prev) {
5122 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
5123 		*pt_prev = NULL;
5124 	}
5125 
5126 	qdisc_skb_cb(skb)->pkt_len = skb->len;
5127 	tc_skb_cb(skb)->mru = 0;
5128 	tc_skb_cb(skb)->post_ct = false;
5129 	skb->tc_at_ingress = 1;
5130 	mini_qdisc_bstats_cpu_update(miniq, skb);
5131 
5132 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
5133 	case TC_ACT_OK:
5134 	case TC_ACT_RECLASSIFY:
5135 		skb->tc_index = TC_H_MIN(cl_res.classid);
5136 		break;
5137 	case TC_ACT_SHOT:
5138 		mini_qdisc_qstats_cpu_drop(miniq);
5139 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
5140 		*ret = NET_RX_DROP;
5141 		return NULL;
5142 	case TC_ACT_STOLEN:
5143 	case TC_ACT_QUEUED:
5144 	case TC_ACT_TRAP:
5145 		consume_skb(skb);
5146 		*ret = NET_RX_SUCCESS;
5147 		return NULL;
5148 	case TC_ACT_REDIRECT:
5149 		/* skb_mac_header check was done by cls/act_bpf, so
5150 		 * we can safely push the L2 header back before
5151 		 * redirecting to another netdev
5152 		 */
5153 		__skb_push(skb, skb->mac_len);
5154 		if (skb_do_redirect(skb) == -EAGAIN) {
5155 			__skb_pull(skb, skb->mac_len);
5156 			*another = true;
5157 			break;
5158 		}
5159 		*ret = NET_RX_SUCCESS;
5160 		return NULL;
5161 	case TC_ACT_CONSUMED:
5162 		*ret = NET_RX_SUCCESS;
5163 		return NULL;
5164 	default:
5165 		break;
5166 	}
5167 #endif /* CONFIG_NET_CLS_ACT */
5168 	return skb;
5169 }
5170 
5171 /**
5172  *	netdev_is_rx_handler_busy - check if receive handler is registered
5173  *	@dev: device to check
5174  *
5175  *	Check if a receive handler is already registered for a given device.
5176  *	Return true if there one.
5177  *
5178  *	The caller must hold the rtnl_mutex.
5179  */
5180 bool netdev_is_rx_handler_busy(struct net_device *dev)
5181 {
5182 	ASSERT_RTNL();
5183 	return dev && rtnl_dereference(dev->rx_handler);
5184 }
5185 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5186 
5187 /**
5188  *	netdev_rx_handler_register - register receive handler
5189  *	@dev: device to register a handler for
5190  *	@rx_handler: receive handler to register
5191  *	@rx_handler_data: data pointer that is used by rx handler
5192  *
5193  *	Register a receive handler for a device. This handler will then be
5194  *	called from __netif_receive_skb. A negative errno code is returned
5195  *	on a failure.
5196  *
5197  *	The caller must hold the rtnl_mutex.
5198  *
5199  *	For a general description of rx_handler, see enum rx_handler_result.
5200  */
5201 int netdev_rx_handler_register(struct net_device *dev,
5202 			       rx_handler_func_t *rx_handler,
5203 			       void *rx_handler_data)
5204 {
5205 	if (netdev_is_rx_handler_busy(dev))
5206 		return -EBUSY;
5207 
5208 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5209 		return -EINVAL;
5210 
5211 	/* Note: rx_handler_data must be set before rx_handler */
5212 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5213 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5214 
5215 	return 0;
5216 }
5217 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
5218 
5219 /**
5220  *	netdev_rx_handler_unregister - unregister receive handler
5221  *	@dev: device to unregister a handler from
5222  *
5223  *	Unregister a receive handler from a device.
5224  *
5225  *	The caller must hold the rtnl_mutex.
5226  */
5227 void netdev_rx_handler_unregister(struct net_device *dev)
5228 {
5229 
5230 	ASSERT_RTNL();
5231 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5232 	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5233 	 * section has a guarantee to see a non NULL rx_handler_data
5234 	 * as well.
5235 	 */
5236 	synchronize_net();
5237 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5238 }
5239 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5240 
5241 /*
5242  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5243  * the special handling of PFMEMALLOC skbs.
5244  */
5245 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5246 {
5247 	switch (skb->protocol) {
5248 	case htons(ETH_P_ARP):
5249 	case htons(ETH_P_IP):
5250 	case htons(ETH_P_IPV6):
5251 	case htons(ETH_P_8021Q):
5252 	case htons(ETH_P_8021AD):
5253 		return true;
5254 	default:
5255 		return false;
5256 	}
5257 }
5258 
5259 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5260 			     int *ret, struct net_device *orig_dev)
5261 {
5262 	if (nf_hook_ingress_active(skb)) {
5263 		int ingress_retval;
5264 
5265 		if (*pt_prev) {
5266 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5267 			*pt_prev = NULL;
5268 		}
5269 
5270 		rcu_read_lock();
5271 		ingress_retval = nf_hook_ingress(skb);
5272 		rcu_read_unlock();
5273 		return ingress_retval;
5274 	}
5275 	return 0;
5276 }
5277 
5278 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5279 				    struct packet_type **ppt_prev)
5280 {
5281 	struct packet_type *ptype, *pt_prev;
5282 	rx_handler_func_t *rx_handler;
5283 	struct sk_buff *skb = *pskb;
5284 	struct net_device *orig_dev;
5285 	bool deliver_exact = false;
5286 	int ret = NET_RX_DROP;
5287 	__be16 type;
5288 
5289 	net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
5290 
5291 	trace_netif_receive_skb(skb);
5292 
5293 	orig_dev = skb->dev;
5294 
5295 	skb_reset_network_header(skb);
5296 	if (!skb_transport_header_was_set(skb))
5297 		skb_reset_transport_header(skb);
5298 	skb_reset_mac_len(skb);
5299 
5300 	pt_prev = NULL;
5301 
5302 another_round:
5303 	skb->skb_iif = skb->dev->ifindex;
5304 
5305 	__this_cpu_inc(softnet_data.processed);
5306 
5307 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5308 		int ret2;
5309 
5310 		migrate_disable();
5311 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5312 		migrate_enable();
5313 
5314 		if (ret2 != XDP_PASS) {
5315 			ret = NET_RX_DROP;
5316 			goto out;
5317 		}
5318 	}
5319 
5320 	if (eth_type_vlan(skb->protocol)) {
5321 		skb = skb_vlan_untag(skb);
5322 		if (unlikely(!skb))
5323 			goto out;
5324 	}
5325 
5326 	if (skb_skip_tc_classify(skb))
5327 		goto skip_classify;
5328 
5329 	if (pfmemalloc)
5330 		goto skip_taps;
5331 
5332 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5333 		if (pt_prev)
5334 			ret = deliver_skb(skb, pt_prev, orig_dev);
5335 		pt_prev = ptype;
5336 	}
5337 
5338 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5339 		if (pt_prev)
5340 			ret = deliver_skb(skb, pt_prev, orig_dev);
5341 		pt_prev = ptype;
5342 	}
5343 
5344 skip_taps:
5345 #ifdef CONFIG_NET_INGRESS
5346 	if (static_branch_unlikely(&ingress_needed_key)) {
5347 		bool another = false;
5348 
5349 		nf_skip_egress(skb, true);
5350 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5351 					 &another);
5352 		if (another)
5353 			goto another_round;
5354 		if (!skb)
5355 			goto out;
5356 
5357 		nf_skip_egress(skb, false);
5358 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5359 			goto out;
5360 	}
5361 #endif
5362 	skb_reset_redirect(skb);
5363 skip_classify:
5364 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5365 		goto drop;
5366 
5367 	if (skb_vlan_tag_present(skb)) {
5368 		if (pt_prev) {
5369 			ret = deliver_skb(skb, pt_prev, orig_dev);
5370 			pt_prev = NULL;
5371 		}
5372 		if (vlan_do_receive(&skb))
5373 			goto another_round;
5374 		else if (unlikely(!skb))
5375 			goto out;
5376 	}
5377 
5378 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5379 	if (rx_handler) {
5380 		if (pt_prev) {
5381 			ret = deliver_skb(skb, pt_prev, orig_dev);
5382 			pt_prev = NULL;
5383 		}
5384 		switch (rx_handler(&skb)) {
5385 		case RX_HANDLER_CONSUMED:
5386 			ret = NET_RX_SUCCESS;
5387 			goto out;
5388 		case RX_HANDLER_ANOTHER:
5389 			goto another_round;
5390 		case RX_HANDLER_EXACT:
5391 			deliver_exact = true;
5392 			break;
5393 		case RX_HANDLER_PASS:
5394 			break;
5395 		default:
5396 			BUG();
5397 		}
5398 	}
5399 
5400 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5401 check_vlan_id:
5402 		if (skb_vlan_tag_get_id(skb)) {
5403 			/* Vlan id is non 0 and vlan_do_receive() above couldn't
5404 			 * find vlan device.
5405 			 */
5406 			skb->pkt_type = PACKET_OTHERHOST;
5407 		} else if (eth_type_vlan(skb->protocol)) {
5408 			/* Outer header is 802.1P with vlan 0, inner header is
5409 			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5410 			 * not find vlan dev for vlan id 0.
5411 			 */
5412 			__vlan_hwaccel_clear_tag(skb);
5413 			skb = skb_vlan_untag(skb);
5414 			if (unlikely(!skb))
5415 				goto out;
5416 			if (vlan_do_receive(&skb))
5417 				/* After stripping off 802.1P header with vlan 0
5418 				 * vlan dev is found for inner header.
5419 				 */
5420 				goto another_round;
5421 			else if (unlikely(!skb))
5422 				goto out;
5423 			else
5424 				/* We have stripped outer 802.1P vlan 0 header.
5425 				 * But could not find vlan dev.
5426 				 * check again for vlan id to set OTHERHOST.
5427 				 */
5428 				goto check_vlan_id;
5429 		}
5430 		/* Note: we might in the future use prio bits
5431 		 * and set skb->priority like in vlan_do_receive()
5432 		 * For the time being, just ignore Priority Code Point
5433 		 */
5434 		__vlan_hwaccel_clear_tag(skb);
5435 	}
5436 
5437 	type = skb->protocol;
5438 
5439 	/* deliver only exact match when indicated */
5440 	if (likely(!deliver_exact)) {
5441 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5442 				       &ptype_base[ntohs(type) &
5443 						   PTYPE_HASH_MASK]);
5444 	}
5445 
5446 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5447 			       &orig_dev->ptype_specific);
5448 
5449 	if (unlikely(skb->dev != orig_dev)) {
5450 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5451 				       &skb->dev->ptype_specific);
5452 	}
5453 
5454 	if (pt_prev) {
5455 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5456 			goto drop;
5457 		*ppt_prev = pt_prev;
5458 	} else {
5459 drop:
5460 		if (!deliver_exact)
5461 			dev_core_stats_rx_dropped_inc(skb->dev);
5462 		else
5463 			dev_core_stats_rx_nohandler_inc(skb->dev);
5464 		kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5465 		/* Jamal, now you will not able to escape explaining
5466 		 * me how you were going to use this. :-)
5467 		 */
5468 		ret = NET_RX_DROP;
5469 	}
5470 
5471 out:
5472 	/* The invariant here is that if *ppt_prev is not NULL
5473 	 * then skb should also be non-NULL.
5474 	 *
5475 	 * Apparently *ppt_prev assignment above holds this invariant due to
5476 	 * skb dereferencing near it.
5477 	 */
5478 	*pskb = skb;
5479 	return ret;
5480 }
5481 
5482 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5483 {
5484 	struct net_device *orig_dev = skb->dev;
5485 	struct packet_type *pt_prev = NULL;
5486 	int ret;
5487 
5488 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5489 	if (pt_prev)
5490 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5491 					 skb->dev, pt_prev, orig_dev);
5492 	return ret;
5493 }
5494 
5495 /**
5496  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5497  *	@skb: buffer to process
5498  *
5499  *	More direct receive version of netif_receive_skb().  It should
5500  *	only be used by callers that have a need to skip RPS and Generic XDP.
5501  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5502  *
5503  *	This function may only be called from softirq context and interrupts
5504  *	should be enabled.
5505  *
5506  *	Return values (usually ignored):
5507  *	NET_RX_SUCCESS: no congestion
5508  *	NET_RX_DROP: packet was dropped
5509  */
5510 int netif_receive_skb_core(struct sk_buff *skb)
5511 {
5512 	int ret;
5513 
5514 	rcu_read_lock();
5515 	ret = __netif_receive_skb_one_core(skb, false);
5516 	rcu_read_unlock();
5517 
5518 	return ret;
5519 }
5520 EXPORT_SYMBOL(netif_receive_skb_core);
5521 
5522 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5523 						  struct packet_type *pt_prev,
5524 						  struct net_device *orig_dev)
5525 {
5526 	struct sk_buff *skb, *next;
5527 
5528 	if (!pt_prev)
5529 		return;
5530 	if (list_empty(head))
5531 		return;
5532 	if (pt_prev->list_func != NULL)
5533 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5534 				   ip_list_rcv, head, pt_prev, orig_dev);
5535 	else
5536 		list_for_each_entry_safe(skb, next, head, list) {
5537 			skb_list_del_init(skb);
5538 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5539 		}
5540 }
5541 
5542 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5543 {
5544 	/* Fast-path assumptions:
5545 	 * - There is no RX handler.
5546 	 * - Only one packet_type matches.
5547 	 * If either of these fails, we will end up doing some per-packet
5548 	 * processing in-line, then handling the 'last ptype' for the whole
5549 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5550 	 * because the 'last ptype' must be constant across the sublist, and all
5551 	 * other ptypes are handled per-packet.
5552 	 */
5553 	/* Current (common) ptype of sublist */
5554 	struct packet_type *pt_curr = NULL;
5555 	/* Current (common) orig_dev of sublist */
5556 	struct net_device *od_curr = NULL;
5557 	struct list_head sublist;
5558 	struct sk_buff *skb, *next;
5559 
5560 	INIT_LIST_HEAD(&sublist);
5561 	list_for_each_entry_safe(skb, next, head, list) {
5562 		struct net_device *orig_dev = skb->dev;
5563 		struct packet_type *pt_prev = NULL;
5564 
5565 		skb_list_del_init(skb);
5566 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5567 		if (!pt_prev)
5568 			continue;
5569 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5570 			/* dispatch old sublist */
5571 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5572 			/* start new sublist */
5573 			INIT_LIST_HEAD(&sublist);
5574 			pt_curr = pt_prev;
5575 			od_curr = orig_dev;
5576 		}
5577 		list_add_tail(&skb->list, &sublist);
5578 	}
5579 
5580 	/* dispatch final sublist */
5581 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5582 }
5583 
5584 static int __netif_receive_skb(struct sk_buff *skb)
5585 {
5586 	int ret;
5587 
5588 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5589 		unsigned int noreclaim_flag;
5590 
5591 		/*
5592 		 * PFMEMALLOC skbs are special, they should
5593 		 * - be delivered to SOCK_MEMALLOC sockets only
5594 		 * - stay away from userspace
5595 		 * - have bounded memory usage
5596 		 *
5597 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5598 		 * context down to all allocation sites.
5599 		 */
5600 		noreclaim_flag = memalloc_noreclaim_save();
5601 		ret = __netif_receive_skb_one_core(skb, true);
5602 		memalloc_noreclaim_restore(noreclaim_flag);
5603 	} else
5604 		ret = __netif_receive_skb_one_core(skb, false);
5605 
5606 	return ret;
5607 }
5608 
5609 static void __netif_receive_skb_list(struct list_head *head)
5610 {
5611 	unsigned long noreclaim_flag = 0;
5612 	struct sk_buff *skb, *next;
5613 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5614 
5615 	list_for_each_entry_safe(skb, next, head, list) {
5616 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5617 			struct list_head sublist;
5618 
5619 			/* Handle the previous sublist */
5620 			list_cut_before(&sublist, head, &skb->list);
5621 			if (!list_empty(&sublist))
5622 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5623 			pfmemalloc = !pfmemalloc;
5624 			/* See comments in __netif_receive_skb */
5625 			if (pfmemalloc)
5626 				noreclaim_flag = memalloc_noreclaim_save();
5627 			else
5628 				memalloc_noreclaim_restore(noreclaim_flag);
5629 		}
5630 	}
5631 	/* Handle the remaining sublist */
5632 	if (!list_empty(head))
5633 		__netif_receive_skb_list_core(head, pfmemalloc);
5634 	/* Restore pflags */
5635 	if (pfmemalloc)
5636 		memalloc_noreclaim_restore(noreclaim_flag);
5637 }
5638 
5639 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5640 {
5641 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5642 	struct bpf_prog *new = xdp->prog;
5643 	int ret = 0;
5644 
5645 	switch (xdp->command) {
5646 	case XDP_SETUP_PROG:
5647 		rcu_assign_pointer(dev->xdp_prog, new);
5648 		if (old)
5649 			bpf_prog_put(old);
5650 
5651 		if (old && !new) {
5652 			static_branch_dec(&generic_xdp_needed_key);
5653 		} else if (new && !old) {
5654 			static_branch_inc(&generic_xdp_needed_key);
5655 			dev_disable_lro(dev);
5656 			dev_disable_gro_hw(dev);
5657 		}
5658 		break;
5659 
5660 	default:
5661 		ret = -EINVAL;
5662 		break;
5663 	}
5664 
5665 	return ret;
5666 }
5667 
5668 static int netif_receive_skb_internal(struct sk_buff *skb)
5669 {
5670 	int ret;
5671 
5672 	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5673 
5674 	if (skb_defer_rx_timestamp(skb))
5675 		return NET_RX_SUCCESS;
5676 
5677 	rcu_read_lock();
5678 #ifdef CONFIG_RPS
5679 	if (static_branch_unlikely(&rps_needed)) {
5680 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5681 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5682 
5683 		if (cpu >= 0) {
5684 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5685 			rcu_read_unlock();
5686 			return ret;
5687 		}
5688 	}
5689 #endif
5690 	ret = __netif_receive_skb(skb);
5691 	rcu_read_unlock();
5692 	return ret;
5693 }
5694 
5695 void netif_receive_skb_list_internal(struct list_head *head)
5696 {
5697 	struct sk_buff *skb, *next;
5698 	struct list_head sublist;
5699 
5700 	INIT_LIST_HEAD(&sublist);
5701 	list_for_each_entry_safe(skb, next, head, list) {
5702 		net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5703 		skb_list_del_init(skb);
5704 		if (!skb_defer_rx_timestamp(skb))
5705 			list_add_tail(&skb->list, &sublist);
5706 	}
5707 	list_splice_init(&sublist, head);
5708 
5709 	rcu_read_lock();
5710 #ifdef CONFIG_RPS
5711 	if (static_branch_unlikely(&rps_needed)) {
5712 		list_for_each_entry_safe(skb, next, head, list) {
5713 			struct rps_dev_flow voidflow, *rflow = &voidflow;
5714 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5715 
5716 			if (cpu >= 0) {
5717 				/* Will be handled, remove from list */
5718 				skb_list_del_init(skb);
5719 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5720 			}
5721 		}
5722 	}
5723 #endif
5724 	__netif_receive_skb_list(head);
5725 	rcu_read_unlock();
5726 }
5727 
5728 /**
5729  *	netif_receive_skb - process receive buffer from network
5730  *	@skb: buffer to process
5731  *
5732  *	netif_receive_skb() is the main receive data processing function.
5733  *	It always succeeds. The buffer may be dropped during processing
5734  *	for congestion control or by the protocol layers.
5735  *
5736  *	This function may only be called from softirq context and interrupts
5737  *	should be enabled.
5738  *
5739  *	Return values (usually ignored):
5740  *	NET_RX_SUCCESS: no congestion
5741  *	NET_RX_DROP: packet was dropped
5742  */
5743 int netif_receive_skb(struct sk_buff *skb)
5744 {
5745 	int ret;
5746 
5747 	trace_netif_receive_skb_entry(skb);
5748 
5749 	ret = netif_receive_skb_internal(skb);
5750 	trace_netif_receive_skb_exit(ret);
5751 
5752 	return ret;
5753 }
5754 EXPORT_SYMBOL(netif_receive_skb);
5755 
5756 /**
5757  *	netif_receive_skb_list - process many receive buffers from network
5758  *	@head: list of skbs to process.
5759  *
5760  *	Since return value of netif_receive_skb() is normally ignored, and
5761  *	wouldn't be meaningful for a list, this function returns void.
5762  *
5763  *	This function may only be called from softirq context and interrupts
5764  *	should be enabled.
5765  */
5766 void netif_receive_skb_list(struct list_head *head)
5767 {
5768 	struct sk_buff *skb;
5769 
5770 	if (list_empty(head))
5771 		return;
5772 	if (trace_netif_receive_skb_list_entry_enabled()) {
5773 		list_for_each_entry(skb, head, list)
5774 			trace_netif_receive_skb_list_entry(skb);
5775 	}
5776 	netif_receive_skb_list_internal(head);
5777 	trace_netif_receive_skb_list_exit(0);
5778 }
5779 EXPORT_SYMBOL(netif_receive_skb_list);
5780 
5781 static DEFINE_PER_CPU(struct work_struct, flush_works);
5782 
5783 /* Network device is going away, flush any packets still pending */
5784 static void flush_backlog(struct work_struct *work)
5785 {
5786 	struct sk_buff *skb, *tmp;
5787 	struct softnet_data *sd;
5788 
5789 	local_bh_disable();
5790 	sd = this_cpu_ptr(&softnet_data);
5791 
5792 	rps_lock_irq_disable(sd);
5793 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5794 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5795 			__skb_unlink(skb, &sd->input_pkt_queue);
5796 			dev_kfree_skb_irq(skb);
5797 			input_queue_head_incr(sd);
5798 		}
5799 	}
5800 	rps_unlock_irq_enable(sd);
5801 
5802 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5803 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5804 			__skb_unlink(skb, &sd->process_queue);
5805 			kfree_skb(skb);
5806 			input_queue_head_incr(sd);
5807 		}
5808 	}
5809 	local_bh_enable();
5810 }
5811 
5812 static bool flush_required(int cpu)
5813 {
5814 #if IS_ENABLED(CONFIG_RPS)
5815 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5816 	bool do_flush;
5817 
5818 	rps_lock_irq_disable(sd);
5819 
5820 	/* as insertion into process_queue happens with the rps lock held,
5821 	 * process_queue access may race only with dequeue
5822 	 */
5823 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5824 		   !skb_queue_empty_lockless(&sd->process_queue);
5825 	rps_unlock_irq_enable(sd);
5826 
5827 	return do_flush;
5828 #endif
5829 	/* without RPS we can't safely check input_pkt_queue: during a
5830 	 * concurrent remote skb_queue_splice() we can detect as empty both
5831 	 * input_pkt_queue and process_queue even if the latter could end-up
5832 	 * containing a lot of packets.
5833 	 */
5834 	return true;
5835 }
5836 
5837 static void flush_all_backlogs(void)
5838 {
5839 	static cpumask_t flush_cpus;
5840 	unsigned int cpu;
5841 
5842 	/* since we are under rtnl lock protection we can use static data
5843 	 * for the cpumask and avoid allocating on stack the possibly
5844 	 * large mask
5845 	 */
5846 	ASSERT_RTNL();
5847 
5848 	cpus_read_lock();
5849 
5850 	cpumask_clear(&flush_cpus);
5851 	for_each_online_cpu(cpu) {
5852 		if (flush_required(cpu)) {
5853 			queue_work_on(cpu, system_highpri_wq,
5854 				      per_cpu_ptr(&flush_works, cpu));
5855 			cpumask_set_cpu(cpu, &flush_cpus);
5856 		}
5857 	}
5858 
5859 	/* we can have in flight packet[s] on the cpus we are not flushing,
5860 	 * synchronize_net() in unregister_netdevice_many() will take care of
5861 	 * them
5862 	 */
5863 	for_each_cpu(cpu, &flush_cpus)
5864 		flush_work(per_cpu_ptr(&flush_works, cpu));
5865 
5866 	cpus_read_unlock();
5867 }
5868 
5869 static void net_rps_send_ipi(struct softnet_data *remsd)
5870 {
5871 #ifdef CONFIG_RPS
5872 	while (remsd) {
5873 		struct softnet_data *next = remsd->rps_ipi_next;
5874 
5875 		if (cpu_online(remsd->cpu))
5876 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
5877 		remsd = next;
5878 	}
5879 #endif
5880 }
5881 
5882 /*
5883  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5884  * Note: called with local irq disabled, but exits with local irq enabled.
5885  */
5886 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5887 {
5888 #ifdef CONFIG_RPS
5889 	struct softnet_data *remsd = sd->rps_ipi_list;
5890 
5891 	if (remsd) {
5892 		sd->rps_ipi_list = NULL;
5893 
5894 		local_irq_enable();
5895 
5896 		/* Send pending IPI's to kick RPS processing on remote cpus. */
5897 		net_rps_send_ipi(remsd);
5898 	} else
5899 #endif
5900 		local_irq_enable();
5901 }
5902 
5903 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5904 {
5905 #ifdef CONFIG_RPS
5906 	return sd->rps_ipi_list != NULL;
5907 #else
5908 	return false;
5909 #endif
5910 }
5911 
5912 static int process_backlog(struct napi_struct *napi, int quota)
5913 {
5914 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5915 	bool again = true;
5916 	int work = 0;
5917 
5918 	/* Check if we have pending ipi, its better to send them now,
5919 	 * not waiting net_rx_action() end.
5920 	 */
5921 	if (sd_has_rps_ipi_waiting(sd)) {
5922 		local_irq_disable();
5923 		net_rps_action_and_irq_enable(sd);
5924 	}
5925 
5926 	napi->weight = READ_ONCE(dev_rx_weight);
5927 	while (again) {
5928 		struct sk_buff *skb;
5929 
5930 		while ((skb = __skb_dequeue(&sd->process_queue))) {
5931 			rcu_read_lock();
5932 			__netif_receive_skb(skb);
5933 			rcu_read_unlock();
5934 			input_queue_head_incr(sd);
5935 			if (++work >= quota)
5936 				return work;
5937 
5938 		}
5939 
5940 		rps_lock_irq_disable(sd);
5941 		if (skb_queue_empty(&sd->input_pkt_queue)) {
5942 			/*
5943 			 * Inline a custom version of __napi_complete().
5944 			 * only current cpu owns and manipulates this napi,
5945 			 * and NAPI_STATE_SCHED is the only possible flag set
5946 			 * on backlog.
5947 			 * We can use a plain write instead of clear_bit(),
5948 			 * and we dont need an smp_mb() memory barrier.
5949 			 */
5950 			napi->state = 0;
5951 			again = false;
5952 		} else {
5953 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
5954 						   &sd->process_queue);
5955 		}
5956 		rps_unlock_irq_enable(sd);
5957 	}
5958 
5959 	return work;
5960 }
5961 
5962 /**
5963  * __napi_schedule - schedule for receive
5964  * @n: entry to schedule
5965  *
5966  * The entry's receive function will be scheduled to run.
5967  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5968  */
5969 void __napi_schedule(struct napi_struct *n)
5970 {
5971 	unsigned long flags;
5972 
5973 	local_irq_save(flags);
5974 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
5975 	local_irq_restore(flags);
5976 }
5977 EXPORT_SYMBOL(__napi_schedule);
5978 
5979 /**
5980  *	napi_schedule_prep - check if napi can be scheduled
5981  *	@n: napi context
5982  *
5983  * Test if NAPI routine is already running, and if not mark
5984  * it as running.  This is used as a condition variable to
5985  * insure only one NAPI poll instance runs.  We also make
5986  * sure there is no pending NAPI disable.
5987  */
5988 bool napi_schedule_prep(struct napi_struct *n)
5989 {
5990 	unsigned long new, val = READ_ONCE(n->state);
5991 
5992 	do {
5993 		if (unlikely(val & NAPIF_STATE_DISABLE))
5994 			return false;
5995 		new = val | NAPIF_STATE_SCHED;
5996 
5997 		/* Sets STATE_MISSED bit if STATE_SCHED was already set
5998 		 * This was suggested by Alexander Duyck, as compiler
5999 		 * emits better code than :
6000 		 * if (val & NAPIF_STATE_SCHED)
6001 		 *     new |= NAPIF_STATE_MISSED;
6002 		 */
6003 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6004 						   NAPIF_STATE_MISSED;
6005 	} while (!try_cmpxchg(&n->state, &val, new));
6006 
6007 	return !(val & NAPIF_STATE_SCHED);
6008 }
6009 EXPORT_SYMBOL(napi_schedule_prep);
6010 
6011 /**
6012  * __napi_schedule_irqoff - schedule for receive
6013  * @n: entry to schedule
6014  *
6015  * Variant of __napi_schedule() assuming hard irqs are masked.
6016  *
6017  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6018  * because the interrupt disabled assumption might not be true
6019  * due to force-threaded interrupts and spinlock substitution.
6020  */
6021 void __napi_schedule_irqoff(struct napi_struct *n)
6022 {
6023 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6024 		____napi_schedule(this_cpu_ptr(&softnet_data), n);
6025 	else
6026 		__napi_schedule(n);
6027 }
6028 EXPORT_SYMBOL(__napi_schedule_irqoff);
6029 
6030 bool napi_complete_done(struct napi_struct *n, int work_done)
6031 {
6032 	unsigned long flags, val, new, timeout = 0;
6033 	bool ret = true;
6034 
6035 	/*
6036 	 * 1) Don't let napi dequeue from the cpu poll list
6037 	 *    just in case its running on a different cpu.
6038 	 * 2) If we are busy polling, do nothing here, we have
6039 	 *    the guarantee we will be called later.
6040 	 */
6041 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6042 				 NAPIF_STATE_IN_BUSY_POLL)))
6043 		return false;
6044 
6045 	if (work_done) {
6046 		if (n->gro_bitmask)
6047 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
6048 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6049 	}
6050 	if (n->defer_hard_irqs_count > 0) {
6051 		n->defer_hard_irqs_count--;
6052 		timeout = READ_ONCE(n->dev->gro_flush_timeout);
6053 		if (timeout)
6054 			ret = false;
6055 	}
6056 	if (n->gro_bitmask) {
6057 		/* When the NAPI instance uses a timeout and keeps postponing
6058 		 * it, we need to bound somehow the time packets are kept in
6059 		 * the GRO layer
6060 		 */
6061 		napi_gro_flush(n, !!timeout);
6062 	}
6063 
6064 	gro_normal_list(n);
6065 
6066 	if (unlikely(!list_empty(&n->poll_list))) {
6067 		/* If n->poll_list is not empty, we need to mask irqs */
6068 		local_irq_save(flags);
6069 		list_del_init(&n->poll_list);
6070 		local_irq_restore(flags);
6071 	}
6072 
6073 	val = READ_ONCE(n->state);
6074 	do {
6075 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6076 
6077 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6078 			      NAPIF_STATE_SCHED_THREADED |
6079 			      NAPIF_STATE_PREFER_BUSY_POLL);
6080 
6081 		/* If STATE_MISSED was set, leave STATE_SCHED set,
6082 		 * because we will call napi->poll() one more time.
6083 		 * This C code was suggested by Alexander Duyck to help gcc.
6084 		 */
6085 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6086 						    NAPIF_STATE_SCHED;
6087 	} while (!try_cmpxchg(&n->state, &val, new));
6088 
6089 	if (unlikely(val & NAPIF_STATE_MISSED)) {
6090 		__napi_schedule(n);
6091 		return false;
6092 	}
6093 
6094 	if (timeout)
6095 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
6096 			      HRTIMER_MODE_REL_PINNED);
6097 	return ret;
6098 }
6099 EXPORT_SYMBOL(napi_complete_done);
6100 
6101 /* must be called under rcu_read_lock(), as we dont take a reference */
6102 static struct napi_struct *napi_by_id(unsigned int napi_id)
6103 {
6104 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6105 	struct napi_struct *napi;
6106 
6107 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6108 		if (napi->napi_id == napi_id)
6109 			return napi;
6110 
6111 	return NULL;
6112 }
6113 
6114 #if defined(CONFIG_NET_RX_BUSY_POLL)
6115 
6116 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6117 {
6118 	if (!skip_schedule) {
6119 		gro_normal_list(napi);
6120 		__napi_schedule(napi);
6121 		return;
6122 	}
6123 
6124 	if (napi->gro_bitmask) {
6125 		/* flush too old packets
6126 		 * If HZ < 1000, flush all packets.
6127 		 */
6128 		napi_gro_flush(napi, HZ >= 1000);
6129 	}
6130 
6131 	gro_normal_list(napi);
6132 	clear_bit(NAPI_STATE_SCHED, &napi->state);
6133 }
6134 
6135 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6136 			   u16 budget)
6137 {
6138 	bool skip_schedule = false;
6139 	unsigned long timeout;
6140 	int rc;
6141 
6142 	/* Busy polling means there is a high chance device driver hard irq
6143 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6144 	 * set in napi_schedule_prep().
6145 	 * Since we are about to call napi->poll() once more, we can safely
6146 	 * clear NAPI_STATE_MISSED.
6147 	 *
6148 	 * Note: x86 could use a single "lock and ..." instruction
6149 	 * to perform these two clear_bit()
6150 	 */
6151 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6152 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6153 
6154 	local_bh_disable();
6155 
6156 	if (prefer_busy_poll) {
6157 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6158 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6159 		if (napi->defer_hard_irqs_count && timeout) {
6160 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6161 			skip_schedule = true;
6162 		}
6163 	}
6164 
6165 	/* All we really want here is to re-enable device interrupts.
6166 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6167 	 */
6168 	rc = napi->poll(napi, budget);
6169 	/* We can't gro_normal_list() here, because napi->poll() might have
6170 	 * rearmed the napi (napi_complete_done()) in which case it could
6171 	 * already be running on another CPU.
6172 	 */
6173 	trace_napi_poll(napi, rc, budget);
6174 	netpoll_poll_unlock(have_poll_lock);
6175 	if (rc == budget)
6176 		__busy_poll_stop(napi, skip_schedule);
6177 	local_bh_enable();
6178 }
6179 
6180 void napi_busy_loop(unsigned int napi_id,
6181 		    bool (*loop_end)(void *, unsigned long),
6182 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6183 {
6184 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6185 	int (*napi_poll)(struct napi_struct *napi, int budget);
6186 	void *have_poll_lock = NULL;
6187 	struct napi_struct *napi;
6188 
6189 restart:
6190 	napi_poll = NULL;
6191 
6192 	rcu_read_lock();
6193 
6194 	napi = napi_by_id(napi_id);
6195 	if (!napi)
6196 		goto out;
6197 
6198 	preempt_disable();
6199 	for (;;) {
6200 		int work = 0;
6201 
6202 		local_bh_disable();
6203 		if (!napi_poll) {
6204 			unsigned long val = READ_ONCE(napi->state);
6205 
6206 			/* If multiple threads are competing for this napi,
6207 			 * we avoid dirtying napi->state as much as we can.
6208 			 */
6209 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6210 				   NAPIF_STATE_IN_BUSY_POLL)) {
6211 				if (prefer_busy_poll)
6212 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6213 				goto count;
6214 			}
6215 			if (cmpxchg(&napi->state, val,
6216 				    val | NAPIF_STATE_IN_BUSY_POLL |
6217 					  NAPIF_STATE_SCHED) != val) {
6218 				if (prefer_busy_poll)
6219 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6220 				goto count;
6221 			}
6222 			have_poll_lock = netpoll_poll_lock(napi);
6223 			napi_poll = napi->poll;
6224 		}
6225 		work = napi_poll(napi, budget);
6226 		trace_napi_poll(napi, work, budget);
6227 		gro_normal_list(napi);
6228 count:
6229 		if (work > 0)
6230 			__NET_ADD_STATS(dev_net(napi->dev),
6231 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6232 		local_bh_enable();
6233 
6234 		if (!loop_end || loop_end(loop_end_arg, start_time))
6235 			break;
6236 
6237 		if (unlikely(need_resched())) {
6238 			if (napi_poll)
6239 				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6240 			preempt_enable();
6241 			rcu_read_unlock();
6242 			cond_resched();
6243 			if (loop_end(loop_end_arg, start_time))
6244 				return;
6245 			goto restart;
6246 		}
6247 		cpu_relax();
6248 	}
6249 	if (napi_poll)
6250 		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6251 	preempt_enable();
6252 out:
6253 	rcu_read_unlock();
6254 }
6255 EXPORT_SYMBOL(napi_busy_loop);
6256 
6257 #endif /* CONFIG_NET_RX_BUSY_POLL */
6258 
6259 static void napi_hash_add(struct napi_struct *napi)
6260 {
6261 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6262 		return;
6263 
6264 	spin_lock(&napi_hash_lock);
6265 
6266 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6267 	do {
6268 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6269 			napi_gen_id = MIN_NAPI_ID;
6270 	} while (napi_by_id(napi_gen_id));
6271 	napi->napi_id = napi_gen_id;
6272 
6273 	hlist_add_head_rcu(&napi->napi_hash_node,
6274 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6275 
6276 	spin_unlock(&napi_hash_lock);
6277 }
6278 
6279 /* Warning : caller is responsible to make sure rcu grace period
6280  * is respected before freeing memory containing @napi
6281  */
6282 static void napi_hash_del(struct napi_struct *napi)
6283 {
6284 	spin_lock(&napi_hash_lock);
6285 
6286 	hlist_del_init_rcu(&napi->napi_hash_node);
6287 
6288 	spin_unlock(&napi_hash_lock);
6289 }
6290 
6291 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6292 {
6293 	struct napi_struct *napi;
6294 
6295 	napi = container_of(timer, struct napi_struct, timer);
6296 
6297 	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
6298 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6299 	 */
6300 	if (!napi_disable_pending(napi) &&
6301 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6302 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6303 		__napi_schedule_irqoff(napi);
6304 	}
6305 
6306 	return HRTIMER_NORESTART;
6307 }
6308 
6309 static void init_gro_hash(struct napi_struct *napi)
6310 {
6311 	int i;
6312 
6313 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6314 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6315 		napi->gro_hash[i].count = 0;
6316 	}
6317 	napi->gro_bitmask = 0;
6318 }
6319 
6320 int dev_set_threaded(struct net_device *dev, bool threaded)
6321 {
6322 	struct napi_struct *napi;
6323 	int err = 0;
6324 
6325 	if (dev->threaded == threaded)
6326 		return 0;
6327 
6328 	if (threaded) {
6329 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6330 			if (!napi->thread) {
6331 				err = napi_kthread_create(napi);
6332 				if (err) {
6333 					threaded = false;
6334 					break;
6335 				}
6336 			}
6337 		}
6338 	}
6339 
6340 	dev->threaded = threaded;
6341 
6342 	/* Make sure kthread is created before THREADED bit
6343 	 * is set.
6344 	 */
6345 	smp_mb__before_atomic();
6346 
6347 	/* Setting/unsetting threaded mode on a napi might not immediately
6348 	 * take effect, if the current napi instance is actively being
6349 	 * polled. In this case, the switch between threaded mode and
6350 	 * softirq mode will happen in the next round of napi_schedule().
6351 	 * This should not cause hiccups/stalls to the live traffic.
6352 	 */
6353 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
6354 		if (threaded)
6355 			set_bit(NAPI_STATE_THREADED, &napi->state);
6356 		else
6357 			clear_bit(NAPI_STATE_THREADED, &napi->state);
6358 	}
6359 
6360 	return err;
6361 }
6362 EXPORT_SYMBOL(dev_set_threaded);
6363 
6364 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6365 			   int (*poll)(struct napi_struct *, int), int weight)
6366 {
6367 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6368 		return;
6369 
6370 	INIT_LIST_HEAD(&napi->poll_list);
6371 	INIT_HLIST_NODE(&napi->napi_hash_node);
6372 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6373 	napi->timer.function = napi_watchdog;
6374 	init_gro_hash(napi);
6375 	napi->skb = NULL;
6376 	INIT_LIST_HEAD(&napi->rx_list);
6377 	napi->rx_count = 0;
6378 	napi->poll = poll;
6379 	if (weight > NAPI_POLL_WEIGHT)
6380 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6381 				weight);
6382 	napi->weight = weight;
6383 	napi->dev = dev;
6384 #ifdef CONFIG_NETPOLL
6385 	napi->poll_owner = -1;
6386 #endif
6387 	set_bit(NAPI_STATE_SCHED, &napi->state);
6388 	set_bit(NAPI_STATE_NPSVC, &napi->state);
6389 	list_add_rcu(&napi->dev_list, &dev->napi_list);
6390 	napi_hash_add(napi);
6391 	napi_get_frags_check(napi);
6392 	/* Create kthread for this napi if dev->threaded is set.
6393 	 * Clear dev->threaded if kthread creation failed so that
6394 	 * threaded mode will not be enabled in napi_enable().
6395 	 */
6396 	if (dev->threaded && napi_kthread_create(napi))
6397 		dev->threaded = 0;
6398 }
6399 EXPORT_SYMBOL(netif_napi_add_weight);
6400 
6401 void napi_disable(struct napi_struct *n)
6402 {
6403 	unsigned long val, new;
6404 
6405 	might_sleep();
6406 	set_bit(NAPI_STATE_DISABLE, &n->state);
6407 
6408 	val = READ_ONCE(n->state);
6409 	do {
6410 		while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6411 			usleep_range(20, 200);
6412 			val = READ_ONCE(n->state);
6413 		}
6414 
6415 		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6416 		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6417 	} while (!try_cmpxchg(&n->state, &val, new));
6418 
6419 	hrtimer_cancel(&n->timer);
6420 
6421 	clear_bit(NAPI_STATE_DISABLE, &n->state);
6422 }
6423 EXPORT_SYMBOL(napi_disable);
6424 
6425 /**
6426  *	napi_enable - enable NAPI scheduling
6427  *	@n: NAPI context
6428  *
6429  * Resume NAPI from being scheduled on this context.
6430  * Must be paired with napi_disable.
6431  */
6432 void napi_enable(struct napi_struct *n)
6433 {
6434 	unsigned long new, val = READ_ONCE(n->state);
6435 
6436 	do {
6437 		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6438 
6439 		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6440 		if (n->dev->threaded && n->thread)
6441 			new |= NAPIF_STATE_THREADED;
6442 	} while (!try_cmpxchg(&n->state, &val, new));
6443 }
6444 EXPORT_SYMBOL(napi_enable);
6445 
6446 static void flush_gro_hash(struct napi_struct *napi)
6447 {
6448 	int i;
6449 
6450 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6451 		struct sk_buff *skb, *n;
6452 
6453 		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6454 			kfree_skb(skb);
6455 		napi->gro_hash[i].count = 0;
6456 	}
6457 }
6458 
6459 /* Must be called in process context */
6460 void __netif_napi_del(struct napi_struct *napi)
6461 {
6462 	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6463 		return;
6464 
6465 	napi_hash_del(napi);
6466 	list_del_rcu(&napi->dev_list);
6467 	napi_free_frags(napi);
6468 
6469 	flush_gro_hash(napi);
6470 	napi->gro_bitmask = 0;
6471 
6472 	if (napi->thread) {
6473 		kthread_stop(napi->thread);
6474 		napi->thread = NULL;
6475 	}
6476 }
6477 EXPORT_SYMBOL(__netif_napi_del);
6478 
6479 static int __napi_poll(struct napi_struct *n, bool *repoll)
6480 {
6481 	int work, weight;
6482 
6483 	weight = n->weight;
6484 
6485 	/* This NAPI_STATE_SCHED test is for avoiding a race
6486 	 * with netpoll's poll_napi().  Only the entity which
6487 	 * obtains the lock and sees NAPI_STATE_SCHED set will
6488 	 * actually make the ->poll() call.  Therefore we avoid
6489 	 * accidentally calling ->poll() when NAPI is not scheduled.
6490 	 */
6491 	work = 0;
6492 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6493 		work = n->poll(n, weight);
6494 		trace_napi_poll(n, work, weight);
6495 	}
6496 
6497 	if (unlikely(work > weight))
6498 		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6499 				n->poll, work, weight);
6500 
6501 	if (likely(work < weight))
6502 		return work;
6503 
6504 	/* Drivers must not modify the NAPI state if they
6505 	 * consume the entire weight.  In such cases this code
6506 	 * still "owns" the NAPI instance and therefore can
6507 	 * move the instance around on the list at-will.
6508 	 */
6509 	if (unlikely(napi_disable_pending(n))) {
6510 		napi_complete(n);
6511 		return work;
6512 	}
6513 
6514 	/* The NAPI context has more processing work, but busy-polling
6515 	 * is preferred. Exit early.
6516 	 */
6517 	if (napi_prefer_busy_poll(n)) {
6518 		if (napi_complete_done(n, work)) {
6519 			/* If timeout is not set, we need to make sure
6520 			 * that the NAPI is re-scheduled.
6521 			 */
6522 			napi_schedule(n);
6523 		}
6524 		return work;
6525 	}
6526 
6527 	if (n->gro_bitmask) {
6528 		/* flush too old packets
6529 		 * If HZ < 1000, flush all packets.
6530 		 */
6531 		napi_gro_flush(n, HZ >= 1000);
6532 	}
6533 
6534 	gro_normal_list(n);
6535 
6536 	/* Some drivers may have called napi_schedule
6537 	 * prior to exhausting their budget.
6538 	 */
6539 	if (unlikely(!list_empty(&n->poll_list))) {
6540 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6541 			     n->dev ? n->dev->name : "backlog");
6542 		return work;
6543 	}
6544 
6545 	*repoll = true;
6546 
6547 	return work;
6548 }
6549 
6550 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6551 {
6552 	bool do_repoll = false;
6553 	void *have;
6554 	int work;
6555 
6556 	list_del_init(&n->poll_list);
6557 
6558 	have = netpoll_poll_lock(n);
6559 
6560 	work = __napi_poll(n, &do_repoll);
6561 
6562 	if (do_repoll)
6563 		list_add_tail(&n->poll_list, repoll);
6564 
6565 	netpoll_poll_unlock(have);
6566 
6567 	return work;
6568 }
6569 
6570 static int napi_thread_wait(struct napi_struct *napi)
6571 {
6572 	bool woken = false;
6573 
6574 	set_current_state(TASK_INTERRUPTIBLE);
6575 
6576 	while (!kthread_should_stop()) {
6577 		/* Testing SCHED_THREADED bit here to make sure the current
6578 		 * kthread owns this napi and could poll on this napi.
6579 		 * Testing SCHED bit is not enough because SCHED bit might be
6580 		 * set by some other busy poll thread or by napi_disable().
6581 		 */
6582 		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6583 			WARN_ON(!list_empty(&napi->poll_list));
6584 			__set_current_state(TASK_RUNNING);
6585 			return 0;
6586 		}
6587 
6588 		schedule();
6589 		/* woken being true indicates this thread owns this napi. */
6590 		woken = true;
6591 		set_current_state(TASK_INTERRUPTIBLE);
6592 	}
6593 	__set_current_state(TASK_RUNNING);
6594 
6595 	return -1;
6596 }
6597 
6598 static int napi_threaded_poll(void *data)
6599 {
6600 	struct napi_struct *napi = data;
6601 	void *have;
6602 
6603 	while (!napi_thread_wait(napi)) {
6604 		for (;;) {
6605 			bool repoll = false;
6606 
6607 			local_bh_disable();
6608 
6609 			have = netpoll_poll_lock(napi);
6610 			__napi_poll(napi, &repoll);
6611 			netpoll_poll_unlock(have);
6612 
6613 			local_bh_enable();
6614 
6615 			if (!repoll)
6616 				break;
6617 
6618 			cond_resched();
6619 		}
6620 	}
6621 	return 0;
6622 }
6623 
6624 static void skb_defer_free_flush(struct softnet_data *sd)
6625 {
6626 	struct sk_buff *skb, *next;
6627 
6628 	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6629 	if (!READ_ONCE(sd->defer_list))
6630 		return;
6631 
6632 	spin_lock_irq(&sd->defer_lock);
6633 	skb = sd->defer_list;
6634 	sd->defer_list = NULL;
6635 	sd->defer_count = 0;
6636 	spin_unlock_irq(&sd->defer_lock);
6637 
6638 	while (skb != NULL) {
6639 		next = skb->next;
6640 		napi_consume_skb(skb, 1);
6641 		skb = next;
6642 	}
6643 }
6644 
6645 static __latent_entropy void net_rx_action(struct softirq_action *h)
6646 {
6647 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6648 	unsigned long time_limit = jiffies +
6649 		usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
6650 	int budget = READ_ONCE(netdev_budget);
6651 	LIST_HEAD(list);
6652 	LIST_HEAD(repoll);
6653 
6654 start:
6655 	sd->in_net_rx_action = true;
6656 	local_irq_disable();
6657 	list_splice_init(&sd->poll_list, &list);
6658 	local_irq_enable();
6659 
6660 	for (;;) {
6661 		struct napi_struct *n;
6662 
6663 		skb_defer_free_flush(sd);
6664 
6665 		if (list_empty(&list)) {
6666 			if (list_empty(&repoll)) {
6667 				sd->in_net_rx_action = false;
6668 				barrier();
6669 				/* We need to check if ____napi_schedule()
6670 				 * had refilled poll_list while
6671 				 * sd->in_net_rx_action was true.
6672 				 */
6673 				if (!list_empty(&sd->poll_list))
6674 					goto start;
6675 				if (!sd_has_rps_ipi_waiting(sd))
6676 					goto end;
6677 			}
6678 			break;
6679 		}
6680 
6681 		n = list_first_entry(&list, struct napi_struct, poll_list);
6682 		budget -= napi_poll(n, &repoll);
6683 
6684 		/* If softirq window is exhausted then punt.
6685 		 * Allow this to run for 2 jiffies since which will allow
6686 		 * an average latency of 1.5/HZ.
6687 		 */
6688 		if (unlikely(budget <= 0 ||
6689 			     time_after_eq(jiffies, time_limit))) {
6690 			sd->time_squeeze++;
6691 			break;
6692 		}
6693 	}
6694 
6695 	local_irq_disable();
6696 
6697 	list_splice_tail_init(&sd->poll_list, &list);
6698 	list_splice_tail(&repoll, &list);
6699 	list_splice(&list, &sd->poll_list);
6700 	if (!list_empty(&sd->poll_list))
6701 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
6702 	else
6703 		sd->in_net_rx_action = false;
6704 
6705 	net_rps_action_and_irq_enable(sd);
6706 end:;
6707 }
6708 
6709 struct netdev_adjacent {
6710 	struct net_device *dev;
6711 	netdevice_tracker dev_tracker;
6712 
6713 	/* upper master flag, there can only be one master device per list */
6714 	bool master;
6715 
6716 	/* lookup ignore flag */
6717 	bool ignore;
6718 
6719 	/* counter for the number of times this device was added to us */
6720 	u16 ref_nr;
6721 
6722 	/* private field for the users */
6723 	void *private;
6724 
6725 	struct list_head list;
6726 	struct rcu_head rcu;
6727 };
6728 
6729 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6730 						 struct list_head *adj_list)
6731 {
6732 	struct netdev_adjacent *adj;
6733 
6734 	list_for_each_entry(adj, adj_list, list) {
6735 		if (adj->dev == adj_dev)
6736 			return adj;
6737 	}
6738 	return NULL;
6739 }
6740 
6741 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6742 				    struct netdev_nested_priv *priv)
6743 {
6744 	struct net_device *dev = (struct net_device *)priv->data;
6745 
6746 	return upper_dev == dev;
6747 }
6748 
6749 /**
6750  * netdev_has_upper_dev - Check if device is linked to an upper device
6751  * @dev: device
6752  * @upper_dev: upper device to check
6753  *
6754  * Find out if a device is linked to specified upper device and return true
6755  * in case it is. Note that this checks only immediate upper device,
6756  * not through a complete stack of devices. The caller must hold the RTNL lock.
6757  */
6758 bool netdev_has_upper_dev(struct net_device *dev,
6759 			  struct net_device *upper_dev)
6760 {
6761 	struct netdev_nested_priv priv = {
6762 		.data = (void *)upper_dev,
6763 	};
6764 
6765 	ASSERT_RTNL();
6766 
6767 	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6768 					     &priv);
6769 }
6770 EXPORT_SYMBOL(netdev_has_upper_dev);
6771 
6772 /**
6773  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6774  * @dev: device
6775  * @upper_dev: upper device to check
6776  *
6777  * Find out if a device is linked to specified upper device and return true
6778  * in case it is. Note that this checks the entire upper device chain.
6779  * The caller must hold rcu lock.
6780  */
6781 
6782 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6783 				  struct net_device *upper_dev)
6784 {
6785 	struct netdev_nested_priv priv = {
6786 		.data = (void *)upper_dev,
6787 	};
6788 
6789 	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6790 					       &priv);
6791 }
6792 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6793 
6794 /**
6795  * netdev_has_any_upper_dev - Check if device is linked to some device
6796  * @dev: device
6797  *
6798  * Find out if a device is linked to an upper device and return true in case
6799  * it is. The caller must hold the RTNL lock.
6800  */
6801 bool netdev_has_any_upper_dev(struct net_device *dev)
6802 {
6803 	ASSERT_RTNL();
6804 
6805 	return !list_empty(&dev->adj_list.upper);
6806 }
6807 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6808 
6809 /**
6810  * netdev_master_upper_dev_get - Get master upper device
6811  * @dev: device
6812  *
6813  * Find a master upper device and return pointer to it or NULL in case
6814  * it's not there. The caller must hold the RTNL lock.
6815  */
6816 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6817 {
6818 	struct netdev_adjacent *upper;
6819 
6820 	ASSERT_RTNL();
6821 
6822 	if (list_empty(&dev->adj_list.upper))
6823 		return NULL;
6824 
6825 	upper = list_first_entry(&dev->adj_list.upper,
6826 				 struct netdev_adjacent, list);
6827 	if (likely(upper->master))
6828 		return upper->dev;
6829 	return NULL;
6830 }
6831 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6832 
6833 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6834 {
6835 	struct netdev_adjacent *upper;
6836 
6837 	ASSERT_RTNL();
6838 
6839 	if (list_empty(&dev->adj_list.upper))
6840 		return NULL;
6841 
6842 	upper = list_first_entry(&dev->adj_list.upper,
6843 				 struct netdev_adjacent, list);
6844 	if (likely(upper->master) && !upper->ignore)
6845 		return upper->dev;
6846 	return NULL;
6847 }
6848 
6849 /**
6850  * netdev_has_any_lower_dev - Check if device is linked to some device
6851  * @dev: device
6852  *
6853  * Find out if a device is linked to a lower device and return true in case
6854  * it is. The caller must hold the RTNL lock.
6855  */
6856 static bool netdev_has_any_lower_dev(struct net_device *dev)
6857 {
6858 	ASSERT_RTNL();
6859 
6860 	return !list_empty(&dev->adj_list.lower);
6861 }
6862 
6863 void *netdev_adjacent_get_private(struct list_head *adj_list)
6864 {
6865 	struct netdev_adjacent *adj;
6866 
6867 	adj = list_entry(adj_list, struct netdev_adjacent, list);
6868 
6869 	return adj->private;
6870 }
6871 EXPORT_SYMBOL(netdev_adjacent_get_private);
6872 
6873 /**
6874  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6875  * @dev: device
6876  * @iter: list_head ** of the current position
6877  *
6878  * Gets the next device from the dev's upper list, starting from iter
6879  * position. The caller must hold RCU read lock.
6880  */
6881 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6882 						 struct list_head **iter)
6883 {
6884 	struct netdev_adjacent *upper;
6885 
6886 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6887 
6888 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6889 
6890 	if (&upper->list == &dev->adj_list.upper)
6891 		return NULL;
6892 
6893 	*iter = &upper->list;
6894 
6895 	return upper->dev;
6896 }
6897 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6898 
6899 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6900 						  struct list_head **iter,
6901 						  bool *ignore)
6902 {
6903 	struct netdev_adjacent *upper;
6904 
6905 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6906 
6907 	if (&upper->list == &dev->adj_list.upper)
6908 		return NULL;
6909 
6910 	*iter = &upper->list;
6911 	*ignore = upper->ignore;
6912 
6913 	return upper->dev;
6914 }
6915 
6916 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6917 						    struct list_head **iter)
6918 {
6919 	struct netdev_adjacent *upper;
6920 
6921 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6922 
6923 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6924 
6925 	if (&upper->list == &dev->adj_list.upper)
6926 		return NULL;
6927 
6928 	*iter = &upper->list;
6929 
6930 	return upper->dev;
6931 }
6932 
6933 static int __netdev_walk_all_upper_dev(struct net_device *dev,
6934 				       int (*fn)(struct net_device *dev,
6935 					 struct netdev_nested_priv *priv),
6936 				       struct netdev_nested_priv *priv)
6937 {
6938 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6939 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6940 	int ret, cur = 0;
6941 	bool ignore;
6942 
6943 	now = dev;
6944 	iter = &dev->adj_list.upper;
6945 
6946 	while (1) {
6947 		if (now != dev) {
6948 			ret = fn(now, priv);
6949 			if (ret)
6950 				return ret;
6951 		}
6952 
6953 		next = NULL;
6954 		while (1) {
6955 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
6956 			if (!udev)
6957 				break;
6958 			if (ignore)
6959 				continue;
6960 
6961 			next = udev;
6962 			niter = &udev->adj_list.upper;
6963 			dev_stack[cur] = now;
6964 			iter_stack[cur++] = iter;
6965 			break;
6966 		}
6967 
6968 		if (!next) {
6969 			if (!cur)
6970 				return 0;
6971 			next = dev_stack[--cur];
6972 			niter = iter_stack[cur];
6973 		}
6974 
6975 		now = next;
6976 		iter = niter;
6977 	}
6978 
6979 	return 0;
6980 }
6981 
6982 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6983 				  int (*fn)(struct net_device *dev,
6984 					    struct netdev_nested_priv *priv),
6985 				  struct netdev_nested_priv *priv)
6986 {
6987 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6988 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6989 	int ret, cur = 0;
6990 
6991 	now = dev;
6992 	iter = &dev->adj_list.upper;
6993 
6994 	while (1) {
6995 		if (now != dev) {
6996 			ret = fn(now, priv);
6997 			if (ret)
6998 				return ret;
6999 		}
7000 
7001 		next = NULL;
7002 		while (1) {
7003 			udev = netdev_next_upper_dev_rcu(now, &iter);
7004 			if (!udev)
7005 				break;
7006 
7007 			next = udev;
7008 			niter = &udev->adj_list.upper;
7009 			dev_stack[cur] = now;
7010 			iter_stack[cur++] = iter;
7011 			break;
7012 		}
7013 
7014 		if (!next) {
7015 			if (!cur)
7016 				return 0;
7017 			next = dev_stack[--cur];
7018 			niter = iter_stack[cur];
7019 		}
7020 
7021 		now = next;
7022 		iter = niter;
7023 	}
7024 
7025 	return 0;
7026 }
7027 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
7028 
7029 static bool __netdev_has_upper_dev(struct net_device *dev,
7030 				   struct net_device *upper_dev)
7031 {
7032 	struct netdev_nested_priv priv = {
7033 		.flags = 0,
7034 		.data = (void *)upper_dev,
7035 	};
7036 
7037 	ASSERT_RTNL();
7038 
7039 	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7040 					   &priv);
7041 }
7042 
7043 /**
7044  * netdev_lower_get_next_private - Get the next ->private from the
7045  *				   lower neighbour list
7046  * @dev: device
7047  * @iter: list_head ** of the current position
7048  *
7049  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7050  * list, starting from iter position. The caller must hold either hold the
7051  * RTNL lock or its own locking that guarantees that the neighbour lower
7052  * list will remain unchanged.
7053  */
7054 void *netdev_lower_get_next_private(struct net_device *dev,
7055 				    struct list_head **iter)
7056 {
7057 	struct netdev_adjacent *lower;
7058 
7059 	lower = list_entry(*iter, struct netdev_adjacent, list);
7060 
7061 	if (&lower->list == &dev->adj_list.lower)
7062 		return NULL;
7063 
7064 	*iter = lower->list.next;
7065 
7066 	return lower->private;
7067 }
7068 EXPORT_SYMBOL(netdev_lower_get_next_private);
7069 
7070 /**
7071  * netdev_lower_get_next_private_rcu - Get the next ->private from the
7072  *				       lower neighbour list, RCU
7073  *				       variant
7074  * @dev: device
7075  * @iter: list_head ** of the current position
7076  *
7077  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7078  * list, starting from iter position. The caller must hold RCU read lock.
7079  */
7080 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7081 					struct list_head **iter)
7082 {
7083 	struct netdev_adjacent *lower;
7084 
7085 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7086 
7087 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7088 
7089 	if (&lower->list == &dev->adj_list.lower)
7090 		return NULL;
7091 
7092 	*iter = &lower->list;
7093 
7094 	return lower->private;
7095 }
7096 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7097 
7098 /**
7099  * netdev_lower_get_next - Get the next device from the lower neighbour
7100  *                         list
7101  * @dev: device
7102  * @iter: list_head ** of the current position
7103  *
7104  * Gets the next netdev_adjacent from the dev's lower neighbour
7105  * list, starting from iter position. The caller must hold RTNL lock or
7106  * its own locking that guarantees that the neighbour lower
7107  * list will remain unchanged.
7108  */
7109 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7110 {
7111 	struct netdev_adjacent *lower;
7112 
7113 	lower = list_entry(*iter, struct netdev_adjacent, list);
7114 
7115 	if (&lower->list == &dev->adj_list.lower)
7116 		return NULL;
7117 
7118 	*iter = lower->list.next;
7119 
7120 	return lower->dev;
7121 }
7122 EXPORT_SYMBOL(netdev_lower_get_next);
7123 
7124 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7125 						struct list_head **iter)
7126 {
7127 	struct netdev_adjacent *lower;
7128 
7129 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7130 
7131 	if (&lower->list == &dev->adj_list.lower)
7132 		return NULL;
7133 
7134 	*iter = &lower->list;
7135 
7136 	return lower->dev;
7137 }
7138 
7139 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7140 						  struct list_head **iter,
7141 						  bool *ignore)
7142 {
7143 	struct netdev_adjacent *lower;
7144 
7145 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7146 
7147 	if (&lower->list == &dev->adj_list.lower)
7148 		return NULL;
7149 
7150 	*iter = &lower->list;
7151 	*ignore = lower->ignore;
7152 
7153 	return lower->dev;
7154 }
7155 
7156 int netdev_walk_all_lower_dev(struct net_device *dev,
7157 			      int (*fn)(struct net_device *dev,
7158 					struct netdev_nested_priv *priv),
7159 			      struct netdev_nested_priv *priv)
7160 {
7161 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7162 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7163 	int ret, cur = 0;
7164 
7165 	now = dev;
7166 	iter = &dev->adj_list.lower;
7167 
7168 	while (1) {
7169 		if (now != dev) {
7170 			ret = fn(now, priv);
7171 			if (ret)
7172 				return ret;
7173 		}
7174 
7175 		next = NULL;
7176 		while (1) {
7177 			ldev = netdev_next_lower_dev(now, &iter);
7178 			if (!ldev)
7179 				break;
7180 
7181 			next = ldev;
7182 			niter = &ldev->adj_list.lower;
7183 			dev_stack[cur] = now;
7184 			iter_stack[cur++] = iter;
7185 			break;
7186 		}
7187 
7188 		if (!next) {
7189 			if (!cur)
7190 				return 0;
7191 			next = dev_stack[--cur];
7192 			niter = iter_stack[cur];
7193 		}
7194 
7195 		now = next;
7196 		iter = niter;
7197 	}
7198 
7199 	return 0;
7200 }
7201 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7202 
7203 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7204 				       int (*fn)(struct net_device *dev,
7205 					 struct netdev_nested_priv *priv),
7206 				       struct netdev_nested_priv *priv)
7207 {
7208 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7209 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7210 	int ret, cur = 0;
7211 	bool ignore;
7212 
7213 	now = dev;
7214 	iter = &dev->adj_list.lower;
7215 
7216 	while (1) {
7217 		if (now != dev) {
7218 			ret = fn(now, priv);
7219 			if (ret)
7220 				return ret;
7221 		}
7222 
7223 		next = NULL;
7224 		while (1) {
7225 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7226 			if (!ldev)
7227 				break;
7228 			if (ignore)
7229 				continue;
7230 
7231 			next = ldev;
7232 			niter = &ldev->adj_list.lower;
7233 			dev_stack[cur] = now;
7234 			iter_stack[cur++] = iter;
7235 			break;
7236 		}
7237 
7238 		if (!next) {
7239 			if (!cur)
7240 				return 0;
7241 			next = dev_stack[--cur];
7242 			niter = iter_stack[cur];
7243 		}
7244 
7245 		now = next;
7246 		iter = niter;
7247 	}
7248 
7249 	return 0;
7250 }
7251 
7252 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7253 					     struct list_head **iter)
7254 {
7255 	struct netdev_adjacent *lower;
7256 
7257 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7258 	if (&lower->list == &dev->adj_list.lower)
7259 		return NULL;
7260 
7261 	*iter = &lower->list;
7262 
7263 	return lower->dev;
7264 }
7265 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7266 
7267 static u8 __netdev_upper_depth(struct net_device *dev)
7268 {
7269 	struct net_device *udev;
7270 	struct list_head *iter;
7271 	u8 max_depth = 0;
7272 	bool ignore;
7273 
7274 	for (iter = &dev->adj_list.upper,
7275 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7276 	     udev;
7277 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7278 		if (ignore)
7279 			continue;
7280 		if (max_depth < udev->upper_level)
7281 			max_depth = udev->upper_level;
7282 	}
7283 
7284 	return max_depth;
7285 }
7286 
7287 static u8 __netdev_lower_depth(struct net_device *dev)
7288 {
7289 	struct net_device *ldev;
7290 	struct list_head *iter;
7291 	u8 max_depth = 0;
7292 	bool ignore;
7293 
7294 	for (iter = &dev->adj_list.lower,
7295 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7296 	     ldev;
7297 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7298 		if (ignore)
7299 			continue;
7300 		if (max_depth < ldev->lower_level)
7301 			max_depth = ldev->lower_level;
7302 	}
7303 
7304 	return max_depth;
7305 }
7306 
7307 static int __netdev_update_upper_level(struct net_device *dev,
7308 				       struct netdev_nested_priv *__unused)
7309 {
7310 	dev->upper_level = __netdev_upper_depth(dev) + 1;
7311 	return 0;
7312 }
7313 
7314 #ifdef CONFIG_LOCKDEP
7315 static LIST_HEAD(net_unlink_list);
7316 
7317 static void net_unlink_todo(struct net_device *dev)
7318 {
7319 	if (list_empty(&dev->unlink_list))
7320 		list_add_tail(&dev->unlink_list, &net_unlink_list);
7321 }
7322 #endif
7323 
7324 static int __netdev_update_lower_level(struct net_device *dev,
7325 				       struct netdev_nested_priv *priv)
7326 {
7327 	dev->lower_level = __netdev_lower_depth(dev) + 1;
7328 
7329 #ifdef CONFIG_LOCKDEP
7330 	if (!priv)
7331 		return 0;
7332 
7333 	if (priv->flags & NESTED_SYNC_IMM)
7334 		dev->nested_level = dev->lower_level - 1;
7335 	if (priv->flags & NESTED_SYNC_TODO)
7336 		net_unlink_todo(dev);
7337 #endif
7338 	return 0;
7339 }
7340 
7341 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7342 				  int (*fn)(struct net_device *dev,
7343 					    struct netdev_nested_priv *priv),
7344 				  struct netdev_nested_priv *priv)
7345 {
7346 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7347 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7348 	int ret, cur = 0;
7349 
7350 	now = dev;
7351 	iter = &dev->adj_list.lower;
7352 
7353 	while (1) {
7354 		if (now != dev) {
7355 			ret = fn(now, priv);
7356 			if (ret)
7357 				return ret;
7358 		}
7359 
7360 		next = NULL;
7361 		while (1) {
7362 			ldev = netdev_next_lower_dev_rcu(now, &iter);
7363 			if (!ldev)
7364 				break;
7365 
7366 			next = ldev;
7367 			niter = &ldev->adj_list.lower;
7368 			dev_stack[cur] = now;
7369 			iter_stack[cur++] = iter;
7370 			break;
7371 		}
7372 
7373 		if (!next) {
7374 			if (!cur)
7375 				return 0;
7376 			next = dev_stack[--cur];
7377 			niter = iter_stack[cur];
7378 		}
7379 
7380 		now = next;
7381 		iter = niter;
7382 	}
7383 
7384 	return 0;
7385 }
7386 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7387 
7388 /**
7389  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7390  *				       lower neighbour list, RCU
7391  *				       variant
7392  * @dev: device
7393  *
7394  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7395  * list. The caller must hold RCU read lock.
7396  */
7397 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7398 {
7399 	struct netdev_adjacent *lower;
7400 
7401 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
7402 			struct netdev_adjacent, list);
7403 	if (lower)
7404 		return lower->private;
7405 	return NULL;
7406 }
7407 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7408 
7409 /**
7410  * netdev_master_upper_dev_get_rcu - Get master upper device
7411  * @dev: device
7412  *
7413  * Find a master upper device and return pointer to it or NULL in case
7414  * it's not there. The caller must hold the RCU read lock.
7415  */
7416 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7417 {
7418 	struct netdev_adjacent *upper;
7419 
7420 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
7421 				       struct netdev_adjacent, list);
7422 	if (upper && likely(upper->master))
7423 		return upper->dev;
7424 	return NULL;
7425 }
7426 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7427 
7428 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7429 			      struct net_device *adj_dev,
7430 			      struct list_head *dev_list)
7431 {
7432 	char linkname[IFNAMSIZ+7];
7433 
7434 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7435 		"upper_%s" : "lower_%s", adj_dev->name);
7436 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7437 				 linkname);
7438 }
7439 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7440 			       char *name,
7441 			       struct list_head *dev_list)
7442 {
7443 	char linkname[IFNAMSIZ+7];
7444 
7445 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7446 		"upper_%s" : "lower_%s", name);
7447 	sysfs_remove_link(&(dev->dev.kobj), linkname);
7448 }
7449 
7450 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7451 						 struct net_device *adj_dev,
7452 						 struct list_head *dev_list)
7453 {
7454 	return (dev_list == &dev->adj_list.upper ||
7455 		dev_list == &dev->adj_list.lower) &&
7456 		net_eq(dev_net(dev), dev_net(adj_dev));
7457 }
7458 
7459 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7460 					struct net_device *adj_dev,
7461 					struct list_head *dev_list,
7462 					void *private, bool master)
7463 {
7464 	struct netdev_adjacent *adj;
7465 	int ret;
7466 
7467 	adj = __netdev_find_adj(adj_dev, dev_list);
7468 
7469 	if (adj) {
7470 		adj->ref_nr += 1;
7471 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7472 			 dev->name, adj_dev->name, adj->ref_nr);
7473 
7474 		return 0;
7475 	}
7476 
7477 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7478 	if (!adj)
7479 		return -ENOMEM;
7480 
7481 	adj->dev = adj_dev;
7482 	adj->master = master;
7483 	adj->ref_nr = 1;
7484 	adj->private = private;
7485 	adj->ignore = false;
7486 	netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7487 
7488 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7489 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7490 
7491 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7492 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7493 		if (ret)
7494 			goto free_adj;
7495 	}
7496 
7497 	/* Ensure that master link is always the first item in list. */
7498 	if (master) {
7499 		ret = sysfs_create_link(&(dev->dev.kobj),
7500 					&(adj_dev->dev.kobj), "master");
7501 		if (ret)
7502 			goto remove_symlinks;
7503 
7504 		list_add_rcu(&adj->list, dev_list);
7505 	} else {
7506 		list_add_tail_rcu(&adj->list, dev_list);
7507 	}
7508 
7509 	return 0;
7510 
7511 remove_symlinks:
7512 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7513 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7514 free_adj:
7515 	netdev_put(adj_dev, &adj->dev_tracker);
7516 	kfree(adj);
7517 
7518 	return ret;
7519 }
7520 
7521 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7522 					 struct net_device *adj_dev,
7523 					 u16 ref_nr,
7524 					 struct list_head *dev_list)
7525 {
7526 	struct netdev_adjacent *adj;
7527 
7528 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7529 		 dev->name, adj_dev->name, ref_nr);
7530 
7531 	adj = __netdev_find_adj(adj_dev, dev_list);
7532 
7533 	if (!adj) {
7534 		pr_err("Adjacency does not exist for device %s from %s\n",
7535 		       dev->name, adj_dev->name);
7536 		WARN_ON(1);
7537 		return;
7538 	}
7539 
7540 	if (adj->ref_nr > ref_nr) {
7541 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7542 			 dev->name, adj_dev->name, ref_nr,
7543 			 adj->ref_nr - ref_nr);
7544 		adj->ref_nr -= ref_nr;
7545 		return;
7546 	}
7547 
7548 	if (adj->master)
7549 		sysfs_remove_link(&(dev->dev.kobj), "master");
7550 
7551 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7552 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7553 
7554 	list_del_rcu(&adj->list);
7555 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7556 		 adj_dev->name, dev->name, adj_dev->name);
7557 	netdev_put(adj_dev, &adj->dev_tracker);
7558 	kfree_rcu(adj, rcu);
7559 }
7560 
7561 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7562 					    struct net_device *upper_dev,
7563 					    struct list_head *up_list,
7564 					    struct list_head *down_list,
7565 					    void *private, bool master)
7566 {
7567 	int ret;
7568 
7569 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7570 					   private, master);
7571 	if (ret)
7572 		return ret;
7573 
7574 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7575 					   private, false);
7576 	if (ret) {
7577 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7578 		return ret;
7579 	}
7580 
7581 	return 0;
7582 }
7583 
7584 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7585 					       struct net_device *upper_dev,
7586 					       u16 ref_nr,
7587 					       struct list_head *up_list,
7588 					       struct list_head *down_list)
7589 {
7590 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7591 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7592 }
7593 
7594 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7595 						struct net_device *upper_dev,
7596 						void *private, bool master)
7597 {
7598 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7599 						&dev->adj_list.upper,
7600 						&upper_dev->adj_list.lower,
7601 						private, master);
7602 }
7603 
7604 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7605 						   struct net_device *upper_dev)
7606 {
7607 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7608 					   &dev->adj_list.upper,
7609 					   &upper_dev->adj_list.lower);
7610 }
7611 
7612 static int __netdev_upper_dev_link(struct net_device *dev,
7613 				   struct net_device *upper_dev, bool master,
7614 				   void *upper_priv, void *upper_info,
7615 				   struct netdev_nested_priv *priv,
7616 				   struct netlink_ext_ack *extack)
7617 {
7618 	struct netdev_notifier_changeupper_info changeupper_info = {
7619 		.info = {
7620 			.dev = dev,
7621 			.extack = extack,
7622 		},
7623 		.upper_dev = upper_dev,
7624 		.master = master,
7625 		.linking = true,
7626 		.upper_info = upper_info,
7627 	};
7628 	struct net_device *master_dev;
7629 	int ret = 0;
7630 
7631 	ASSERT_RTNL();
7632 
7633 	if (dev == upper_dev)
7634 		return -EBUSY;
7635 
7636 	/* To prevent loops, check if dev is not upper device to upper_dev. */
7637 	if (__netdev_has_upper_dev(upper_dev, dev))
7638 		return -EBUSY;
7639 
7640 	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7641 		return -EMLINK;
7642 
7643 	if (!master) {
7644 		if (__netdev_has_upper_dev(dev, upper_dev))
7645 			return -EEXIST;
7646 	} else {
7647 		master_dev = __netdev_master_upper_dev_get(dev);
7648 		if (master_dev)
7649 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
7650 	}
7651 
7652 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7653 					    &changeupper_info.info);
7654 	ret = notifier_to_errno(ret);
7655 	if (ret)
7656 		return ret;
7657 
7658 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7659 						   master);
7660 	if (ret)
7661 		return ret;
7662 
7663 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7664 					    &changeupper_info.info);
7665 	ret = notifier_to_errno(ret);
7666 	if (ret)
7667 		goto rollback;
7668 
7669 	__netdev_update_upper_level(dev, NULL);
7670 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7671 
7672 	__netdev_update_lower_level(upper_dev, priv);
7673 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7674 				    priv);
7675 
7676 	return 0;
7677 
7678 rollback:
7679 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7680 
7681 	return ret;
7682 }
7683 
7684 /**
7685  * netdev_upper_dev_link - Add a link to the upper device
7686  * @dev: device
7687  * @upper_dev: new upper device
7688  * @extack: netlink extended ack
7689  *
7690  * Adds a link to device which is upper to this one. The caller must hold
7691  * the RTNL lock. On a failure a negative errno code is returned.
7692  * On success the reference counts are adjusted and the function
7693  * returns zero.
7694  */
7695 int netdev_upper_dev_link(struct net_device *dev,
7696 			  struct net_device *upper_dev,
7697 			  struct netlink_ext_ack *extack)
7698 {
7699 	struct netdev_nested_priv priv = {
7700 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7701 		.data = NULL,
7702 	};
7703 
7704 	return __netdev_upper_dev_link(dev, upper_dev, false,
7705 				       NULL, NULL, &priv, extack);
7706 }
7707 EXPORT_SYMBOL(netdev_upper_dev_link);
7708 
7709 /**
7710  * netdev_master_upper_dev_link - Add a master link to the upper device
7711  * @dev: device
7712  * @upper_dev: new upper device
7713  * @upper_priv: upper device private
7714  * @upper_info: upper info to be passed down via notifier
7715  * @extack: netlink extended ack
7716  *
7717  * Adds a link to device which is upper to this one. In this case, only
7718  * one master upper device can be linked, although other non-master devices
7719  * might be linked as well. The caller must hold the RTNL lock.
7720  * On a failure a negative errno code is returned. On success the reference
7721  * counts are adjusted and the function returns zero.
7722  */
7723 int netdev_master_upper_dev_link(struct net_device *dev,
7724 				 struct net_device *upper_dev,
7725 				 void *upper_priv, void *upper_info,
7726 				 struct netlink_ext_ack *extack)
7727 {
7728 	struct netdev_nested_priv priv = {
7729 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7730 		.data = NULL,
7731 	};
7732 
7733 	return __netdev_upper_dev_link(dev, upper_dev, true,
7734 				       upper_priv, upper_info, &priv, extack);
7735 }
7736 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7737 
7738 static void __netdev_upper_dev_unlink(struct net_device *dev,
7739 				      struct net_device *upper_dev,
7740 				      struct netdev_nested_priv *priv)
7741 {
7742 	struct netdev_notifier_changeupper_info changeupper_info = {
7743 		.info = {
7744 			.dev = dev,
7745 		},
7746 		.upper_dev = upper_dev,
7747 		.linking = false,
7748 	};
7749 
7750 	ASSERT_RTNL();
7751 
7752 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7753 
7754 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7755 				      &changeupper_info.info);
7756 
7757 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7758 
7759 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7760 				      &changeupper_info.info);
7761 
7762 	__netdev_update_upper_level(dev, NULL);
7763 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7764 
7765 	__netdev_update_lower_level(upper_dev, priv);
7766 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7767 				    priv);
7768 }
7769 
7770 /**
7771  * netdev_upper_dev_unlink - Removes a link to upper device
7772  * @dev: device
7773  * @upper_dev: new upper device
7774  *
7775  * Removes a link to device which is upper to this one. The caller must hold
7776  * the RTNL lock.
7777  */
7778 void netdev_upper_dev_unlink(struct net_device *dev,
7779 			     struct net_device *upper_dev)
7780 {
7781 	struct netdev_nested_priv priv = {
7782 		.flags = NESTED_SYNC_TODO,
7783 		.data = NULL,
7784 	};
7785 
7786 	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
7787 }
7788 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7789 
7790 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7791 				      struct net_device *lower_dev,
7792 				      bool val)
7793 {
7794 	struct netdev_adjacent *adj;
7795 
7796 	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7797 	if (adj)
7798 		adj->ignore = val;
7799 
7800 	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7801 	if (adj)
7802 		adj->ignore = val;
7803 }
7804 
7805 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7806 					struct net_device *lower_dev)
7807 {
7808 	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7809 }
7810 
7811 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7812 				       struct net_device *lower_dev)
7813 {
7814 	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7815 }
7816 
7817 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7818 				   struct net_device *new_dev,
7819 				   struct net_device *dev,
7820 				   struct netlink_ext_ack *extack)
7821 {
7822 	struct netdev_nested_priv priv = {
7823 		.flags = 0,
7824 		.data = NULL,
7825 	};
7826 	int err;
7827 
7828 	if (!new_dev)
7829 		return 0;
7830 
7831 	if (old_dev && new_dev != old_dev)
7832 		netdev_adjacent_dev_disable(dev, old_dev);
7833 	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7834 				      extack);
7835 	if (err) {
7836 		if (old_dev && new_dev != old_dev)
7837 			netdev_adjacent_dev_enable(dev, old_dev);
7838 		return err;
7839 	}
7840 
7841 	return 0;
7842 }
7843 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7844 
7845 void netdev_adjacent_change_commit(struct net_device *old_dev,
7846 				   struct net_device *new_dev,
7847 				   struct net_device *dev)
7848 {
7849 	struct netdev_nested_priv priv = {
7850 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7851 		.data = NULL,
7852 	};
7853 
7854 	if (!new_dev || !old_dev)
7855 		return;
7856 
7857 	if (new_dev == old_dev)
7858 		return;
7859 
7860 	netdev_adjacent_dev_enable(dev, old_dev);
7861 	__netdev_upper_dev_unlink(old_dev, dev, &priv);
7862 }
7863 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7864 
7865 void netdev_adjacent_change_abort(struct net_device *old_dev,
7866 				  struct net_device *new_dev,
7867 				  struct net_device *dev)
7868 {
7869 	struct netdev_nested_priv priv = {
7870 		.flags = 0,
7871 		.data = NULL,
7872 	};
7873 
7874 	if (!new_dev)
7875 		return;
7876 
7877 	if (old_dev && new_dev != old_dev)
7878 		netdev_adjacent_dev_enable(dev, old_dev);
7879 
7880 	__netdev_upper_dev_unlink(new_dev, dev, &priv);
7881 }
7882 EXPORT_SYMBOL(netdev_adjacent_change_abort);
7883 
7884 /**
7885  * netdev_bonding_info_change - Dispatch event about slave change
7886  * @dev: device
7887  * @bonding_info: info to dispatch
7888  *
7889  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7890  * The caller must hold the RTNL lock.
7891  */
7892 void netdev_bonding_info_change(struct net_device *dev,
7893 				struct netdev_bonding_info *bonding_info)
7894 {
7895 	struct netdev_notifier_bonding_info info = {
7896 		.info.dev = dev,
7897 	};
7898 
7899 	memcpy(&info.bonding_info, bonding_info,
7900 	       sizeof(struct netdev_bonding_info));
7901 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7902 				      &info.info);
7903 }
7904 EXPORT_SYMBOL(netdev_bonding_info_change);
7905 
7906 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
7907 					   struct netlink_ext_ack *extack)
7908 {
7909 	struct netdev_notifier_offload_xstats_info info = {
7910 		.info.dev = dev,
7911 		.info.extack = extack,
7912 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7913 	};
7914 	int err;
7915 	int rc;
7916 
7917 	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
7918 					 GFP_KERNEL);
7919 	if (!dev->offload_xstats_l3)
7920 		return -ENOMEM;
7921 
7922 	rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
7923 						  NETDEV_OFFLOAD_XSTATS_DISABLE,
7924 						  &info.info);
7925 	err = notifier_to_errno(rc);
7926 	if (err)
7927 		goto free_stats;
7928 
7929 	return 0;
7930 
7931 free_stats:
7932 	kfree(dev->offload_xstats_l3);
7933 	dev->offload_xstats_l3 = NULL;
7934 	return err;
7935 }
7936 
7937 int netdev_offload_xstats_enable(struct net_device *dev,
7938 				 enum netdev_offload_xstats_type type,
7939 				 struct netlink_ext_ack *extack)
7940 {
7941 	ASSERT_RTNL();
7942 
7943 	if (netdev_offload_xstats_enabled(dev, type))
7944 		return -EALREADY;
7945 
7946 	switch (type) {
7947 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7948 		return netdev_offload_xstats_enable_l3(dev, extack);
7949 	}
7950 
7951 	WARN_ON(1);
7952 	return -EINVAL;
7953 }
7954 EXPORT_SYMBOL(netdev_offload_xstats_enable);
7955 
7956 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
7957 {
7958 	struct netdev_notifier_offload_xstats_info info = {
7959 		.info.dev = dev,
7960 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7961 	};
7962 
7963 	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
7964 				      &info.info);
7965 	kfree(dev->offload_xstats_l3);
7966 	dev->offload_xstats_l3 = NULL;
7967 }
7968 
7969 int netdev_offload_xstats_disable(struct net_device *dev,
7970 				  enum netdev_offload_xstats_type type)
7971 {
7972 	ASSERT_RTNL();
7973 
7974 	if (!netdev_offload_xstats_enabled(dev, type))
7975 		return -EALREADY;
7976 
7977 	switch (type) {
7978 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7979 		netdev_offload_xstats_disable_l3(dev);
7980 		return 0;
7981 	}
7982 
7983 	WARN_ON(1);
7984 	return -EINVAL;
7985 }
7986 EXPORT_SYMBOL(netdev_offload_xstats_disable);
7987 
7988 static void netdev_offload_xstats_disable_all(struct net_device *dev)
7989 {
7990 	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
7991 }
7992 
7993 static struct rtnl_hw_stats64 *
7994 netdev_offload_xstats_get_ptr(const struct net_device *dev,
7995 			      enum netdev_offload_xstats_type type)
7996 {
7997 	switch (type) {
7998 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
7999 		return dev->offload_xstats_l3;
8000 	}
8001 
8002 	WARN_ON(1);
8003 	return NULL;
8004 }
8005 
8006 bool netdev_offload_xstats_enabled(const struct net_device *dev,
8007 				   enum netdev_offload_xstats_type type)
8008 {
8009 	ASSERT_RTNL();
8010 
8011 	return netdev_offload_xstats_get_ptr(dev, type);
8012 }
8013 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8014 
8015 struct netdev_notifier_offload_xstats_ru {
8016 	bool used;
8017 };
8018 
8019 struct netdev_notifier_offload_xstats_rd {
8020 	struct rtnl_hw_stats64 stats;
8021 	bool used;
8022 };
8023 
8024 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8025 				  const struct rtnl_hw_stats64 *src)
8026 {
8027 	dest->rx_packets	  += src->rx_packets;
8028 	dest->tx_packets	  += src->tx_packets;
8029 	dest->rx_bytes		  += src->rx_bytes;
8030 	dest->tx_bytes		  += src->tx_bytes;
8031 	dest->rx_errors		  += src->rx_errors;
8032 	dest->tx_errors		  += src->tx_errors;
8033 	dest->rx_dropped	  += src->rx_dropped;
8034 	dest->tx_dropped	  += src->tx_dropped;
8035 	dest->multicast		  += src->multicast;
8036 }
8037 
8038 static int netdev_offload_xstats_get_used(struct net_device *dev,
8039 					  enum netdev_offload_xstats_type type,
8040 					  bool *p_used,
8041 					  struct netlink_ext_ack *extack)
8042 {
8043 	struct netdev_notifier_offload_xstats_ru report_used = {};
8044 	struct netdev_notifier_offload_xstats_info info = {
8045 		.info.dev = dev,
8046 		.info.extack = extack,
8047 		.type = type,
8048 		.report_used = &report_used,
8049 	};
8050 	int rc;
8051 
8052 	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8053 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8054 					   &info.info);
8055 	*p_used = report_used.used;
8056 	return notifier_to_errno(rc);
8057 }
8058 
8059 static int netdev_offload_xstats_get_stats(struct net_device *dev,
8060 					   enum netdev_offload_xstats_type type,
8061 					   struct rtnl_hw_stats64 *p_stats,
8062 					   bool *p_used,
8063 					   struct netlink_ext_ack *extack)
8064 {
8065 	struct netdev_notifier_offload_xstats_rd report_delta = {};
8066 	struct netdev_notifier_offload_xstats_info info = {
8067 		.info.dev = dev,
8068 		.info.extack = extack,
8069 		.type = type,
8070 		.report_delta = &report_delta,
8071 	};
8072 	struct rtnl_hw_stats64 *stats;
8073 	int rc;
8074 
8075 	stats = netdev_offload_xstats_get_ptr(dev, type);
8076 	if (WARN_ON(!stats))
8077 		return -EINVAL;
8078 
8079 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8080 					   &info.info);
8081 
8082 	/* Cache whatever we got, even if there was an error, otherwise the
8083 	 * successful stats retrievals would get lost.
8084 	 */
8085 	netdev_hw_stats64_add(stats, &report_delta.stats);
8086 
8087 	if (p_stats)
8088 		*p_stats = *stats;
8089 	*p_used = report_delta.used;
8090 
8091 	return notifier_to_errno(rc);
8092 }
8093 
8094 int netdev_offload_xstats_get(struct net_device *dev,
8095 			      enum netdev_offload_xstats_type type,
8096 			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
8097 			      struct netlink_ext_ack *extack)
8098 {
8099 	ASSERT_RTNL();
8100 
8101 	if (p_stats)
8102 		return netdev_offload_xstats_get_stats(dev, type, p_stats,
8103 						       p_used, extack);
8104 	else
8105 		return netdev_offload_xstats_get_used(dev, type, p_used,
8106 						      extack);
8107 }
8108 EXPORT_SYMBOL(netdev_offload_xstats_get);
8109 
8110 void
8111 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8112 				   const struct rtnl_hw_stats64 *stats)
8113 {
8114 	report_delta->used = true;
8115 	netdev_hw_stats64_add(&report_delta->stats, stats);
8116 }
8117 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8118 
8119 void
8120 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8121 {
8122 	report_used->used = true;
8123 }
8124 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8125 
8126 void netdev_offload_xstats_push_delta(struct net_device *dev,
8127 				      enum netdev_offload_xstats_type type,
8128 				      const struct rtnl_hw_stats64 *p_stats)
8129 {
8130 	struct rtnl_hw_stats64 *stats;
8131 
8132 	ASSERT_RTNL();
8133 
8134 	stats = netdev_offload_xstats_get_ptr(dev, type);
8135 	if (WARN_ON(!stats))
8136 		return;
8137 
8138 	netdev_hw_stats64_add(stats, p_stats);
8139 }
8140 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
8141 
8142 /**
8143  * netdev_get_xmit_slave - Get the xmit slave of master device
8144  * @dev: device
8145  * @skb: The packet
8146  * @all_slaves: assume all the slaves are active
8147  *
8148  * The reference counters are not incremented so the caller must be
8149  * careful with locks. The caller must hold RCU lock.
8150  * %NULL is returned if no slave is found.
8151  */
8152 
8153 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8154 					 struct sk_buff *skb,
8155 					 bool all_slaves)
8156 {
8157 	const struct net_device_ops *ops = dev->netdev_ops;
8158 
8159 	if (!ops->ndo_get_xmit_slave)
8160 		return NULL;
8161 	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8162 }
8163 EXPORT_SYMBOL(netdev_get_xmit_slave);
8164 
8165 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8166 						  struct sock *sk)
8167 {
8168 	const struct net_device_ops *ops = dev->netdev_ops;
8169 
8170 	if (!ops->ndo_sk_get_lower_dev)
8171 		return NULL;
8172 	return ops->ndo_sk_get_lower_dev(dev, sk);
8173 }
8174 
8175 /**
8176  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8177  * @dev: device
8178  * @sk: the socket
8179  *
8180  * %NULL is returned if no lower device is found.
8181  */
8182 
8183 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8184 					    struct sock *sk)
8185 {
8186 	struct net_device *lower;
8187 
8188 	lower = netdev_sk_get_lower_dev(dev, sk);
8189 	while (lower) {
8190 		dev = lower;
8191 		lower = netdev_sk_get_lower_dev(dev, sk);
8192 	}
8193 
8194 	return dev;
8195 }
8196 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8197 
8198 static void netdev_adjacent_add_links(struct net_device *dev)
8199 {
8200 	struct netdev_adjacent *iter;
8201 
8202 	struct net *net = dev_net(dev);
8203 
8204 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8205 		if (!net_eq(net, dev_net(iter->dev)))
8206 			continue;
8207 		netdev_adjacent_sysfs_add(iter->dev, dev,
8208 					  &iter->dev->adj_list.lower);
8209 		netdev_adjacent_sysfs_add(dev, iter->dev,
8210 					  &dev->adj_list.upper);
8211 	}
8212 
8213 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8214 		if (!net_eq(net, dev_net(iter->dev)))
8215 			continue;
8216 		netdev_adjacent_sysfs_add(iter->dev, dev,
8217 					  &iter->dev->adj_list.upper);
8218 		netdev_adjacent_sysfs_add(dev, iter->dev,
8219 					  &dev->adj_list.lower);
8220 	}
8221 }
8222 
8223 static void netdev_adjacent_del_links(struct net_device *dev)
8224 {
8225 	struct netdev_adjacent *iter;
8226 
8227 	struct net *net = dev_net(dev);
8228 
8229 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8230 		if (!net_eq(net, dev_net(iter->dev)))
8231 			continue;
8232 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8233 					  &iter->dev->adj_list.lower);
8234 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8235 					  &dev->adj_list.upper);
8236 	}
8237 
8238 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8239 		if (!net_eq(net, dev_net(iter->dev)))
8240 			continue;
8241 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8242 					  &iter->dev->adj_list.upper);
8243 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8244 					  &dev->adj_list.lower);
8245 	}
8246 }
8247 
8248 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8249 {
8250 	struct netdev_adjacent *iter;
8251 
8252 	struct net *net = dev_net(dev);
8253 
8254 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8255 		if (!net_eq(net, dev_net(iter->dev)))
8256 			continue;
8257 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8258 					  &iter->dev->adj_list.lower);
8259 		netdev_adjacent_sysfs_add(iter->dev, dev,
8260 					  &iter->dev->adj_list.lower);
8261 	}
8262 
8263 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8264 		if (!net_eq(net, dev_net(iter->dev)))
8265 			continue;
8266 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8267 					  &iter->dev->adj_list.upper);
8268 		netdev_adjacent_sysfs_add(iter->dev, dev,
8269 					  &iter->dev->adj_list.upper);
8270 	}
8271 }
8272 
8273 void *netdev_lower_dev_get_private(struct net_device *dev,
8274 				   struct net_device *lower_dev)
8275 {
8276 	struct netdev_adjacent *lower;
8277 
8278 	if (!lower_dev)
8279 		return NULL;
8280 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8281 	if (!lower)
8282 		return NULL;
8283 
8284 	return lower->private;
8285 }
8286 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8287 
8288 
8289 /**
8290  * netdev_lower_state_changed - Dispatch event about lower device state change
8291  * @lower_dev: device
8292  * @lower_state_info: state to dispatch
8293  *
8294  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8295  * The caller must hold the RTNL lock.
8296  */
8297 void netdev_lower_state_changed(struct net_device *lower_dev,
8298 				void *lower_state_info)
8299 {
8300 	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8301 		.info.dev = lower_dev,
8302 	};
8303 
8304 	ASSERT_RTNL();
8305 	changelowerstate_info.lower_state_info = lower_state_info;
8306 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8307 				      &changelowerstate_info.info);
8308 }
8309 EXPORT_SYMBOL(netdev_lower_state_changed);
8310 
8311 static void dev_change_rx_flags(struct net_device *dev, int flags)
8312 {
8313 	const struct net_device_ops *ops = dev->netdev_ops;
8314 
8315 	if (ops->ndo_change_rx_flags)
8316 		ops->ndo_change_rx_flags(dev, flags);
8317 }
8318 
8319 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8320 {
8321 	unsigned int old_flags = dev->flags;
8322 	kuid_t uid;
8323 	kgid_t gid;
8324 
8325 	ASSERT_RTNL();
8326 
8327 	dev->flags |= IFF_PROMISC;
8328 	dev->promiscuity += inc;
8329 	if (dev->promiscuity == 0) {
8330 		/*
8331 		 * Avoid overflow.
8332 		 * If inc causes overflow, untouch promisc and return error.
8333 		 */
8334 		if (inc < 0)
8335 			dev->flags &= ~IFF_PROMISC;
8336 		else {
8337 			dev->promiscuity -= inc;
8338 			netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8339 			return -EOVERFLOW;
8340 		}
8341 	}
8342 	if (dev->flags != old_flags) {
8343 		netdev_info(dev, "%s promiscuous mode\n",
8344 			    dev->flags & IFF_PROMISC ? "entered" : "left");
8345 		if (audit_enabled) {
8346 			current_uid_gid(&uid, &gid);
8347 			audit_log(audit_context(), GFP_ATOMIC,
8348 				  AUDIT_ANOM_PROMISCUOUS,
8349 				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8350 				  dev->name, (dev->flags & IFF_PROMISC),
8351 				  (old_flags & IFF_PROMISC),
8352 				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
8353 				  from_kuid(&init_user_ns, uid),
8354 				  from_kgid(&init_user_ns, gid),
8355 				  audit_get_sessionid(current));
8356 		}
8357 
8358 		dev_change_rx_flags(dev, IFF_PROMISC);
8359 	}
8360 	if (notify)
8361 		__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8362 	return 0;
8363 }
8364 
8365 /**
8366  *	dev_set_promiscuity	- update promiscuity count on a device
8367  *	@dev: device
8368  *	@inc: modifier
8369  *
8370  *	Add or remove promiscuity from a device. While the count in the device
8371  *	remains above zero the interface remains promiscuous. Once it hits zero
8372  *	the device reverts back to normal filtering operation. A negative inc
8373  *	value is used to drop promiscuity on the device.
8374  *	Return 0 if successful or a negative errno code on error.
8375  */
8376 int dev_set_promiscuity(struct net_device *dev, int inc)
8377 {
8378 	unsigned int old_flags = dev->flags;
8379 	int err;
8380 
8381 	err = __dev_set_promiscuity(dev, inc, true);
8382 	if (err < 0)
8383 		return err;
8384 	if (dev->flags != old_flags)
8385 		dev_set_rx_mode(dev);
8386 	return err;
8387 }
8388 EXPORT_SYMBOL(dev_set_promiscuity);
8389 
8390 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8391 {
8392 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8393 
8394 	ASSERT_RTNL();
8395 
8396 	dev->flags |= IFF_ALLMULTI;
8397 	dev->allmulti += inc;
8398 	if (dev->allmulti == 0) {
8399 		/*
8400 		 * Avoid overflow.
8401 		 * If inc causes overflow, untouch allmulti and return error.
8402 		 */
8403 		if (inc < 0)
8404 			dev->flags &= ~IFF_ALLMULTI;
8405 		else {
8406 			dev->allmulti -= inc;
8407 			netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8408 			return -EOVERFLOW;
8409 		}
8410 	}
8411 	if (dev->flags ^ old_flags) {
8412 		netdev_info(dev, "%s allmulticast mode\n",
8413 			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
8414 		dev_change_rx_flags(dev, IFF_ALLMULTI);
8415 		dev_set_rx_mode(dev);
8416 		if (notify)
8417 			__dev_notify_flags(dev, old_flags,
8418 					   dev->gflags ^ old_gflags, 0, NULL);
8419 	}
8420 	return 0;
8421 }
8422 
8423 /**
8424  *	dev_set_allmulti	- update allmulti count on a device
8425  *	@dev: device
8426  *	@inc: modifier
8427  *
8428  *	Add or remove reception of all multicast frames to a device. While the
8429  *	count in the device remains above zero the interface remains listening
8430  *	to all interfaces. Once it hits zero the device reverts back to normal
8431  *	filtering operation. A negative @inc value is used to drop the counter
8432  *	when releasing a resource needing all multicasts.
8433  *	Return 0 if successful or a negative errno code on error.
8434  */
8435 
8436 int dev_set_allmulti(struct net_device *dev, int inc)
8437 {
8438 	return __dev_set_allmulti(dev, inc, true);
8439 }
8440 EXPORT_SYMBOL(dev_set_allmulti);
8441 
8442 /*
8443  *	Upload unicast and multicast address lists to device and
8444  *	configure RX filtering. When the device doesn't support unicast
8445  *	filtering it is put in promiscuous mode while unicast addresses
8446  *	are present.
8447  */
8448 void __dev_set_rx_mode(struct net_device *dev)
8449 {
8450 	const struct net_device_ops *ops = dev->netdev_ops;
8451 
8452 	/* dev_open will call this function so the list will stay sane. */
8453 	if (!(dev->flags&IFF_UP))
8454 		return;
8455 
8456 	if (!netif_device_present(dev))
8457 		return;
8458 
8459 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8460 		/* Unicast addresses changes may only happen under the rtnl,
8461 		 * therefore calling __dev_set_promiscuity here is safe.
8462 		 */
8463 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8464 			__dev_set_promiscuity(dev, 1, false);
8465 			dev->uc_promisc = true;
8466 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8467 			__dev_set_promiscuity(dev, -1, false);
8468 			dev->uc_promisc = false;
8469 		}
8470 	}
8471 
8472 	if (ops->ndo_set_rx_mode)
8473 		ops->ndo_set_rx_mode(dev);
8474 }
8475 
8476 void dev_set_rx_mode(struct net_device *dev)
8477 {
8478 	netif_addr_lock_bh(dev);
8479 	__dev_set_rx_mode(dev);
8480 	netif_addr_unlock_bh(dev);
8481 }
8482 
8483 /**
8484  *	dev_get_flags - get flags reported to userspace
8485  *	@dev: device
8486  *
8487  *	Get the combination of flag bits exported through APIs to userspace.
8488  */
8489 unsigned int dev_get_flags(const struct net_device *dev)
8490 {
8491 	unsigned int flags;
8492 
8493 	flags = (dev->flags & ~(IFF_PROMISC |
8494 				IFF_ALLMULTI |
8495 				IFF_RUNNING |
8496 				IFF_LOWER_UP |
8497 				IFF_DORMANT)) |
8498 		(dev->gflags & (IFF_PROMISC |
8499 				IFF_ALLMULTI));
8500 
8501 	if (netif_running(dev)) {
8502 		if (netif_oper_up(dev))
8503 			flags |= IFF_RUNNING;
8504 		if (netif_carrier_ok(dev))
8505 			flags |= IFF_LOWER_UP;
8506 		if (netif_dormant(dev))
8507 			flags |= IFF_DORMANT;
8508 	}
8509 
8510 	return flags;
8511 }
8512 EXPORT_SYMBOL(dev_get_flags);
8513 
8514 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8515 		       struct netlink_ext_ack *extack)
8516 {
8517 	unsigned int old_flags = dev->flags;
8518 	int ret;
8519 
8520 	ASSERT_RTNL();
8521 
8522 	/*
8523 	 *	Set the flags on our device.
8524 	 */
8525 
8526 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8527 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8528 			       IFF_AUTOMEDIA)) |
8529 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8530 				    IFF_ALLMULTI));
8531 
8532 	/*
8533 	 *	Load in the correct multicast list now the flags have changed.
8534 	 */
8535 
8536 	if ((old_flags ^ flags) & IFF_MULTICAST)
8537 		dev_change_rx_flags(dev, IFF_MULTICAST);
8538 
8539 	dev_set_rx_mode(dev);
8540 
8541 	/*
8542 	 *	Have we downed the interface. We handle IFF_UP ourselves
8543 	 *	according to user attempts to set it, rather than blindly
8544 	 *	setting it.
8545 	 */
8546 
8547 	ret = 0;
8548 	if ((old_flags ^ flags) & IFF_UP) {
8549 		if (old_flags & IFF_UP)
8550 			__dev_close(dev);
8551 		else
8552 			ret = __dev_open(dev, extack);
8553 	}
8554 
8555 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8556 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8557 		unsigned int old_flags = dev->flags;
8558 
8559 		dev->gflags ^= IFF_PROMISC;
8560 
8561 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8562 			if (dev->flags != old_flags)
8563 				dev_set_rx_mode(dev);
8564 	}
8565 
8566 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8567 	 * is important. Some (broken) drivers set IFF_PROMISC, when
8568 	 * IFF_ALLMULTI is requested not asking us and not reporting.
8569 	 */
8570 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8571 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8572 
8573 		dev->gflags ^= IFF_ALLMULTI;
8574 		__dev_set_allmulti(dev, inc, false);
8575 	}
8576 
8577 	return ret;
8578 }
8579 
8580 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8581 			unsigned int gchanges, u32 portid,
8582 			const struct nlmsghdr *nlh)
8583 {
8584 	unsigned int changes = dev->flags ^ old_flags;
8585 
8586 	if (gchanges)
8587 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8588 
8589 	if (changes & IFF_UP) {
8590 		if (dev->flags & IFF_UP)
8591 			call_netdevice_notifiers(NETDEV_UP, dev);
8592 		else
8593 			call_netdevice_notifiers(NETDEV_DOWN, dev);
8594 	}
8595 
8596 	if (dev->flags & IFF_UP &&
8597 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8598 		struct netdev_notifier_change_info change_info = {
8599 			.info = {
8600 				.dev = dev,
8601 			},
8602 			.flags_changed = changes,
8603 		};
8604 
8605 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8606 	}
8607 }
8608 
8609 /**
8610  *	dev_change_flags - change device settings
8611  *	@dev: device
8612  *	@flags: device state flags
8613  *	@extack: netlink extended ack
8614  *
8615  *	Change settings on device based state flags. The flags are
8616  *	in the userspace exported format.
8617  */
8618 int dev_change_flags(struct net_device *dev, unsigned int flags,
8619 		     struct netlink_ext_ack *extack)
8620 {
8621 	int ret;
8622 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8623 
8624 	ret = __dev_change_flags(dev, flags, extack);
8625 	if (ret < 0)
8626 		return ret;
8627 
8628 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8629 	__dev_notify_flags(dev, old_flags, changes, 0, NULL);
8630 	return ret;
8631 }
8632 EXPORT_SYMBOL(dev_change_flags);
8633 
8634 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8635 {
8636 	const struct net_device_ops *ops = dev->netdev_ops;
8637 
8638 	if (ops->ndo_change_mtu)
8639 		return ops->ndo_change_mtu(dev, new_mtu);
8640 
8641 	/* Pairs with all the lockless reads of dev->mtu in the stack */
8642 	WRITE_ONCE(dev->mtu, new_mtu);
8643 	return 0;
8644 }
8645 EXPORT_SYMBOL(__dev_set_mtu);
8646 
8647 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8648 		     struct netlink_ext_ack *extack)
8649 {
8650 	/* MTU must be positive, and in range */
8651 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8652 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8653 		return -EINVAL;
8654 	}
8655 
8656 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8657 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8658 		return -EINVAL;
8659 	}
8660 	return 0;
8661 }
8662 
8663 /**
8664  *	dev_set_mtu_ext - Change maximum transfer unit
8665  *	@dev: device
8666  *	@new_mtu: new transfer unit
8667  *	@extack: netlink extended ack
8668  *
8669  *	Change the maximum transfer size of the network device.
8670  */
8671 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8672 		    struct netlink_ext_ack *extack)
8673 {
8674 	int err, orig_mtu;
8675 
8676 	if (new_mtu == dev->mtu)
8677 		return 0;
8678 
8679 	err = dev_validate_mtu(dev, new_mtu, extack);
8680 	if (err)
8681 		return err;
8682 
8683 	if (!netif_device_present(dev))
8684 		return -ENODEV;
8685 
8686 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8687 	err = notifier_to_errno(err);
8688 	if (err)
8689 		return err;
8690 
8691 	orig_mtu = dev->mtu;
8692 	err = __dev_set_mtu(dev, new_mtu);
8693 
8694 	if (!err) {
8695 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8696 						   orig_mtu);
8697 		err = notifier_to_errno(err);
8698 		if (err) {
8699 			/* setting mtu back and notifying everyone again,
8700 			 * so that they have a chance to revert changes.
8701 			 */
8702 			__dev_set_mtu(dev, orig_mtu);
8703 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8704 						     new_mtu);
8705 		}
8706 	}
8707 	return err;
8708 }
8709 
8710 int dev_set_mtu(struct net_device *dev, int new_mtu)
8711 {
8712 	struct netlink_ext_ack extack;
8713 	int err;
8714 
8715 	memset(&extack, 0, sizeof(extack));
8716 	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8717 	if (err && extack._msg)
8718 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8719 	return err;
8720 }
8721 EXPORT_SYMBOL(dev_set_mtu);
8722 
8723 /**
8724  *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8725  *	@dev: device
8726  *	@new_len: new tx queue length
8727  */
8728 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8729 {
8730 	unsigned int orig_len = dev->tx_queue_len;
8731 	int res;
8732 
8733 	if (new_len != (unsigned int)new_len)
8734 		return -ERANGE;
8735 
8736 	if (new_len != orig_len) {
8737 		dev->tx_queue_len = new_len;
8738 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8739 		res = notifier_to_errno(res);
8740 		if (res)
8741 			goto err_rollback;
8742 		res = dev_qdisc_change_tx_queue_len(dev);
8743 		if (res)
8744 			goto err_rollback;
8745 	}
8746 
8747 	return 0;
8748 
8749 err_rollback:
8750 	netdev_err(dev, "refused to change device tx_queue_len\n");
8751 	dev->tx_queue_len = orig_len;
8752 	return res;
8753 }
8754 
8755 /**
8756  *	dev_set_group - Change group this device belongs to
8757  *	@dev: device
8758  *	@new_group: group this device should belong to
8759  */
8760 void dev_set_group(struct net_device *dev, int new_group)
8761 {
8762 	dev->group = new_group;
8763 }
8764 
8765 /**
8766  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8767  *	@dev: device
8768  *	@addr: new address
8769  *	@extack: netlink extended ack
8770  */
8771 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8772 			      struct netlink_ext_ack *extack)
8773 {
8774 	struct netdev_notifier_pre_changeaddr_info info = {
8775 		.info.dev = dev,
8776 		.info.extack = extack,
8777 		.dev_addr = addr,
8778 	};
8779 	int rc;
8780 
8781 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8782 	return notifier_to_errno(rc);
8783 }
8784 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8785 
8786 /**
8787  *	dev_set_mac_address - Change Media Access Control Address
8788  *	@dev: device
8789  *	@sa: new address
8790  *	@extack: netlink extended ack
8791  *
8792  *	Change the hardware (MAC) address of the device
8793  */
8794 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8795 			struct netlink_ext_ack *extack)
8796 {
8797 	const struct net_device_ops *ops = dev->netdev_ops;
8798 	int err;
8799 
8800 	if (!ops->ndo_set_mac_address)
8801 		return -EOPNOTSUPP;
8802 	if (sa->sa_family != dev->type)
8803 		return -EINVAL;
8804 	if (!netif_device_present(dev))
8805 		return -ENODEV;
8806 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8807 	if (err)
8808 		return err;
8809 	err = ops->ndo_set_mac_address(dev, sa);
8810 	if (err)
8811 		return err;
8812 	dev->addr_assign_type = NET_ADDR_SET;
8813 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8814 	add_device_randomness(dev->dev_addr, dev->addr_len);
8815 	return 0;
8816 }
8817 EXPORT_SYMBOL(dev_set_mac_address);
8818 
8819 static DECLARE_RWSEM(dev_addr_sem);
8820 
8821 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8822 			     struct netlink_ext_ack *extack)
8823 {
8824 	int ret;
8825 
8826 	down_write(&dev_addr_sem);
8827 	ret = dev_set_mac_address(dev, sa, extack);
8828 	up_write(&dev_addr_sem);
8829 	return ret;
8830 }
8831 EXPORT_SYMBOL(dev_set_mac_address_user);
8832 
8833 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8834 {
8835 	size_t size = sizeof(sa->sa_data_min);
8836 	struct net_device *dev;
8837 	int ret = 0;
8838 
8839 	down_read(&dev_addr_sem);
8840 	rcu_read_lock();
8841 
8842 	dev = dev_get_by_name_rcu(net, dev_name);
8843 	if (!dev) {
8844 		ret = -ENODEV;
8845 		goto unlock;
8846 	}
8847 	if (!dev->addr_len)
8848 		memset(sa->sa_data, 0, size);
8849 	else
8850 		memcpy(sa->sa_data, dev->dev_addr,
8851 		       min_t(size_t, size, dev->addr_len));
8852 	sa->sa_family = dev->type;
8853 
8854 unlock:
8855 	rcu_read_unlock();
8856 	up_read(&dev_addr_sem);
8857 	return ret;
8858 }
8859 EXPORT_SYMBOL(dev_get_mac_address);
8860 
8861 /**
8862  *	dev_change_carrier - Change device carrier
8863  *	@dev: device
8864  *	@new_carrier: new value
8865  *
8866  *	Change device carrier
8867  */
8868 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8869 {
8870 	const struct net_device_ops *ops = dev->netdev_ops;
8871 
8872 	if (!ops->ndo_change_carrier)
8873 		return -EOPNOTSUPP;
8874 	if (!netif_device_present(dev))
8875 		return -ENODEV;
8876 	return ops->ndo_change_carrier(dev, new_carrier);
8877 }
8878 
8879 /**
8880  *	dev_get_phys_port_id - Get device physical port ID
8881  *	@dev: device
8882  *	@ppid: port ID
8883  *
8884  *	Get device physical port ID
8885  */
8886 int dev_get_phys_port_id(struct net_device *dev,
8887 			 struct netdev_phys_item_id *ppid)
8888 {
8889 	const struct net_device_ops *ops = dev->netdev_ops;
8890 
8891 	if (!ops->ndo_get_phys_port_id)
8892 		return -EOPNOTSUPP;
8893 	return ops->ndo_get_phys_port_id(dev, ppid);
8894 }
8895 
8896 /**
8897  *	dev_get_phys_port_name - Get device physical port name
8898  *	@dev: device
8899  *	@name: port name
8900  *	@len: limit of bytes to copy to name
8901  *
8902  *	Get device physical port name
8903  */
8904 int dev_get_phys_port_name(struct net_device *dev,
8905 			   char *name, size_t len)
8906 {
8907 	const struct net_device_ops *ops = dev->netdev_ops;
8908 	int err;
8909 
8910 	if (ops->ndo_get_phys_port_name) {
8911 		err = ops->ndo_get_phys_port_name(dev, name, len);
8912 		if (err != -EOPNOTSUPP)
8913 			return err;
8914 	}
8915 	return devlink_compat_phys_port_name_get(dev, name, len);
8916 }
8917 
8918 /**
8919  *	dev_get_port_parent_id - Get the device's port parent identifier
8920  *	@dev: network device
8921  *	@ppid: pointer to a storage for the port's parent identifier
8922  *	@recurse: allow/disallow recursion to lower devices
8923  *
8924  *	Get the devices's port parent identifier
8925  */
8926 int dev_get_port_parent_id(struct net_device *dev,
8927 			   struct netdev_phys_item_id *ppid,
8928 			   bool recurse)
8929 {
8930 	const struct net_device_ops *ops = dev->netdev_ops;
8931 	struct netdev_phys_item_id first = { };
8932 	struct net_device *lower_dev;
8933 	struct list_head *iter;
8934 	int err;
8935 
8936 	if (ops->ndo_get_port_parent_id) {
8937 		err = ops->ndo_get_port_parent_id(dev, ppid);
8938 		if (err != -EOPNOTSUPP)
8939 			return err;
8940 	}
8941 
8942 	err = devlink_compat_switch_id_get(dev, ppid);
8943 	if (!recurse || err != -EOPNOTSUPP)
8944 		return err;
8945 
8946 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
8947 		err = dev_get_port_parent_id(lower_dev, ppid, true);
8948 		if (err)
8949 			break;
8950 		if (!first.id_len)
8951 			first = *ppid;
8952 		else if (memcmp(&first, ppid, sizeof(*ppid)))
8953 			return -EOPNOTSUPP;
8954 	}
8955 
8956 	return err;
8957 }
8958 EXPORT_SYMBOL(dev_get_port_parent_id);
8959 
8960 /**
8961  *	netdev_port_same_parent_id - Indicate if two network devices have
8962  *	the same port parent identifier
8963  *	@a: first network device
8964  *	@b: second network device
8965  */
8966 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8967 {
8968 	struct netdev_phys_item_id a_id = { };
8969 	struct netdev_phys_item_id b_id = { };
8970 
8971 	if (dev_get_port_parent_id(a, &a_id, true) ||
8972 	    dev_get_port_parent_id(b, &b_id, true))
8973 		return false;
8974 
8975 	return netdev_phys_item_id_same(&a_id, &b_id);
8976 }
8977 EXPORT_SYMBOL(netdev_port_same_parent_id);
8978 
8979 /**
8980  *	dev_change_proto_down - set carrier according to proto_down.
8981  *
8982  *	@dev: device
8983  *	@proto_down: new value
8984  */
8985 int dev_change_proto_down(struct net_device *dev, bool proto_down)
8986 {
8987 	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
8988 		return -EOPNOTSUPP;
8989 	if (!netif_device_present(dev))
8990 		return -ENODEV;
8991 	if (proto_down)
8992 		netif_carrier_off(dev);
8993 	else
8994 		netif_carrier_on(dev);
8995 	dev->proto_down = proto_down;
8996 	return 0;
8997 }
8998 
8999 /**
9000  *	dev_change_proto_down_reason - proto down reason
9001  *
9002  *	@dev: device
9003  *	@mask: proto down mask
9004  *	@value: proto down value
9005  */
9006 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9007 				  u32 value)
9008 {
9009 	int b;
9010 
9011 	if (!mask) {
9012 		dev->proto_down_reason = value;
9013 	} else {
9014 		for_each_set_bit(b, &mask, 32) {
9015 			if (value & (1 << b))
9016 				dev->proto_down_reason |= BIT(b);
9017 			else
9018 				dev->proto_down_reason &= ~BIT(b);
9019 		}
9020 	}
9021 }
9022 
9023 struct bpf_xdp_link {
9024 	struct bpf_link link;
9025 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9026 	int flags;
9027 };
9028 
9029 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9030 {
9031 	if (flags & XDP_FLAGS_HW_MODE)
9032 		return XDP_MODE_HW;
9033 	if (flags & XDP_FLAGS_DRV_MODE)
9034 		return XDP_MODE_DRV;
9035 	if (flags & XDP_FLAGS_SKB_MODE)
9036 		return XDP_MODE_SKB;
9037 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9038 }
9039 
9040 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9041 {
9042 	switch (mode) {
9043 	case XDP_MODE_SKB:
9044 		return generic_xdp_install;
9045 	case XDP_MODE_DRV:
9046 	case XDP_MODE_HW:
9047 		return dev->netdev_ops->ndo_bpf;
9048 	default:
9049 		return NULL;
9050 	}
9051 }
9052 
9053 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9054 					 enum bpf_xdp_mode mode)
9055 {
9056 	return dev->xdp_state[mode].link;
9057 }
9058 
9059 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9060 				     enum bpf_xdp_mode mode)
9061 {
9062 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9063 
9064 	if (link)
9065 		return link->link.prog;
9066 	return dev->xdp_state[mode].prog;
9067 }
9068 
9069 u8 dev_xdp_prog_count(struct net_device *dev)
9070 {
9071 	u8 count = 0;
9072 	int i;
9073 
9074 	for (i = 0; i < __MAX_XDP_MODE; i++)
9075 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9076 			count++;
9077 	return count;
9078 }
9079 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9080 
9081 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9082 {
9083 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9084 
9085 	return prog ? prog->aux->id : 0;
9086 }
9087 
9088 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9089 			     struct bpf_xdp_link *link)
9090 {
9091 	dev->xdp_state[mode].link = link;
9092 	dev->xdp_state[mode].prog = NULL;
9093 }
9094 
9095 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9096 			     struct bpf_prog *prog)
9097 {
9098 	dev->xdp_state[mode].link = NULL;
9099 	dev->xdp_state[mode].prog = prog;
9100 }
9101 
9102 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9103 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9104 			   u32 flags, struct bpf_prog *prog)
9105 {
9106 	struct netdev_bpf xdp;
9107 	int err;
9108 
9109 	memset(&xdp, 0, sizeof(xdp));
9110 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9111 	xdp.extack = extack;
9112 	xdp.flags = flags;
9113 	xdp.prog = prog;
9114 
9115 	/* Drivers assume refcnt is already incremented (i.e, prog pointer is
9116 	 * "moved" into driver), so they don't increment it on their own, but
9117 	 * they do decrement refcnt when program is detached or replaced.
9118 	 * Given net_device also owns link/prog, we need to bump refcnt here
9119 	 * to prevent drivers from underflowing it.
9120 	 */
9121 	if (prog)
9122 		bpf_prog_inc(prog);
9123 	err = bpf_op(dev, &xdp);
9124 	if (err) {
9125 		if (prog)
9126 			bpf_prog_put(prog);
9127 		return err;
9128 	}
9129 
9130 	if (mode != XDP_MODE_HW)
9131 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9132 
9133 	return 0;
9134 }
9135 
9136 static void dev_xdp_uninstall(struct net_device *dev)
9137 {
9138 	struct bpf_xdp_link *link;
9139 	struct bpf_prog *prog;
9140 	enum bpf_xdp_mode mode;
9141 	bpf_op_t bpf_op;
9142 
9143 	ASSERT_RTNL();
9144 
9145 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9146 		prog = dev_xdp_prog(dev, mode);
9147 		if (!prog)
9148 			continue;
9149 
9150 		bpf_op = dev_xdp_bpf_op(dev, mode);
9151 		if (!bpf_op)
9152 			continue;
9153 
9154 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9155 
9156 		/* auto-detach link from net device */
9157 		link = dev_xdp_link(dev, mode);
9158 		if (link)
9159 			link->dev = NULL;
9160 		else
9161 			bpf_prog_put(prog);
9162 
9163 		dev_xdp_set_link(dev, mode, NULL);
9164 	}
9165 }
9166 
9167 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9168 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9169 			  struct bpf_prog *old_prog, u32 flags)
9170 {
9171 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9172 	struct bpf_prog *cur_prog;
9173 	struct net_device *upper;
9174 	struct list_head *iter;
9175 	enum bpf_xdp_mode mode;
9176 	bpf_op_t bpf_op;
9177 	int err;
9178 
9179 	ASSERT_RTNL();
9180 
9181 	/* either link or prog attachment, never both */
9182 	if (link && (new_prog || old_prog))
9183 		return -EINVAL;
9184 	/* link supports only XDP mode flags */
9185 	if (link && (flags & ~XDP_FLAGS_MODES)) {
9186 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9187 		return -EINVAL;
9188 	}
9189 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9190 	if (num_modes > 1) {
9191 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9192 		return -EINVAL;
9193 	}
9194 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9195 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9196 		NL_SET_ERR_MSG(extack,
9197 			       "More than one program loaded, unset mode is ambiguous");
9198 		return -EINVAL;
9199 	}
9200 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9201 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9202 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9203 		return -EINVAL;
9204 	}
9205 
9206 	mode = dev_xdp_mode(dev, flags);
9207 	/* can't replace attached link */
9208 	if (dev_xdp_link(dev, mode)) {
9209 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9210 		return -EBUSY;
9211 	}
9212 
9213 	/* don't allow if an upper device already has a program */
9214 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9215 		if (dev_xdp_prog_count(upper) > 0) {
9216 			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9217 			return -EEXIST;
9218 		}
9219 	}
9220 
9221 	cur_prog = dev_xdp_prog(dev, mode);
9222 	/* can't replace attached prog with link */
9223 	if (link && cur_prog) {
9224 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9225 		return -EBUSY;
9226 	}
9227 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9228 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
9229 		return -EEXIST;
9230 	}
9231 
9232 	/* put effective new program into new_prog */
9233 	if (link)
9234 		new_prog = link->link.prog;
9235 
9236 	if (new_prog) {
9237 		bool offload = mode == XDP_MODE_HW;
9238 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9239 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
9240 
9241 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9242 			NL_SET_ERR_MSG(extack, "XDP program already attached");
9243 			return -EBUSY;
9244 		}
9245 		if (!offload && dev_xdp_prog(dev, other_mode)) {
9246 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9247 			return -EEXIST;
9248 		}
9249 		if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9250 			NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
9251 			return -EINVAL;
9252 		}
9253 		if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9254 			NL_SET_ERR_MSG(extack, "Program bound to different device");
9255 			return -EINVAL;
9256 		}
9257 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9258 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9259 			return -EINVAL;
9260 		}
9261 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9262 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9263 			return -EINVAL;
9264 		}
9265 	}
9266 
9267 	/* don't call drivers if the effective program didn't change */
9268 	if (new_prog != cur_prog) {
9269 		bpf_op = dev_xdp_bpf_op(dev, mode);
9270 		if (!bpf_op) {
9271 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9272 			return -EOPNOTSUPP;
9273 		}
9274 
9275 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9276 		if (err)
9277 			return err;
9278 	}
9279 
9280 	if (link)
9281 		dev_xdp_set_link(dev, mode, link);
9282 	else
9283 		dev_xdp_set_prog(dev, mode, new_prog);
9284 	if (cur_prog)
9285 		bpf_prog_put(cur_prog);
9286 
9287 	return 0;
9288 }
9289 
9290 static int dev_xdp_attach_link(struct net_device *dev,
9291 			       struct netlink_ext_ack *extack,
9292 			       struct bpf_xdp_link *link)
9293 {
9294 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9295 }
9296 
9297 static int dev_xdp_detach_link(struct net_device *dev,
9298 			       struct netlink_ext_ack *extack,
9299 			       struct bpf_xdp_link *link)
9300 {
9301 	enum bpf_xdp_mode mode;
9302 	bpf_op_t bpf_op;
9303 
9304 	ASSERT_RTNL();
9305 
9306 	mode = dev_xdp_mode(dev, link->flags);
9307 	if (dev_xdp_link(dev, mode) != link)
9308 		return -EINVAL;
9309 
9310 	bpf_op = dev_xdp_bpf_op(dev, mode);
9311 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9312 	dev_xdp_set_link(dev, mode, NULL);
9313 	return 0;
9314 }
9315 
9316 static void bpf_xdp_link_release(struct bpf_link *link)
9317 {
9318 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9319 
9320 	rtnl_lock();
9321 
9322 	/* if racing with net_device's tear down, xdp_link->dev might be
9323 	 * already NULL, in which case link was already auto-detached
9324 	 */
9325 	if (xdp_link->dev) {
9326 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9327 		xdp_link->dev = NULL;
9328 	}
9329 
9330 	rtnl_unlock();
9331 }
9332 
9333 static int bpf_xdp_link_detach(struct bpf_link *link)
9334 {
9335 	bpf_xdp_link_release(link);
9336 	return 0;
9337 }
9338 
9339 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9340 {
9341 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9342 
9343 	kfree(xdp_link);
9344 }
9345 
9346 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9347 				     struct seq_file *seq)
9348 {
9349 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9350 	u32 ifindex = 0;
9351 
9352 	rtnl_lock();
9353 	if (xdp_link->dev)
9354 		ifindex = xdp_link->dev->ifindex;
9355 	rtnl_unlock();
9356 
9357 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
9358 }
9359 
9360 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9361 				       struct bpf_link_info *info)
9362 {
9363 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9364 	u32 ifindex = 0;
9365 
9366 	rtnl_lock();
9367 	if (xdp_link->dev)
9368 		ifindex = xdp_link->dev->ifindex;
9369 	rtnl_unlock();
9370 
9371 	info->xdp.ifindex = ifindex;
9372 	return 0;
9373 }
9374 
9375 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9376 			       struct bpf_prog *old_prog)
9377 {
9378 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9379 	enum bpf_xdp_mode mode;
9380 	bpf_op_t bpf_op;
9381 	int err = 0;
9382 
9383 	rtnl_lock();
9384 
9385 	/* link might have been auto-released already, so fail */
9386 	if (!xdp_link->dev) {
9387 		err = -ENOLINK;
9388 		goto out_unlock;
9389 	}
9390 
9391 	if (old_prog && link->prog != old_prog) {
9392 		err = -EPERM;
9393 		goto out_unlock;
9394 	}
9395 	old_prog = link->prog;
9396 	if (old_prog->type != new_prog->type ||
9397 	    old_prog->expected_attach_type != new_prog->expected_attach_type) {
9398 		err = -EINVAL;
9399 		goto out_unlock;
9400 	}
9401 
9402 	if (old_prog == new_prog) {
9403 		/* no-op, don't disturb drivers */
9404 		bpf_prog_put(new_prog);
9405 		goto out_unlock;
9406 	}
9407 
9408 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9409 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9410 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9411 			      xdp_link->flags, new_prog);
9412 	if (err)
9413 		goto out_unlock;
9414 
9415 	old_prog = xchg(&link->prog, new_prog);
9416 	bpf_prog_put(old_prog);
9417 
9418 out_unlock:
9419 	rtnl_unlock();
9420 	return err;
9421 }
9422 
9423 static const struct bpf_link_ops bpf_xdp_link_lops = {
9424 	.release = bpf_xdp_link_release,
9425 	.dealloc = bpf_xdp_link_dealloc,
9426 	.detach = bpf_xdp_link_detach,
9427 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
9428 	.fill_link_info = bpf_xdp_link_fill_link_info,
9429 	.update_prog = bpf_xdp_link_update,
9430 };
9431 
9432 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9433 {
9434 	struct net *net = current->nsproxy->net_ns;
9435 	struct bpf_link_primer link_primer;
9436 	struct bpf_xdp_link *link;
9437 	struct net_device *dev;
9438 	int err, fd;
9439 
9440 	rtnl_lock();
9441 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9442 	if (!dev) {
9443 		rtnl_unlock();
9444 		return -EINVAL;
9445 	}
9446 
9447 	link = kzalloc(sizeof(*link), GFP_USER);
9448 	if (!link) {
9449 		err = -ENOMEM;
9450 		goto unlock;
9451 	}
9452 
9453 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9454 	link->dev = dev;
9455 	link->flags = attr->link_create.flags;
9456 
9457 	err = bpf_link_prime(&link->link, &link_primer);
9458 	if (err) {
9459 		kfree(link);
9460 		goto unlock;
9461 	}
9462 
9463 	err = dev_xdp_attach_link(dev, NULL, link);
9464 	rtnl_unlock();
9465 
9466 	if (err) {
9467 		link->dev = NULL;
9468 		bpf_link_cleanup(&link_primer);
9469 		goto out_put_dev;
9470 	}
9471 
9472 	fd = bpf_link_settle(&link_primer);
9473 	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
9474 	dev_put(dev);
9475 	return fd;
9476 
9477 unlock:
9478 	rtnl_unlock();
9479 
9480 out_put_dev:
9481 	dev_put(dev);
9482 	return err;
9483 }
9484 
9485 /**
9486  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
9487  *	@dev: device
9488  *	@extack: netlink extended ack
9489  *	@fd: new program fd or negative value to clear
9490  *	@expected_fd: old program fd that userspace expects to replace or clear
9491  *	@flags: xdp-related flags
9492  *
9493  *	Set or clear a bpf program for a device
9494  */
9495 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9496 		      int fd, int expected_fd, u32 flags)
9497 {
9498 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9499 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9500 	int err;
9501 
9502 	ASSERT_RTNL();
9503 
9504 	if (fd >= 0) {
9505 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9506 						 mode != XDP_MODE_SKB);
9507 		if (IS_ERR(new_prog))
9508 			return PTR_ERR(new_prog);
9509 	}
9510 
9511 	if (expected_fd >= 0) {
9512 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9513 						 mode != XDP_MODE_SKB);
9514 		if (IS_ERR(old_prog)) {
9515 			err = PTR_ERR(old_prog);
9516 			old_prog = NULL;
9517 			goto err_out;
9518 		}
9519 	}
9520 
9521 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9522 
9523 err_out:
9524 	if (err && new_prog)
9525 		bpf_prog_put(new_prog);
9526 	if (old_prog)
9527 		bpf_prog_put(old_prog);
9528 	return err;
9529 }
9530 
9531 /**
9532  *	dev_new_index	-	allocate an ifindex
9533  *	@net: the applicable net namespace
9534  *
9535  *	Returns a suitable unique value for a new device interface
9536  *	number.  The caller must hold the rtnl semaphore or the
9537  *	dev_base_lock to be sure it remains unique.
9538  */
9539 static int dev_new_index(struct net *net)
9540 {
9541 	int ifindex = net->ifindex;
9542 
9543 	for (;;) {
9544 		if (++ifindex <= 0)
9545 			ifindex = 1;
9546 		if (!__dev_get_by_index(net, ifindex))
9547 			return net->ifindex = ifindex;
9548 	}
9549 }
9550 
9551 /* Delayed registration/unregisteration */
9552 LIST_HEAD(net_todo_list);
9553 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9554 
9555 static void net_set_todo(struct net_device *dev)
9556 {
9557 	list_add_tail(&dev->todo_list, &net_todo_list);
9558 	atomic_inc(&dev_net(dev)->dev_unreg_count);
9559 }
9560 
9561 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9562 	struct net_device *upper, netdev_features_t features)
9563 {
9564 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9565 	netdev_features_t feature;
9566 	int feature_bit;
9567 
9568 	for_each_netdev_feature(upper_disables, feature_bit) {
9569 		feature = __NETIF_F_BIT(feature_bit);
9570 		if (!(upper->wanted_features & feature)
9571 		    && (features & feature)) {
9572 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9573 				   &feature, upper->name);
9574 			features &= ~feature;
9575 		}
9576 	}
9577 
9578 	return features;
9579 }
9580 
9581 static void netdev_sync_lower_features(struct net_device *upper,
9582 	struct net_device *lower, netdev_features_t features)
9583 {
9584 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9585 	netdev_features_t feature;
9586 	int feature_bit;
9587 
9588 	for_each_netdev_feature(upper_disables, feature_bit) {
9589 		feature = __NETIF_F_BIT(feature_bit);
9590 		if (!(features & feature) && (lower->features & feature)) {
9591 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9592 				   &feature, lower->name);
9593 			lower->wanted_features &= ~feature;
9594 			__netdev_update_features(lower);
9595 
9596 			if (unlikely(lower->features & feature))
9597 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9598 					    &feature, lower->name);
9599 			else
9600 				netdev_features_change(lower);
9601 		}
9602 	}
9603 }
9604 
9605 static netdev_features_t netdev_fix_features(struct net_device *dev,
9606 	netdev_features_t features)
9607 {
9608 	/* Fix illegal checksum combinations */
9609 	if ((features & NETIF_F_HW_CSUM) &&
9610 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9611 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9612 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9613 	}
9614 
9615 	/* TSO requires that SG is present as well. */
9616 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9617 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9618 		features &= ~NETIF_F_ALL_TSO;
9619 	}
9620 
9621 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9622 					!(features & NETIF_F_IP_CSUM)) {
9623 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9624 		features &= ~NETIF_F_TSO;
9625 		features &= ~NETIF_F_TSO_ECN;
9626 	}
9627 
9628 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9629 					 !(features & NETIF_F_IPV6_CSUM)) {
9630 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9631 		features &= ~NETIF_F_TSO6;
9632 	}
9633 
9634 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9635 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9636 		features &= ~NETIF_F_TSO_MANGLEID;
9637 
9638 	/* TSO ECN requires that TSO is present as well. */
9639 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9640 		features &= ~NETIF_F_TSO_ECN;
9641 
9642 	/* Software GSO depends on SG. */
9643 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9644 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9645 		features &= ~NETIF_F_GSO;
9646 	}
9647 
9648 	/* GSO partial features require GSO partial be set */
9649 	if ((features & dev->gso_partial_features) &&
9650 	    !(features & NETIF_F_GSO_PARTIAL)) {
9651 		netdev_dbg(dev,
9652 			   "Dropping partially supported GSO features since no GSO partial.\n");
9653 		features &= ~dev->gso_partial_features;
9654 	}
9655 
9656 	if (!(features & NETIF_F_RXCSUM)) {
9657 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9658 		 * successfully merged by hardware must also have the
9659 		 * checksum verified by hardware.  If the user does not
9660 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
9661 		 */
9662 		if (features & NETIF_F_GRO_HW) {
9663 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9664 			features &= ~NETIF_F_GRO_HW;
9665 		}
9666 	}
9667 
9668 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
9669 	if (features & NETIF_F_RXFCS) {
9670 		if (features & NETIF_F_LRO) {
9671 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9672 			features &= ~NETIF_F_LRO;
9673 		}
9674 
9675 		if (features & NETIF_F_GRO_HW) {
9676 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9677 			features &= ~NETIF_F_GRO_HW;
9678 		}
9679 	}
9680 
9681 	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9682 		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9683 		features &= ~NETIF_F_LRO;
9684 	}
9685 
9686 	if (features & NETIF_F_HW_TLS_TX) {
9687 		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9688 			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9689 		bool hw_csum = features & NETIF_F_HW_CSUM;
9690 
9691 		if (!ip_csum && !hw_csum) {
9692 			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9693 			features &= ~NETIF_F_HW_TLS_TX;
9694 		}
9695 	}
9696 
9697 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9698 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9699 		features &= ~NETIF_F_HW_TLS_RX;
9700 	}
9701 
9702 	return features;
9703 }
9704 
9705 int __netdev_update_features(struct net_device *dev)
9706 {
9707 	struct net_device *upper, *lower;
9708 	netdev_features_t features;
9709 	struct list_head *iter;
9710 	int err = -1;
9711 
9712 	ASSERT_RTNL();
9713 
9714 	features = netdev_get_wanted_features(dev);
9715 
9716 	if (dev->netdev_ops->ndo_fix_features)
9717 		features = dev->netdev_ops->ndo_fix_features(dev, features);
9718 
9719 	/* driver might be less strict about feature dependencies */
9720 	features = netdev_fix_features(dev, features);
9721 
9722 	/* some features can't be enabled if they're off on an upper device */
9723 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9724 		features = netdev_sync_upper_features(dev, upper, features);
9725 
9726 	if (dev->features == features)
9727 		goto sync_lower;
9728 
9729 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9730 		&dev->features, &features);
9731 
9732 	if (dev->netdev_ops->ndo_set_features)
9733 		err = dev->netdev_ops->ndo_set_features(dev, features);
9734 	else
9735 		err = 0;
9736 
9737 	if (unlikely(err < 0)) {
9738 		netdev_err(dev,
9739 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9740 			err, &features, &dev->features);
9741 		/* return non-0 since some features might have changed and
9742 		 * it's better to fire a spurious notification than miss it
9743 		 */
9744 		return -1;
9745 	}
9746 
9747 sync_lower:
9748 	/* some features must be disabled on lower devices when disabled
9749 	 * on an upper device (think: bonding master or bridge)
9750 	 */
9751 	netdev_for_each_lower_dev(dev, lower, iter)
9752 		netdev_sync_lower_features(dev, lower, features);
9753 
9754 	if (!err) {
9755 		netdev_features_t diff = features ^ dev->features;
9756 
9757 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9758 			/* udp_tunnel_{get,drop}_rx_info both need
9759 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9760 			 * device, or they won't do anything.
9761 			 * Thus we need to update dev->features
9762 			 * *before* calling udp_tunnel_get_rx_info,
9763 			 * but *after* calling udp_tunnel_drop_rx_info.
9764 			 */
9765 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9766 				dev->features = features;
9767 				udp_tunnel_get_rx_info(dev);
9768 			} else {
9769 				udp_tunnel_drop_rx_info(dev);
9770 			}
9771 		}
9772 
9773 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9774 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9775 				dev->features = features;
9776 				err |= vlan_get_rx_ctag_filter_info(dev);
9777 			} else {
9778 				vlan_drop_rx_ctag_filter_info(dev);
9779 			}
9780 		}
9781 
9782 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9783 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9784 				dev->features = features;
9785 				err |= vlan_get_rx_stag_filter_info(dev);
9786 			} else {
9787 				vlan_drop_rx_stag_filter_info(dev);
9788 			}
9789 		}
9790 
9791 		dev->features = features;
9792 	}
9793 
9794 	return err < 0 ? 0 : 1;
9795 }
9796 
9797 /**
9798  *	netdev_update_features - recalculate device features
9799  *	@dev: the device to check
9800  *
9801  *	Recalculate dev->features set and send notifications if it
9802  *	has changed. Should be called after driver or hardware dependent
9803  *	conditions might have changed that influence the features.
9804  */
9805 void netdev_update_features(struct net_device *dev)
9806 {
9807 	if (__netdev_update_features(dev))
9808 		netdev_features_change(dev);
9809 }
9810 EXPORT_SYMBOL(netdev_update_features);
9811 
9812 /**
9813  *	netdev_change_features - recalculate device features
9814  *	@dev: the device to check
9815  *
9816  *	Recalculate dev->features set and send notifications even
9817  *	if they have not changed. Should be called instead of
9818  *	netdev_update_features() if also dev->vlan_features might
9819  *	have changed to allow the changes to be propagated to stacked
9820  *	VLAN devices.
9821  */
9822 void netdev_change_features(struct net_device *dev)
9823 {
9824 	__netdev_update_features(dev);
9825 	netdev_features_change(dev);
9826 }
9827 EXPORT_SYMBOL(netdev_change_features);
9828 
9829 /**
9830  *	netif_stacked_transfer_operstate -	transfer operstate
9831  *	@rootdev: the root or lower level device to transfer state from
9832  *	@dev: the device to transfer operstate to
9833  *
9834  *	Transfer operational state from root to device. This is normally
9835  *	called when a stacking relationship exists between the root
9836  *	device and the device(a leaf device).
9837  */
9838 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9839 					struct net_device *dev)
9840 {
9841 	if (rootdev->operstate == IF_OPER_DORMANT)
9842 		netif_dormant_on(dev);
9843 	else
9844 		netif_dormant_off(dev);
9845 
9846 	if (rootdev->operstate == IF_OPER_TESTING)
9847 		netif_testing_on(dev);
9848 	else
9849 		netif_testing_off(dev);
9850 
9851 	if (netif_carrier_ok(rootdev))
9852 		netif_carrier_on(dev);
9853 	else
9854 		netif_carrier_off(dev);
9855 }
9856 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9857 
9858 static int netif_alloc_rx_queues(struct net_device *dev)
9859 {
9860 	unsigned int i, count = dev->num_rx_queues;
9861 	struct netdev_rx_queue *rx;
9862 	size_t sz = count * sizeof(*rx);
9863 	int err = 0;
9864 
9865 	BUG_ON(count < 1);
9866 
9867 	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9868 	if (!rx)
9869 		return -ENOMEM;
9870 
9871 	dev->_rx = rx;
9872 
9873 	for (i = 0; i < count; i++) {
9874 		rx[i].dev = dev;
9875 
9876 		/* XDP RX-queue setup */
9877 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9878 		if (err < 0)
9879 			goto err_rxq_info;
9880 	}
9881 	return 0;
9882 
9883 err_rxq_info:
9884 	/* Rollback successful reg's and free other resources */
9885 	while (i--)
9886 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9887 	kvfree(dev->_rx);
9888 	dev->_rx = NULL;
9889 	return err;
9890 }
9891 
9892 static void netif_free_rx_queues(struct net_device *dev)
9893 {
9894 	unsigned int i, count = dev->num_rx_queues;
9895 
9896 	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
9897 	if (!dev->_rx)
9898 		return;
9899 
9900 	for (i = 0; i < count; i++)
9901 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9902 
9903 	kvfree(dev->_rx);
9904 }
9905 
9906 static void netdev_init_one_queue(struct net_device *dev,
9907 				  struct netdev_queue *queue, void *_unused)
9908 {
9909 	/* Initialize queue lock */
9910 	spin_lock_init(&queue->_xmit_lock);
9911 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
9912 	queue->xmit_lock_owner = -1;
9913 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
9914 	queue->dev = dev;
9915 #ifdef CONFIG_BQL
9916 	dql_init(&queue->dql, HZ);
9917 #endif
9918 }
9919 
9920 static void netif_free_tx_queues(struct net_device *dev)
9921 {
9922 	kvfree(dev->_tx);
9923 }
9924 
9925 static int netif_alloc_netdev_queues(struct net_device *dev)
9926 {
9927 	unsigned int count = dev->num_tx_queues;
9928 	struct netdev_queue *tx;
9929 	size_t sz = count * sizeof(*tx);
9930 
9931 	if (count < 1 || count > 0xffff)
9932 		return -EINVAL;
9933 
9934 	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9935 	if (!tx)
9936 		return -ENOMEM;
9937 
9938 	dev->_tx = tx;
9939 
9940 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9941 	spin_lock_init(&dev->tx_global_lock);
9942 
9943 	return 0;
9944 }
9945 
9946 void netif_tx_stop_all_queues(struct net_device *dev)
9947 {
9948 	unsigned int i;
9949 
9950 	for (i = 0; i < dev->num_tx_queues; i++) {
9951 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
9952 
9953 		netif_tx_stop_queue(txq);
9954 	}
9955 }
9956 EXPORT_SYMBOL(netif_tx_stop_all_queues);
9957 
9958 /**
9959  * register_netdevice() - register a network device
9960  * @dev: device to register
9961  *
9962  * Take a prepared network device structure and make it externally accessible.
9963  * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
9964  * Callers must hold the rtnl lock - you may want register_netdev()
9965  * instead of this.
9966  */
9967 int register_netdevice(struct net_device *dev)
9968 {
9969 	int ret;
9970 	struct net *net = dev_net(dev);
9971 
9972 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9973 		     NETDEV_FEATURE_COUNT);
9974 	BUG_ON(dev_boot_phase);
9975 	ASSERT_RTNL();
9976 
9977 	might_sleep();
9978 
9979 	/* When net_device's are persistent, this will be fatal. */
9980 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
9981 	BUG_ON(!net);
9982 
9983 	ret = ethtool_check_ops(dev->ethtool_ops);
9984 	if (ret)
9985 		return ret;
9986 
9987 	spin_lock_init(&dev->addr_list_lock);
9988 	netdev_set_addr_lockdep_class(dev);
9989 
9990 	ret = dev_get_valid_name(net, dev, dev->name);
9991 	if (ret < 0)
9992 		goto out;
9993 
9994 	ret = -ENOMEM;
9995 	dev->name_node = netdev_name_node_head_alloc(dev);
9996 	if (!dev->name_node)
9997 		goto out;
9998 
9999 	/* Init, if this function is available */
10000 	if (dev->netdev_ops->ndo_init) {
10001 		ret = dev->netdev_ops->ndo_init(dev);
10002 		if (ret) {
10003 			if (ret > 0)
10004 				ret = -EIO;
10005 			goto err_free_name;
10006 		}
10007 	}
10008 
10009 	if (((dev->hw_features | dev->features) &
10010 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
10011 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10012 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10013 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10014 		ret = -EINVAL;
10015 		goto err_uninit;
10016 	}
10017 
10018 	ret = -EBUSY;
10019 	if (!dev->ifindex)
10020 		dev->ifindex = dev_new_index(net);
10021 	else if (__dev_get_by_index(net, dev->ifindex))
10022 		goto err_uninit;
10023 
10024 	/* Transfer changeable features to wanted_features and enable
10025 	 * software offloads (GSO and GRO).
10026 	 */
10027 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10028 	dev->features |= NETIF_F_SOFT_FEATURES;
10029 
10030 	if (dev->udp_tunnel_nic_info) {
10031 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10032 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10033 	}
10034 
10035 	dev->wanted_features = dev->features & dev->hw_features;
10036 
10037 	if (!(dev->flags & IFF_LOOPBACK))
10038 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
10039 
10040 	/* If IPv4 TCP segmentation offload is supported we should also
10041 	 * allow the device to enable segmenting the frame with the option
10042 	 * of ignoring a static IP ID value.  This doesn't enable the
10043 	 * feature itself but allows the user to enable it later.
10044 	 */
10045 	if (dev->hw_features & NETIF_F_TSO)
10046 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
10047 	if (dev->vlan_features & NETIF_F_TSO)
10048 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10049 	if (dev->mpls_features & NETIF_F_TSO)
10050 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10051 	if (dev->hw_enc_features & NETIF_F_TSO)
10052 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10053 
10054 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10055 	 */
10056 	dev->vlan_features |= NETIF_F_HIGHDMA;
10057 
10058 	/* Make NETIF_F_SG inheritable to tunnel devices.
10059 	 */
10060 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10061 
10062 	/* Make NETIF_F_SG inheritable to MPLS.
10063 	 */
10064 	dev->mpls_features |= NETIF_F_SG;
10065 
10066 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10067 	ret = notifier_to_errno(ret);
10068 	if (ret)
10069 		goto err_uninit;
10070 
10071 	ret = netdev_register_kobject(dev);
10072 	write_lock(&dev_base_lock);
10073 	dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
10074 	write_unlock(&dev_base_lock);
10075 	if (ret)
10076 		goto err_uninit_notify;
10077 
10078 	__netdev_update_features(dev);
10079 
10080 	/*
10081 	 *	Default initial state at registry is that the
10082 	 *	device is present.
10083 	 */
10084 
10085 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10086 
10087 	linkwatch_init_dev(dev);
10088 
10089 	dev_init_scheduler(dev);
10090 
10091 	netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10092 	list_netdevice(dev);
10093 
10094 	add_device_randomness(dev->dev_addr, dev->addr_len);
10095 
10096 	/* If the device has permanent device address, driver should
10097 	 * set dev_addr and also addr_assign_type should be set to
10098 	 * NET_ADDR_PERM (default value).
10099 	 */
10100 	if (dev->addr_assign_type == NET_ADDR_PERM)
10101 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10102 
10103 	/* Notify protocols, that a new device appeared. */
10104 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10105 	ret = notifier_to_errno(ret);
10106 	if (ret) {
10107 		/* Expect explicit free_netdev() on failure */
10108 		dev->needs_free_netdev = false;
10109 		unregister_netdevice_queue(dev, NULL);
10110 		goto out;
10111 	}
10112 	/*
10113 	 *	Prevent userspace races by waiting until the network
10114 	 *	device is fully setup before sending notifications.
10115 	 */
10116 	if (!dev->rtnl_link_ops ||
10117 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10118 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10119 
10120 out:
10121 	return ret;
10122 
10123 err_uninit_notify:
10124 	call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10125 err_uninit:
10126 	if (dev->netdev_ops->ndo_uninit)
10127 		dev->netdev_ops->ndo_uninit(dev);
10128 	if (dev->priv_destructor)
10129 		dev->priv_destructor(dev);
10130 err_free_name:
10131 	netdev_name_node_free(dev->name_node);
10132 	goto out;
10133 }
10134 EXPORT_SYMBOL(register_netdevice);
10135 
10136 /**
10137  *	init_dummy_netdev	- init a dummy network device for NAPI
10138  *	@dev: device to init
10139  *
10140  *	This takes a network device structure and initialize the minimum
10141  *	amount of fields so it can be used to schedule NAPI polls without
10142  *	registering a full blown interface. This is to be used by drivers
10143  *	that need to tie several hardware interfaces to a single NAPI
10144  *	poll scheduler due to HW limitations.
10145  */
10146 int init_dummy_netdev(struct net_device *dev)
10147 {
10148 	/* Clear everything. Note we don't initialize spinlocks
10149 	 * are they aren't supposed to be taken by any of the
10150 	 * NAPI code and this dummy netdev is supposed to be
10151 	 * only ever used for NAPI polls
10152 	 */
10153 	memset(dev, 0, sizeof(struct net_device));
10154 
10155 	/* make sure we BUG if trying to hit standard
10156 	 * register/unregister code path
10157 	 */
10158 	dev->reg_state = NETREG_DUMMY;
10159 
10160 	/* NAPI wants this */
10161 	INIT_LIST_HEAD(&dev->napi_list);
10162 
10163 	/* a dummy interface is started by default */
10164 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10165 	set_bit(__LINK_STATE_START, &dev->state);
10166 
10167 	/* napi_busy_loop stats accounting wants this */
10168 	dev_net_set(dev, &init_net);
10169 
10170 	/* Note : We dont allocate pcpu_refcnt for dummy devices,
10171 	 * because users of this 'device' dont need to change
10172 	 * its refcount.
10173 	 */
10174 
10175 	return 0;
10176 }
10177 EXPORT_SYMBOL_GPL(init_dummy_netdev);
10178 
10179 
10180 /**
10181  *	register_netdev	- register a network device
10182  *	@dev: device to register
10183  *
10184  *	Take a completed network device structure and add it to the kernel
10185  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10186  *	chain. 0 is returned on success. A negative errno code is returned
10187  *	on a failure to set up the device, or if the name is a duplicate.
10188  *
10189  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
10190  *	and expands the device name if you passed a format string to
10191  *	alloc_netdev.
10192  */
10193 int register_netdev(struct net_device *dev)
10194 {
10195 	int err;
10196 
10197 	if (rtnl_lock_killable())
10198 		return -EINTR;
10199 	err = register_netdevice(dev);
10200 	rtnl_unlock();
10201 	return err;
10202 }
10203 EXPORT_SYMBOL(register_netdev);
10204 
10205 int netdev_refcnt_read(const struct net_device *dev)
10206 {
10207 #ifdef CONFIG_PCPU_DEV_REFCNT
10208 	int i, refcnt = 0;
10209 
10210 	for_each_possible_cpu(i)
10211 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10212 	return refcnt;
10213 #else
10214 	return refcount_read(&dev->dev_refcnt);
10215 #endif
10216 }
10217 EXPORT_SYMBOL(netdev_refcnt_read);
10218 
10219 int netdev_unregister_timeout_secs __read_mostly = 10;
10220 
10221 #define WAIT_REFS_MIN_MSECS 1
10222 #define WAIT_REFS_MAX_MSECS 250
10223 /**
10224  * netdev_wait_allrefs_any - wait until all references are gone.
10225  * @list: list of net_devices to wait on
10226  *
10227  * This is called when unregistering network devices.
10228  *
10229  * Any protocol or device that holds a reference should register
10230  * for netdevice notification, and cleanup and put back the
10231  * reference if they receive an UNREGISTER event.
10232  * We can get stuck here if buggy protocols don't correctly
10233  * call dev_put.
10234  */
10235 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10236 {
10237 	unsigned long rebroadcast_time, warning_time;
10238 	struct net_device *dev;
10239 	int wait = 0;
10240 
10241 	rebroadcast_time = warning_time = jiffies;
10242 
10243 	list_for_each_entry(dev, list, todo_list)
10244 		if (netdev_refcnt_read(dev) == 1)
10245 			return dev;
10246 
10247 	while (true) {
10248 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10249 			rtnl_lock();
10250 
10251 			/* Rebroadcast unregister notification */
10252 			list_for_each_entry(dev, list, todo_list)
10253 				call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10254 
10255 			__rtnl_unlock();
10256 			rcu_barrier();
10257 			rtnl_lock();
10258 
10259 			list_for_each_entry(dev, list, todo_list)
10260 				if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10261 					     &dev->state)) {
10262 					/* We must not have linkwatch events
10263 					 * pending on unregister. If this
10264 					 * happens, we simply run the queue
10265 					 * unscheduled, resulting in a noop
10266 					 * for this device.
10267 					 */
10268 					linkwatch_run_queue();
10269 					break;
10270 				}
10271 
10272 			__rtnl_unlock();
10273 
10274 			rebroadcast_time = jiffies;
10275 		}
10276 
10277 		if (!wait) {
10278 			rcu_barrier();
10279 			wait = WAIT_REFS_MIN_MSECS;
10280 		} else {
10281 			msleep(wait);
10282 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10283 		}
10284 
10285 		list_for_each_entry(dev, list, todo_list)
10286 			if (netdev_refcnt_read(dev) == 1)
10287 				return dev;
10288 
10289 		if (time_after(jiffies, warning_time +
10290 			       READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
10291 			list_for_each_entry(dev, list, todo_list) {
10292 				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10293 					 dev->name, netdev_refcnt_read(dev));
10294 				ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10295 			}
10296 
10297 			warning_time = jiffies;
10298 		}
10299 	}
10300 }
10301 
10302 /* The sequence is:
10303  *
10304  *	rtnl_lock();
10305  *	...
10306  *	register_netdevice(x1);
10307  *	register_netdevice(x2);
10308  *	...
10309  *	unregister_netdevice(y1);
10310  *	unregister_netdevice(y2);
10311  *      ...
10312  *	rtnl_unlock();
10313  *	free_netdev(y1);
10314  *	free_netdev(y2);
10315  *
10316  * We are invoked by rtnl_unlock().
10317  * This allows us to deal with problems:
10318  * 1) We can delete sysfs objects which invoke hotplug
10319  *    without deadlocking with linkwatch via keventd.
10320  * 2) Since we run with the RTNL semaphore not held, we can sleep
10321  *    safely in order to wait for the netdev refcnt to drop to zero.
10322  *
10323  * We must not return until all unregister events added during
10324  * the interval the lock was held have been completed.
10325  */
10326 void netdev_run_todo(void)
10327 {
10328 	struct net_device *dev, *tmp;
10329 	struct list_head list;
10330 #ifdef CONFIG_LOCKDEP
10331 	struct list_head unlink_list;
10332 
10333 	list_replace_init(&net_unlink_list, &unlink_list);
10334 
10335 	while (!list_empty(&unlink_list)) {
10336 		struct net_device *dev = list_first_entry(&unlink_list,
10337 							  struct net_device,
10338 							  unlink_list);
10339 		list_del_init(&dev->unlink_list);
10340 		dev->nested_level = dev->lower_level - 1;
10341 	}
10342 #endif
10343 
10344 	/* Snapshot list, allow later requests */
10345 	list_replace_init(&net_todo_list, &list);
10346 
10347 	__rtnl_unlock();
10348 
10349 	/* Wait for rcu callbacks to finish before next phase */
10350 	if (!list_empty(&list))
10351 		rcu_barrier();
10352 
10353 	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10354 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10355 			netdev_WARN(dev, "run_todo but not unregistering\n");
10356 			list_del(&dev->todo_list);
10357 			continue;
10358 		}
10359 
10360 		write_lock(&dev_base_lock);
10361 		dev->reg_state = NETREG_UNREGISTERED;
10362 		write_unlock(&dev_base_lock);
10363 		linkwatch_forget_dev(dev);
10364 	}
10365 
10366 	while (!list_empty(&list)) {
10367 		dev = netdev_wait_allrefs_any(&list);
10368 		list_del(&dev->todo_list);
10369 
10370 		/* paranoia */
10371 		BUG_ON(netdev_refcnt_read(dev) != 1);
10372 		BUG_ON(!list_empty(&dev->ptype_all));
10373 		BUG_ON(!list_empty(&dev->ptype_specific));
10374 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
10375 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10376 
10377 		if (dev->priv_destructor)
10378 			dev->priv_destructor(dev);
10379 		if (dev->needs_free_netdev)
10380 			free_netdev(dev);
10381 
10382 		if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10383 			wake_up(&netdev_unregistering_wq);
10384 
10385 		/* Free network device */
10386 		kobject_put(&dev->dev.kobj);
10387 	}
10388 }
10389 
10390 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10391  * all the same fields in the same order as net_device_stats, with only
10392  * the type differing, but rtnl_link_stats64 may have additional fields
10393  * at the end for newer counters.
10394  */
10395 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10396 			     const struct net_device_stats *netdev_stats)
10397 {
10398 	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
10399 	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
10400 	u64 *dst = (u64 *)stats64;
10401 
10402 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10403 	for (i = 0; i < n; i++)
10404 		dst[i] = (unsigned long)atomic_long_read(&src[i]);
10405 	/* zero out counters that only exist in rtnl_link_stats64 */
10406 	memset((char *)stats64 + n * sizeof(u64), 0,
10407 	       sizeof(*stats64) - n * sizeof(u64));
10408 }
10409 EXPORT_SYMBOL(netdev_stats_to_stats64);
10410 
10411 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
10412 {
10413 	struct net_device_core_stats __percpu *p;
10414 
10415 	p = alloc_percpu_gfp(struct net_device_core_stats,
10416 			     GFP_ATOMIC | __GFP_NOWARN);
10417 
10418 	if (p && cmpxchg(&dev->core_stats, NULL, p))
10419 		free_percpu(p);
10420 
10421 	/* This READ_ONCE() pairs with the cmpxchg() above */
10422 	return READ_ONCE(dev->core_stats);
10423 }
10424 EXPORT_SYMBOL(netdev_core_stats_alloc);
10425 
10426 /**
10427  *	dev_get_stats	- get network device statistics
10428  *	@dev: device to get statistics from
10429  *	@storage: place to store stats
10430  *
10431  *	Get network statistics from device. Return @storage.
10432  *	The device driver may provide its own method by setting
10433  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10434  *	otherwise the internal statistics structure is used.
10435  */
10436 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10437 					struct rtnl_link_stats64 *storage)
10438 {
10439 	const struct net_device_ops *ops = dev->netdev_ops;
10440 	const struct net_device_core_stats __percpu *p;
10441 
10442 	if (ops->ndo_get_stats64) {
10443 		memset(storage, 0, sizeof(*storage));
10444 		ops->ndo_get_stats64(dev, storage);
10445 	} else if (ops->ndo_get_stats) {
10446 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10447 	} else {
10448 		netdev_stats_to_stats64(storage, &dev->stats);
10449 	}
10450 
10451 	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10452 	p = READ_ONCE(dev->core_stats);
10453 	if (p) {
10454 		const struct net_device_core_stats *core_stats;
10455 		int i;
10456 
10457 		for_each_possible_cpu(i) {
10458 			core_stats = per_cpu_ptr(p, i);
10459 			storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10460 			storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10461 			storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10462 			storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10463 		}
10464 	}
10465 	return storage;
10466 }
10467 EXPORT_SYMBOL(dev_get_stats);
10468 
10469 /**
10470  *	dev_fetch_sw_netstats - get per-cpu network device statistics
10471  *	@s: place to store stats
10472  *	@netstats: per-cpu network stats to read from
10473  *
10474  *	Read per-cpu network statistics and populate the related fields in @s.
10475  */
10476 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10477 			   const struct pcpu_sw_netstats __percpu *netstats)
10478 {
10479 	int cpu;
10480 
10481 	for_each_possible_cpu(cpu) {
10482 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10483 		const struct pcpu_sw_netstats *stats;
10484 		unsigned int start;
10485 
10486 		stats = per_cpu_ptr(netstats, cpu);
10487 		do {
10488 			start = u64_stats_fetch_begin(&stats->syncp);
10489 			rx_packets = u64_stats_read(&stats->rx_packets);
10490 			rx_bytes   = u64_stats_read(&stats->rx_bytes);
10491 			tx_packets = u64_stats_read(&stats->tx_packets);
10492 			tx_bytes   = u64_stats_read(&stats->tx_bytes);
10493 		} while (u64_stats_fetch_retry(&stats->syncp, start));
10494 
10495 		s->rx_packets += rx_packets;
10496 		s->rx_bytes   += rx_bytes;
10497 		s->tx_packets += tx_packets;
10498 		s->tx_bytes   += tx_bytes;
10499 	}
10500 }
10501 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10502 
10503 /**
10504  *	dev_get_tstats64 - ndo_get_stats64 implementation
10505  *	@dev: device to get statistics from
10506  *	@s: place to store stats
10507  *
10508  *	Populate @s from dev->stats and dev->tstats. Can be used as
10509  *	ndo_get_stats64() callback.
10510  */
10511 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10512 {
10513 	netdev_stats_to_stats64(s, &dev->stats);
10514 	dev_fetch_sw_netstats(s, dev->tstats);
10515 }
10516 EXPORT_SYMBOL_GPL(dev_get_tstats64);
10517 
10518 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10519 {
10520 	struct netdev_queue *queue = dev_ingress_queue(dev);
10521 
10522 #ifdef CONFIG_NET_CLS_ACT
10523 	if (queue)
10524 		return queue;
10525 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10526 	if (!queue)
10527 		return NULL;
10528 	netdev_init_one_queue(dev, queue, NULL);
10529 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10530 	queue->qdisc_sleeping = &noop_qdisc;
10531 	rcu_assign_pointer(dev->ingress_queue, queue);
10532 #endif
10533 	return queue;
10534 }
10535 
10536 static const struct ethtool_ops default_ethtool_ops;
10537 
10538 void netdev_set_default_ethtool_ops(struct net_device *dev,
10539 				    const struct ethtool_ops *ops)
10540 {
10541 	if (dev->ethtool_ops == &default_ethtool_ops)
10542 		dev->ethtool_ops = ops;
10543 }
10544 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10545 
10546 /**
10547  * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
10548  * @dev: netdev to enable the IRQ coalescing on
10549  *
10550  * Sets a conservative default for SW IRQ coalescing. Users can use
10551  * sysfs attributes to override the default values.
10552  */
10553 void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
10554 {
10555 	WARN_ON(dev->reg_state == NETREG_REGISTERED);
10556 
10557 	dev->gro_flush_timeout = 20000;
10558 	dev->napi_defer_hard_irqs = 1;
10559 }
10560 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
10561 
10562 void netdev_freemem(struct net_device *dev)
10563 {
10564 	char *addr = (char *)dev - dev->padded;
10565 
10566 	kvfree(addr);
10567 }
10568 
10569 /**
10570  * alloc_netdev_mqs - allocate network device
10571  * @sizeof_priv: size of private data to allocate space for
10572  * @name: device name format string
10573  * @name_assign_type: origin of device name
10574  * @setup: callback to initialize device
10575  * @txqs: the number of TX subqueues to allocate
10576  * @rxqs: the number of RX subqueues to allocate
10577  *
10578  * Allocates a struct net_device with private data area for driver use
10579  * and performs basic initialization.  Also allocates subqueue structs
10580  * for each queue on the device.
10581  */
10582 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10583 		unsigned char name_assign_type,
10584 		void (*setup)(struct net_device *),
10585 		unsigned int txqs, unsigned int rxqs)
10586 {
10587 	struct net_device *dev;
10588 	unsigned int alloc_size;
10589 	struct net_device *p;
10590 
10591 	BUG_ON(strlen(name) >= sizeof(dev->name));
10592 
10593 	if (txqs < 1) {
10594 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10595 		return NULL;
10596 	}
10597 
10598 	if (rxqs < 1) {
10599 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10600 		return NULL;
10601 	}
10602 
10603 	alloc_size = sizeof(struct net_device);
10604 	if (sizeof_priv) {
10605 		/* ensure 32-byte alignment of private area */
10606 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10607 		alloc_size += sizeof_priv;
10608 	}
10609 	/* ensure 32-byte alignment of whole construct */
10610 	alloc_size += NETDEV_ALIGN - 1;
10611 
10612 	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10613 	if (!p)
10614 		return NULL;
10615 
10616 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
10617 	dev->padded = (char *)dev - (char *)p;
10618 
10619 	ref_tracker_dir_init(&dev->refcnt_tracker, 128);
10620 #ifdef CONFIG_PCPU_DEV_REFCNT
10621 	dev->pcpu_refcnt = alloc_percpu(int);
10622 	if (!dev->pcpu_refcnt)
10623 		goto free_dev;
10624 	__dev_hold(dev);
10625 #else
10626 	refcount_set(&dev->dev_refcnt, 1);
10627 #endif
10628 
10629 	if (dev_addr_init(dev))
10630 		goto free_pcpu;
10631 
10632 	dev_mc_init(dev);
10633 	dev_uc_init(dev);
10634 
10635 	dev_net_set(dev, &init_net);
10636 
10637 	dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10638 	dev->gso_max_segs = GSO_MAX_SEGS;
10639 	dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10640 	dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10641 	dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10642 	dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10643 	dev->tso_max_segs = TSO_MAX_SEGS;
10644 	dev->upper_level = 1;
10645 	dev->lower_level = 1;
10646 #ifdef CONFIG_LOCKDEP
10647 	dev->nested_level = 0;
10648 	INIT_LIST_HEAD(&dev->unlink_list);
10649 #endif
10650 
10651 	INIT_LIST_HEAD(&dev->napi_list);
10652 	INIT_LIST_HEAD(&dev->unreg_list);
10653 	INIT_LIST_HEAD(&dev->close_list);
10654 	INIT_LIST_HEAD(&dev->link_watch_list);
10655 	INIT_LIST_HEAD(&dev->adj_list.upper);
10656 	INIT_LIST_HEAD(&dev->adj_list.lower);
10657 	INIT_LIST_HEAD(&dev->ptype_all);
10658 	INIT_LIST_HEAD(&dev->ptype_specific);
10659 	INIT_LIST_HEAD(&dev->net_notifier_list);
10660 #ifdef CONFIG_NET_SCHED
10661 	hash_init(dev->qdisc_hash);
10662 #endif
10663 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10664 	setup(dev);
10665 
10666 	if (!dev->tx_queue_len) {
10667 		dev->priv_flags |= IFF_NO_QUEUE;
10668 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10669 	}
10670 
10671 	dev->num_tx_queues = txqs;
10672 	dev->real_num_tx_queues = txqs;
10673 	if (netif_alloc_netdev_queues(dev))
10674 		goto free_all;
10675 
10676 	dev->num_rx_queues = rxqs;
10677 	dev->real_num_rx_queues = rxqs;
10678 	if (netif_alloc_rx_queues(dev))
10679 		goto free_all;
10680 
10681 	strcpy(dev->name, name);
10682 	dev->name_assign_type = name_assign_type;
10683 	dev->group = INIT_NETDEV_GROUP;
10684 	if (!dev->ethtool_ops)
10685 		dev->ethtool_ops = &default_ethtool_ops;
10686 
10687 	nf_hook_netdev_init(dev);
10688 
10689 	return dev;
10690 
10691 free_all:
10692 	free_netdev(dev);
10693 	return NULL;
10694 
10695 free_pcpu:
10696 #ifdef CONFIG_PCPU_DEV_REFCNT
10697 	free_percpu(dev->pcpu_refcnt);
10698 free_dev:
10699 #endif
10700 	netdev_freemem(dev);
10701 	return NULL;
10702 }
10703 EXPORT_SYMBOL(alloc_netdev_mqs);
10704 
10705 /**
10706  * free_netdev - free network device
10707  * @dev: device
10708  *
10709  * This function does the last stage of destroying an allocated device
10710  * interface. The reference to the device object is released. If this
10711  * is the last reference then it will be freed.Must be called in process
10712  * context.
10713  */
10714 void free_netdev(struct net_device *dev)
10715 {
10716 	struct napi_struct *p, *n;
10717 
10718 	might_sleep();
10719 
10720 	/* When called immediately after register_netdevice() failed the unwind
10721 	 * handling may still be dismantling the device. Handle that case by
10722 	 * deferring the free.
10723 	 */
10724 	if (dev->reg_state == NETREG_UNREGISTERING) {
10725 		ASSERT_RTNL();
10726 		dev->needs_free_netdev = true;
10727 		return;
10728 	}
10729 
10730 	netif_free_tx_queues(dev);
10731 	netif_free_rx_queues(dev);
10732 
10733 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10734 
10735 	/* Flush device addresses */
10736 	dev_addr_flush(dev);
10737 
10738 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10739 		netif_napi_del(p);
10740 
10741 	ref_tracker_dir_exit(&dev->refcnt_tracker);
10742 #ifdef CONFIG_PCPU_DEV_REFCNT
10743 	free_percpu(dev->pcpu_refcnt);
10744 	dev->pcpu_refcnt = NULL;
10745 #endif
10746 	free_percpu(dev->core_stats);
10747 	dev->core_stats = NULL;
10748 	free_percpu(dev->xdp_bulkq);
10749 	dev->xdp_bulkq = NULL;
10750 
10751 	/*  Compatibility with error handling in drivers */
10752 	if (dev->reg_state == NETREG_UNINITIALIZED) {
10753 		netdev_freemem(dev);
10754 		return;
10755 	}
10756 
10757 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10758 	dev->reg_state = NETREG_RELEASED;
10759 
10760 	/* will free via device release */
10761 	put_device(&dev->dev);
10762 }
10763 EXPORT_SYMBOL(free_netdev);
10764 
10765 /**
10766  *	synchronize_net -  Synchronize with packet receive processing
10767  *
10768  *	Wait for packets currently being received to be done.
10769  *	Does not block later packets from starting.
10770  */
10771 void synchronize_net(void)
10772 {
10773 	might_sleep();
10774 	if (rtnl_is_locked())
10775 		synchronize_rcu_expedited();
10776 	else
10777 		synchronize_rcu();
10778 }
10779 EXPORT_SYMBOL(synchronize_net);
10780 
10781 /**
10782  *	unregister_netdevice_queue - remove device from the kernel
10783  *	@dev: device
10784  *	@head: list
10785  *
10786  *	This function shuts down a device interface and removes it
10787  *	from the kernel tables.
10788  *	If head not NULL, device is queued to be unregistered later.
10789  *
10790  *	Callers must hold the rtnl semaphore.  You may want
10791  *	unregister_netdev() instead of this.
10792  */
10793 
10794 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10795 {
10796 	ASSERT_RTNL();
10797 
10798 	if (head) {
10799 		list_move_tail(&dev->unreg_list, head);
10800 	} else {
10801 		LIST_HEAD(single);
10802 
10803 		list_add(&dev->unreg_list, &single);
10804 		unregister_netdevice_many(&single);
10805 	}
10806 }
10807 EXPORT_SYMBOL(unregister_netdevice_queue);
10808 
10809 void unregister_netdevice_many_notify(struct list_head *head,
10810 				      u32 portid, const struct nlmsghdr *nlh)
10811 {
10812 	struct net_device *dev, *tmp;
10813 	LIST_HEAD(close_head);
10814 
10815 	BUG_ON(dev_boot_phase);
10816 	ASSERT_RTNL();
10817 
10818 	if (list_empty(head))
10819 		return;
10820 
10821 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10822 		/* Some devices call without registering
10823 		 * for initialization unwind. Remove those
10824 		 * devices and proceed with the remaining.
10825 		 */
10826 		if (dev->reg_state == NETREG_UNINITIALIZED) {
10827 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10828 				 dev->name, dev);
10829 
10830 			WARN_ON(1);
10831 			list_del(&dev->unreg_list);
10832 			continue;
10833 		}
10834 		dev->dismantle = true;
10835 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
10836 	}
10837 
10838 	/* If device is running, close it first. */
10839 	list_for_each_entry(dev, head, unreg_list)
10840 		list_add_tail(&dev->close_list, &close_head);
10841 	dev_close_many(&close_head, true);
10842 
10843 	list_for_each_entry(dev, head, unreg_list) {
10844 		/* And unlink it from device chain. */
10845 		write_lock(&dev_base_lock);
10846 		unlist_netdevice(dev, false);
10847 		dev->reg_state = NETREG_UNREGISTERING;
10848 		write_unlock(&dev_base_lock);
10849 	}
10850 	flush_all_backlogs();
10851 
10852 	synchronize_net();
10853 
10854 	list_for_each_entry(dev, head, unreg_list) {
10855 		struct sk_buff *skb = NULL;
10856 
10857 		/* Shutdown queueing discipline. */
10858 		dev_shutdown(dev);
10859 
10860 		dev_xdp_uninstall(dev);
10861 		bpf_dev_bound_netdev_unregister(dev);
10862 
10863 		netdev_offload_xstats_disable_all(dev);
10864 
10865 		/* Notify protocols, that we are about to destroy
10866 		 * this device. They should clean all the things.
10867 		 */
10868 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10869 
10870 		if (!dev->rtnl_link_ops ||
10871 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10872 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10873 						     GFP_KERNEL, NULL, 0,
10874 						     portid, nlh);
10875 
10876 		/*
10877 		 *	Flush the unicast and multicast chains
10878 		 */
10879 		dev_uc_flush(dev);
10880 		dev_mc_flush(dev);
10881 
10882 		netdev_name_node_alt_flush(dev);
10883 		netdev_name_node_free(dev->name_node);
10884 
10885 		call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10886 
10887 		if (dev->netdev_ops->ndo_uninit)
10888 			dev->netdev_ops->ndo_uninit(dev);
10889 
10890 		if (skb)
10891 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
10892 
10893 		/* Notifier chain MUST detach us all upper devices. */
10894 		WARN_ON(netdev_has_any_upper_dev(dev));
10895 		WARN_ON(netdev_has_any_lower_dev(dev));
10896 
10897 		/* Remove entries from kobject tree */
10898 		netdev_unregister_kobject(dev);
10899 #ifdef CONFIG_XPS
10900 		/* Remove XPS queueing entries */
10901 		netif_reset_xps_queues_gt(dev, 0);
10902 #endif
10903 	}
10904 
10905 	synchronize_net();
10906 
10907 	list_for_each_entry(dev, head, unreg_list) {
10908 		netdev_put(dev, &dev->dev_registered_tracker);
10909 		net_set_todo(dev);
10910 	}
10911 
10912 	list_del(head);
10913 }
10914 
10915 /**
10916  *	unregister_netdevice_many - unregister many devices
10917  *	@head: list of devices
10918  *
10919  *  Note: As most callers use a stack allocated list_head,
10920  *  we force a list_del() to make sure stack wont be corrupted later.
10921  */
10922 void unregister_netdevice_many(struct list_head *head)
10923 {
10924 	unregister_netdevice_many_notify(head, 0, NULL);
10925 }
10926 EXPORT_SYMBOL(unregister_netdevice_many);
10927 
10928 /**
10929  *	unregister_netdev - remove device from the kernel
10930  *	@dev: device
10931  *
10932  *	This function shuts down a device interface and removes it
10933  *	from the kernel tables.
10934  *
10935  *	This is just a wrapper for unregister_netdevice that takes
10936  *	the rtnl semaphore.  In general you want to use this and not
10937  *	unregister_netdevice.
10938  */
10939 void unregister_netdev(struct net_device *dev)
10940 {
10941 	rtnl_lock();
10942 	unregister_netdevice(dev);
10943 	rtnl_unlock();
10944 }
10945 EXPORT_SYMBOL(unregister_netdev);
10946 
10947 /**
10948  *	__dev_change_net_namespace - move device to different nethost namespace
10949  *	@dev: device
10950  *	@net: network namespace
10951  *	@pat: If not NULL name pattern to try if the current device name
10952  *	      is already taken in the destination network namespace.
10953  *	@new_ifindex: If not zero, specifies device index in the target
10954  *	              namespace.
10955  *
10956  *	This function shuts down a device interface and moves it
10957  *	to a new network namespace. On success 0 is returned, on
10958  *	a failure a netagive errno code is returned.
10959  *
10960  *	Callers must hold the rtnl semaphore.
10961  */
10962 
10963 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10964 			       const char *pat, int new_ifindex)
10965 {
10966 	struct net *net_old = dev_net(dev);
10967 	int err, new_nsid;
10968 
10969 	ASSERT_RTNL();
10970 
10971 	/* Don't allow namespace local devices to be moved. */
10972 	err = -EINVAL;
10973 	if (dev->features & NETIF_F_NETNS_LOCAL)
10974 		goto out;
10975 
10976 	/* Ensure the device has been registrered */
10977 	if (dev->reg_state != NETREG_REGISTERED)
10978 		goto out;
10979 
10980 	/* Get out if there is nothing todo */
10981 	err = 0;
10982 	if (net_eq(net_old, net))
10983 		goto out;
10984 
10985 	/* Pick the destination device name, and ensure
10986 	 * we can use it in the destination network namespace.
10987 	 */
10988 	err = -EEXIST;
10989 	if (netdev_name_in_use(net, dev->name)) {
10990 		/* We get here if we can't use the current device name */
10991 		if (!pat)
10992 			goto out;
10993 		err = dev_get_valid_name(net, dev, pat);
10994 		if (err < 0)
10995 			goto out;
10996 	}
10997 
10998 	/* Check that new_ifindex isn't used yet. */
10999 	err = -EBUSY;
11000 	if (new_ifindex && __dev_get_by_index(net, new_ifindex))
11001 		goto out;
11002 
11003 	/*
11004 	 * And now a mini version of register_netdevice unregister_netdevice.
11005 	 */
11006 
11007 	/* If device is running close it first. */
11008 	dev_close(dev);
11009 
11010 	/* And unlink it from device chain */
11011 	unlist_netdevice(dev, true);
11012 
11013 	synchronize_net();
11014 
11015 	/* Shutdown queueing discipline. */
11016 	dev_shutdown(dev);
11017 
11018 	/* Notify protocols, that we are about to destroy
11019 	 * this device. They should clean all the things.
11020 	 *
11021 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
11022 	 * This is wanted because this way 8021q and macvlan know
11023 	 * the device is just moving and can keep their slaves up.
11024 	 */
11025 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11026 	rcu_barrier();
11027 
11028 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11029 	/* If there is an ifindex conflict assign a new one */
11030 	if (!new_ifindex) {
11031 		if (__dev_get_by_index(net, dev->ifindex))
11032 			new_ifindex = dev_new_index(net);
11033 		else
11034 			new_ifindex = dev->ifindex;
11035 	}
11036 
11037 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11038 			    new_ifindex);
11039 
11040 	/*
11041 	 *	Flush the unicast and multicast chains
11042 	 */
11043 	dev_uc_flush(dev);
11044 	dev_mc_flush(dev);
11045 
11046 	/* Send a netdev-removed uevent to the old namespace */
11047 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11048 	netdev_adjacent_del_links(dev);
11049 
11050 	/* Move per-net netdevice notifiers that are following the netdevice */
11051 	move_netdevice_notifiers_dev_net(dev, net);
11052 
11053 	/* Actually switch the network namespace */
11054 	dev_net_set(dev, net);
11055 	dev->ifindex = new_ifindex;
11056 
11057 	/* Send a netdev-add uevent to the new namespace */
11058 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11059 	netdev_adjacent_add_links(dev);
11060 
11061 	/* Fixup kobjects */
11062 	err = device_rename(&dev->dev, dev->name);
11063 	WARN_ON(err);
11064 
11065 	/* Adapt owner in case owning user namespace of target network
11066 	 * namespace is different from the original one.
11067 	 */
11068 	err = netdev_change_owner(dev, net_old, net);
11069 	WARN_ON(err);
11070 
11071 	/* Add the device back in the hashes */
11072 	list_netdevice(dev);
11073 
11074 	/* Notify protocols, that a new device appeared. */
11075 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
11076 
11077 	/*
11078 	 *	Prevent userspace races by waiting until the network
11079 	 *	device is fully setup before sending notifications.
11080 	 */
11081 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11082 
11083 	synchronize_net();
11084 	err = 0;
11085 out:
11086 	return err;
11087 }
11088 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
11089 
11090 static int dev_cpu_dead(unsigned int oldcpu)
11091 {
11092 	struct sk_buff **list_skb;
11093 	struct sk_buff *skb;
11094 	unsigned int cpu;
11095 	struct softnet_data *sd, *oldsd, *remsd = NULL;
11096 
11097 	local_irq_disable();
11098 	cpu = smp_processor_id();
11099 	sd = &per_cpu(softnet_data, cpu);
11100 	oldsd = &per_cpu(softnet_data, oldcpu);
11101 
11102 	/* Find end of our completion_queue. */
11103 	list_skb = &sd->completion_queue;
11104 	while (*list_skb)
11105 		list_skb = &(*list_skb)->next;
11106 	/* Append completion queue from offline CPU. */
11107 	*list_skb = oldsd->completion_queue;
11108 	oldsd->completion_queue = NULL;
11109 
11110 	/* Append output queue from offline CPU. */
11111 	if (oldsd->output_queue) {
11112 		*sd->output_queue_tailp = oldsd->output_queue;
11113 		sd->output_queue_tailp = oldsd->output_queue_tailp;
11114 		oldsd->output_queue = NULL;
11115 		oldsd->output_queue_tailp = &oldsd->output_queue;
11116 	}
11117 	/* Append NAPI poll list from offline CPU, with one exception :
11118 	 * process_backlog() must be called by cpu owning percpu backlog.
11119 	 * We properly handle process_queue & input_pkt_queue later.
11120 	 */
11121 	while (!list_empty(&oldsd->poll_list)) {
11122 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11123 							    struct napi_struct,
11124 							    poll_list);
11125 
11126 		list_del_init(&napi->poll_list);
11127 		if (napi->poll == process_backlog)
11128 			napi->state = 0;
11129 		else
11130 			____napi_schedule(sd, napi);
11131 	}
11132 
11133 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
11134 	local_irq_enable();
11135 
11136 #ifdef CONFIG_RPS
11137 	remsd = oldsd->rps_ipi_list;
11138 	oldsd->rps_ipi_list = NULL;
11139 #endif
11140 	/* send out pending IPI's on offline CPU */
11141 	net_rps_send_ipi(remsd);
11142 
11143 	/* Process offline CPU's input_pkt_queue */
11144 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11145 		netif_rx(skb);
11146 		input_queue_head_incr(oldsd);
11147 	}
11148 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11149 		netif_rx(skb);
11150 		input_queue_head_incr(oldsd);
11151 	}
11152 
11153 	return 0;
11154 }
11155 
11156 /**
11157  *	netdev_increment_features - increment feature set by one
11158  *	@all: current feature set
11159  *	@one: new feature set
11160  *	@mask: mask feature set
11161  *
11162  *	Computes a new feature set after adding a device with feature set
11163  *	@one to the master device with current feature set @all.  Will not
11164  *	enable anything that is off in @mask. Returns the new feature set.
11165  */
11166 netdev_features_t netdev_increment_features(netdev_features_t all,
11167 	netdev_features_t one, netdev_features_t mask)
11168 {
11169 	if (mask & NETIF_F_HW_CSUM)
11170 		mask |= NETIF_F_CSUM_MASK;
11171 	mask |= NETIF_F_VLAN_CHALLENGED;
11172 
11173 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11174 	all &= one | ~NETIF_F_ALL_FOR_ALL;
11175 
11176 	/* If one device supports hw checksumming, set for all. */
11177 	if (all & NETIF_F_HW_CSUM)
11178 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11179 
11180 	return all;
11181 }
11182 EXPORT_SYMBOL(netdev_increment_features);
11183 
11184 static struct hlist_head * __net_init netdev_create_hash(void)
11185 {
11186 	int i;
11187 	struct hlist_head *hash;
11188 
11189 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11190 	if (hash != NULL)
11191 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
11192 			INIT_HLIST_HEAD(&hash[i]);
11193 
11194 	return hash;
11195 }
11196 
11197 /* Initialize per network namespace state */
11198 static int __net_init netdev_init(struct net *net)
11199 {
11200 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
11201 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
11202 
11203 	INIT_LIST_HEAD(&net->dev_base_head);
11204 
11205 	net->dev_name_head = netdev_create_hash();
11206 	if (net->dev_name_head == NULL)
11207 		goto err_name;
11208 
11209 	net->dev_index_head = netdev_create_hash();
11210 	if (net->dev_index_head == NULL)
11211 		goto err_idx;
11212 
11213 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11214 
11215 	return 0;
11216 
11217 err_idx:
11218 	kfree(net->dev_name_head);
11219 err_name:
11220 	return -ENOMEM;
11221 }
11222 
11223 /**
11224  *	netdev_drivername - network driver for the device
11225  *	@dev: network device
11226  *
11227  *	Determine network driver for device.
11228  */
11229 const char *netdev_drivername(const struct net_device *dev)
11230 {
11231 	const struct device_driver *driver;
11232 	const struct device *parent;
11233 	const char *empty = "";
11234 
11235 	parent = dev->dev.parent;
11236 	if (!parent)
11237 		return empty;
11238 
11239 	driver = parent->driver;
11240 	if (driver && driver->name)
11241 		return driver->name;
11242 	return empty;
11243 }
11244 
11245 static void __netdev_printk(const char *level, const struct net_device *dev,
11246 			    struct va_format *vaf)
11247 {
11248 	if (dev && dev->dev.parent) {
11249 		dev_printk_emit(level[1] - '0',
11250 				dev->dev.parent,
11251 				"%s %s %s%s: %pV",
11252 				dev_driver_string(dev->dev.parent),
11253 				dev_name(dev->dev.parent),
11254 				netdev_name(dev), netdev_reg_state(dev),
11255 				vaf);
11256 	} else if (dev) {
11257 		printk("%s%s%s: %pV",
11258 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
11259 	} else {
11260 		printk("%s(NULL net_device): %pV", level, vaf);
11261 	}
11262 }
11263 
11264 void netdev_printk(const char *level, const struct net_device *dev,
11265 		   const char *format, ...)
11266 {
11267 	struct va_format vaf;
11268 	va_list args;
11269 
11270 	va_start(args, format);
11271 
11272 	vaf.fmt = format;
11273 	vaf.va = &args;
11274 
11275 	__netdev_printk(level, dev, &vaf);
11276 
11277 	va_end(args);
11278 }
11279 EXPORT_SYMBOL(netdev_printk);
11280 
11281 #define define_netdev_printk_level(func, level)			\
11282 void func(const struct net_device *dev, const char *fmt, ...)	\
11283 {								\
11284 	struct va_format vaf;					\
11285 	va_list args;						\
11286 								\
11287 	va_start(args, fmt);					\
11288 								\
11289 	vaf.fmt = fmt;						\
11290 	vaf.va = &args;						\
11291 								\
11292 	__netdev_printk(level, dev, &vaf);			\
11293 								\
11294 	va_end(args);						\
11295 }								\
11296 EXPORT_SYMBOL(func);
11297 
11298 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11299 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11300 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11301 define_netdev_printk_level(netdev_err, KERN_ERR);
11302 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11303 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11304 define_netdev_printk_level(netdev_info, KERN_INFO);
11305 
11306 static void __net_exit netdev_exit(struct net *net)
11307 {
11308 	kfree(net->dev_name_head);
11309 	kfree(net->dev_index_head);
11310 	if (net != &init_net)
11311 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11312 }
11313 
11314 static struct pernet_operations __net_initdata netdev_net_ops = {
11315 	.init = netdev_init,
11316 	.exit = netdev_exit,
11317 };
11318 
11319 static void __net_exit default_device_exit_net(struct net *net)
11320 {
11321 	struct net_device *dev, *aux;
11322 	/*
11323 	 * Push all migratable network devices back to the
11324 	 * initial network namespace
11325 	 */
11326 	ASSERT_RTNL();
11327 	for_each_netdev_safe(net, dev, aux) {
11328 		int err;
11329 		char fb_name[IFNAMSIZ];
11330 
11331 		/* Ignore unmoveable devices (i.e. loopback) */
11332 		if (dev->features & NETIF_F_NETNS_LOCAL)
11333 			continue;
11334 
11335 		/* Leave virtual devices for the generic cleanup */
11336 		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11337 			continue;
11338 
11339 		/* Push remaining network devices to init_net */
11340 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11341 		if (netdev_name_in_use(&init_net, fb_name))
11342 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
11343 		err = dev_change_net_namespace(dev, &init_net, fb_name);
11344 		if (err) {
11345 			pr_emerg("%s: failed to move %s to init_net: %d\n",
11346 				 __func__, dev->name, err);
11347 			BUG();
11348 		}
11349 	}
11350 }
11351 
11352 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11353 {
11354 	/* At exit all network devices most be removed from a network
11355 	 * namespace.  Do this in the reverse order of registration.
11356 	 * Do this across as many network namespaces as possible to
11357 	 * improve batching efficiency.
11358 	 */
11359 	struct net_device *dev;
11360 	struct net *net;
11361 	LIST_HEAD(dev_kill_list);
11362 
11363 	rtnl_lock();
11364 	list_for_each_entry(net, net_list, exit_list) {
11365 		default_device_exit_net(net);
11366 		cond_resched();
11367 	}
11368 
11369 	list_for_each_entry(net, net_list, exit_list) {
11370 		for_each_netdev_reverse(net, dev) {
11371 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11372 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11373 			else
11374 				unregister_netdevice_queue(dev, &dev_kill_list);
11375 		}
11376 	}
11377 	unregister_netdevice_many(&dev_kill_list);
11378 	rtnl_unlock();
11379 }
11380 
11381 static struct pernet_operations __net_initdata default_device_ops = {
11382 	.exit_batch = default_device_exit_batch,
11383 };
11384 
11385 /*
11386  *	Initialize the DEV module. At boot time this walks the device list and
11387  *	unhooks any devices that fail to initialise (normally hardware not
11388  *	present) and leaves us with a valid list of present and active devices.
11389  *
11390  */
11391 
11392 /*
11393  *       This is called single threaded during boot, so no need
11394  *       to take the rtnl semaphore.
11395  */
11396 static int __init net_dev_init(void)
11397 {
11398 	int i, rc = -ENOMEM;
11399 
11400 	BUG_ON(!dev_boot_phase);
11401 
11402 	if (dev_proc_init())
11403 		goto out;
11404 
11405 	if (netdev_kobject_init())
11406 		goto out;
11407 
11408 	INIT_LIST_HEAD(&ptype_all);
11409 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
11410 		INIT_LIST_HEAD(&ptype_base[i]);
11411 
11412 	if (register_pernet_subsys(&netdev_net_ops))
11413 		goto out;
11414 
11415 	/*
11416 	 *	Initialise the packet receive queues.
11417 	 */
11418 
11419 	for_each_possible_cpu(i) {
11420 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11421 		struct softnet_data *sd = &per_cpu(softnet_data, i);
11422 
11423 		INIT_WORK(flush, flush_backlog);
11424 
11425 		skb_queue_head_init(&sd->input_pkt_queue);
11426 		skb_queue_head_init(&sd->process_queue);
11427 #ifdef CONFIG_XFRM_OFFLOAD
11428 		skb_queue_head_init(&sd->xfrm_backlog);
11429 #endif
11430 		INIT_LIST_HEAD(&sd->poll_list);
11431 		sd->output_queue_tailp = &sd->output_queue;
11432 #ifdef CONFIG_RPS
11433 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11434 		sd->cpu = i;
11435 #endif
11436 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11437 		spin_lock_init(&sd->defer_lock);
11438 
11439 		init_gro_hash(&sd->backlog);
11440 		sd->backlog.poll = process_backlog;
11441 		sd->backlog.weight = weight_p;
11442 	}
11443 
11444 	dev_boot_phase = 0;
11445 
11446 	/* The loopback device is special if any other network devices
11447 	 * is present in a network namespace the loopback device must
11448 	 * be present. Since we now dynamically allocate and free the
11449 	 * loopback device ensure this invariant is maintained by
11450 	 * keeping the loopback device as the first device on the
11451 	 * list of network devices.  Ensuring the loopback devices
11452 	 * is the first device that appears and the last network device
11453 	 * that disappears.
11454 	 */
11455 	if (register_pernet_device(&loopback_net_ops))
11456 		goto out;
11457 
11458 	if (register_pernet_device(&default_device_ops))
11459 		goto out;
11460 
11461 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11462 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11463 
11464 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11465 				       NULL, dev_cpu_dead);
11466 	WARN_ON(rc < 0);
11467 	rc = 0;
11468 out:
11469 	return rc;
11470 }
11471 
11472 subsys_initcall(net_dev_init);
11473