xref: /linux/net/core/dev.c (revision fc1ca3348a74a1afaa7ffebc2b2f2cc149e11278)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      NET3    Protocol independent device support routines.
4  *
5  *	Derived from the non IP parts of dev.c 1.0.19
6  *              Authors:	Ross Biro
7  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *	Additional Authors:
11  *		Florian la Roche <rzsfl@rz.uni-sb.de>
12  *		Alan Cox <gw4pts@gw4pts.ampr.org>
13  *		David Hinds <dahinds@users.sourceforge.net>
14  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15  *		Adam Sulmicki <adam@cfar.umd.edu>
16  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
17  *
18  *	Changes:
19  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20  *                                      to 2 if register_netdev gets called
21  *                                      before net_dev_init & also removed a
22  *                                      few lines of code in the process.
23  *		Alan Cox	:	device private ioctl copies fields back.
24  *		Alan Cox	:	Transmit queue code does relevant
25  *					stunts to keep the queue safe.
26  *		Alan Cox	:	Fixed double lock.
27  *		Alan Cox	:	Fixed promisc NULL pointer trap
28  *		????????	:	Support the full private ioctl range
29  *		Alan Cox	:	Moved ioctl permission check into
30  *					drivers
31  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
32  *		Alan Cox	:	100 backlog just doesn't cut it when
33  *					you start doing multicast video 8)
34  *		Alan Cox	:	Rewrote net_bh and list manager.
35  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36  *		Alan Cox	:	Took out transmit every packet pass
37  *					Saved a few bytes in the ioctl handler
38  *		Alan Cox	:	Network driver sets packet type before
39  *					calling netif_rx. Saves a function
40  *					call a packet.
41  *		Alan Cox	:	Hashed net_bh()
42  *		Richard Kooijman:	Timestamp fixes.
43  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
44  *		Alan Cox	:	Device lock protection.
45  *              Alan Cox        :       Fixed nasty side effect of device close
46  *					changes.
47  *		Rudi Cilibrasi	:	Pass the right thing to
48  *					set_mac_address()
49  *		Dave Miller	:	32bit quantity for the device lock to
50  *					make it work out on a Sparc.
51  *		Bjorn Ekwall	:	Added KERNELD hack.
52  *		Alan Cox	:	Cleaned up the backlog initialise.
53  *		Craig Metz	:	SIOCGIFCONF fix if space for under
54  *					1 device.
55  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
56  *					is no device open function.
57  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
58  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
59  *		Cyrus Durgin	:	Cleaned for KMOD
60  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
61  *					A network device unload needs to purge
62  *					the backlog queue.
63  *	Paul Rusty Russell	:	SIOCSIFNAME
64  *              Pekka Riikonen  :	Netdev boot-time settings code
65  *              Andrew Morton   :       Make unregister_netdevice wait
66  *                                      indefinitely on dev->refcnt
67  *              J Hadi Salim    :       - Backlog queue sampling
68  *				        - netif_rx() feedback
69  */
70 
71 #include <linux/uaccess.h>
72 #include <linux/bitops.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/rwsem.h>
83 #include <linux/string.h>
84 #include <linux/mm.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/ethtool.h>
93 #include <linux/skbuff.h>
94 #include <linux/kthread.h>
95 #include <linux/bpf.h>
96 #include <linux/bpf_trace.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
102 #include <net/dsa.h>
103 #include <net/dst.h>
104 #include <net/dst_metadata.h>
105 #include <net/gro.h>
106 #include <net/pkt_sched.h>
107 #include <net/pkt_cls.h>
108 #include <net/checksum.h>
109 #include <net/xfrm.h>
110 #include <linux/highmem.h>
111 #include <linux/init.h>
112 #include <linux/module.h>
113 #include <linux/netpoll.h>
114 #include <linux/rcupdate.h>
115 #include <linux/delay.h>
116 #include <net/iw_handler.h>
117 #include <asm/current.h>
118 #include <linux/audit.h>
119 #include <linux/dmaengine.h>
120 #include <linux/err.h>
121 #include <linux/ctype.h>
122 #include <linux/if_arp.h>
123 #include <linux/if_vlan.h>
124 #include <linux/ip.h>
125 #include <net/ip.h>
126 #include <net/mpls.h>
127 #include <linux/ipv6.h>
128 #include <linux/in.h>
129 #include <linux/jhash.h>
130 #include <linux/random.h>
131 #include <trace/events/napi.h>
132 #include <trace/events/net.h>
133 #include <trace/events/skb.h>
134 #include <trace/events/qdisc.h>
135 #include <linux/inetdevice.h>
136 #include <linux/cpu_rmap.h>
137 #include <linux/static_key.h>
138 #include <linux/hashtable.h>
139 #include <linux/vmalloc.h>
140 #include <linux/if_macvlan.h>
141 #include <linux/errqueue.h>
142 #include <linux/hrtimer.h>
143 #include <linux/netfilter_netdev.h>
144 #include <linux/crash_dump.h>
145 #include <linux/sctp.h>
146 #include <net/udp_tunnel.h>
147 #include <linux/net_namespace.h>
148 #include <linux/indirect_call_wrapper.h>
149 #include <net/devlink.h>
150 #include <linux/pm_runtime.h>
151 #include <linux/prandom.h>
152 #include <linux/once_lite.h>
153 
154 #include "net-sysfs.h"
155 
156 
157 static DEFINE_SPINLOCK(ptype_lock);
158 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
159 struct list_head ptype_all __read_mostly;	/* Taps */
160 
161 static int netif_rx_internal(struct sk_buff *skb);
162 static int call_netdevice_notifiers_info(unsigned long val,
163 					 struct netdev_notifier_info *info);
164 static int call_netdevice_notifiers_extack(unsigned long val,
165 					   struct net_device *dev,
166 					   struct netlink_ext_ack *extack);
167 static struct napi_struct *napi_by_id(unsigned int napi_id);
168 
169 /*
170  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
171  * semaphore.
172  *
173  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
174  *
175  * Writers must hold the rtnl semaphore while they loop through the
176  * dev_base_head list, and hold dev_base_lock for writing when they do the
177  * actual updates.  This allows pure readers to access the list even
178  * while a writer is preparing to update it.
179  *
180  * To put it another way, dev_base_lock is held for writing only to
181  * protect against pure readers; the rtnl semaphore provides the
182  * protection against other writers.
183  *
184  * See, for example usages, register_netdevice() and
185  * unregister_netdevice(), which must be called with the rtnl
186  * semaphore held.
187  */
188 DEFINE_RWLOCK(dev_base_lock);
189 EXPORT_SYMBOL(dev_base_lock);
190 
191 static DEFINE_MUTEX(ifalias_mutex);
192 
193 /* protects napi_hash addition/deletion and napi_gen_id */
194 static DEFINE_SPINLOCK(napi_hash_lock);
195 
196 static unsigned int napi_gen_id = NR_CPUS;
197 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
198 
199 static DECLARE_RWSEM(devnet_rename_sem);
200 
201 static inline void dev_base_seq_inc(struct net *net)
202 {
203 	while (++net->dev_base_seq == 0)
204 		;
205 }
206 
207 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
208 {
209 	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
210 
211 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
212 }
213 
214 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
215 {
216 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
217 }
218 
219 static inline void rps_lock(struct softnet_data *sd)
220 {
221 #ifdef CONFIG_RPS
222 	spin_lock(&sd->input_pkt_queue.lock);
223 #endif
224 }
225 
226 static inline void rps_unlock(struct softnet_data *sd)
227 {
228 #ifdef CONFIG_RPS
229 	spin_unlock(&sd->input_pkt_queue.lock);
230 #endif
231 }
232 
233 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
234 						       const char *name)
235 {
236 	struct netdev_name_node *name_node;
237 
238 	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
239 	if (!name_node)
240 		return NULL;
241 	INIT_HLIST_NODE(&name_node->hlist);
242 	name_node->dev = dev;
243 	name_node->name = name;
244 	return name_node;
245 }
246 
247 static struct netdev_name_node *
248 netdev_name_node_head_alloc(struct net_device *dev)
249 {
250 	struct netdev_name_node *name_node;
251 
252 	name_node = netdev_name_node_alloc(dev, dev->name);
253 	if (!name_node)
254 		return NULL;
255 	INIT_LIST_HEAD(&name_node->list);
256 	return name_node;
257 }
258 
259 static void netdev_name_node_free(struct netdev_name_node *name_node)
260 {
261 	kfree(name_node);
262 }
263 
264 static void netdev_name_node_add(struct net *net,
265 				 struct netdev_name_node *name_node)
266 {
267 	hlist_add_head_rcu(&name_node->hlist,
268 			   dev_name_hash(net, name_node->name));
269 }
270 
271 static void netdev_name_node_del(struct netdev_name_node *name_node)
272 {
273 	hlist_del_rcu(&name_node->hlist);
274 }
275 
276 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
277 							const char *name)
278 {
279 	struct hlist_head *head = dev_name_hash(net, name);
280 	struct netdev_name_node *name_node;
281 
282 	hlist_for_each_entry(name_node, head, hlist)
283 		if (!strcmp(name_node->name, name))
284 			return name_node;
285 	return NULL;
286 }
287 
288 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
289 							    const char *name)
290 {
291 	struct hlist_head *head = dev_name_hash(net, name);
292 	struct netdev_name_node *name_node;
293 
294 	hlist_for_each_entry_rcu(name_node, head, hlist)
295 		if (!strcmp(name_node->name, name))
296 			return name_node;
297 	return NULL;
298 }
299 
300 bool netdev_name_in_use(struct net *net, const char *name)
301 {
302 	return netdev_name_node_lookup(net, name);
303 }
304 EXPORT_SYMBOL(netdev_name_in_use);
305 
306 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
307 {
308 	struct netdev_name_node *name_node;
309 	struct net *net = dev_net(dev);
310 
311 	name_node = netdev_name_node_lookup(net, name);
312 	if (name_node)
313 		return -EEXIST;
314 	name_node = netdev_name_node_alloc(dev, name);
315 	if (!name_node)
316 		return -ENOMEM;
317 	netdev_name_node_add(net, name_node);
318 	/* The node that holds dev->name acts as a head of per-device list. */
319 	list_add_tail(&name_node->list, &dev->name_node->list);
320 
321 	return 0;
322 }
323 EXPORT_SYMBOL(netdev_name_node_alt_create);
324 
325 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
326 {
327 	list_del(&name_node->list);
328 	netdev_name_node_del(name_node);
329 	kfree(name_node->name);
330 	netdev_name_node_free(name_node);
331 }
332 
333 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
334 {
335 	struct netdev_name_node *name_node;
336 	struct net *net = dev_net(dev);
337 
338 	name_node = netdev_name_node_lookup(net, name);
339 	if (!name_node)
340 		return -ENOENT;
341 	/* lookup might have found our primary name or a name belonging
342 	 * to another device.
343 	 */
344 	if (name_node == dev->name_node || name_node->dev != dev)
345 		return -EINVAL;
346 
347 	__netdev_name_node_alt_destroy(name_node);
348 
349 	return 0;
350 }
351 EXPORT_SYMBOL(netdev_name_node_alt_destroy);
352 
353 static void netdev_name_node_alt_flush(struct net_device *dev)
354 {
355 	struct netdev_name_node *name_node, *tmp;
356 
357 	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
358 		__netdev_name_node_alt_destroy(name_node);
359 }
360 
361 /* Device list insertion */
362 static void list_netdevice(struct net_device *dev)
363 {
364 	struct net *net = dev_net(dev);
365 
366 	ASSERT_RTNL();
367 
368 	write_lock_bh(&dev_base_lock);
369 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
370 	netdev_name_node_add(net, dev->name_node);
371 	hlist_add_head_rcu(&dev->index_hlist,
372 			   dev_index_hash(net, dev->ifindex));
373 	write_unlock_bh(&dev_base_lock);
374 
375 	dev_base_seq_inc(net);
376 }
377 
378 /* Device list removal
379  * caller must respect a RCU grace period before freeing/reusing dev
380  */
381 static void unlist_netdevice(struct net_device *dev)
382 {
383 	ASSERT_RTNL();
384 
385 	/* Unlink dev from the device chain */
386 	write_lock_bh(&dev_base_lock);
387 	list_del_rcu(&dev->dev_list);
388 	netdev_name_node_del(dev->name_node);
389 	hlist_del_rcu(&dev->index_hlist);
390 	write_unlock_bh(&dev_base_lock);
391 
392 	dev_base_seq_inc(dev_net(dev));
393 }
394 
395 /*
396  *	Our notifier list
397  */
398 
399 static RAW_NOTIFIER_HEAD(netdev_chain);
400 
401 /*
402  *	Device drivers call our routines to queue packets here. We empty the
403  *	queue in the local softnet handler.
404  */
405 
406 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
407 EXPORT_PER_CPU_SYMBOL(softnet_data);
408 
409 #ifdef CONFIG_LOCKDEP
410 /*
411  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
412  * according to dev->type
413  */
414 static const unsigned short netdev_lock_type[] = {
415 	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
416 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
417 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
418 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
419 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
420 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
421 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
422 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
423 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
424 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
425 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
426 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
427 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
428 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
429 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
430 
431 static const char *const netdev_lock_name[] = {
432 	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
433 	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
434 	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
435 	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
436 	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
437 	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
438 	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
439 	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
440 	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
441 	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
442 	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
443 	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
444 	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
445 	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
446 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
447 
448 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
449 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
450 
451 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
452 {
453 	int i;
454 
455 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
456 		if (netdev_lock_type[i] == dev_type)
457 			return i;
458 	/* the last key is used by default */
459 	return ARRAY_SIZE(netdev_lock_type) - 1;
460 }
461 
462 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
463 						 unsigned short dev_type)
464 {
465 	int i;
466 
467 	i = netdev_lock_pos(dev_type);
468 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
469 				   netdev_lock_name[i]);
470 }
471 
472 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
473 {
474 	int i;
475 
476 	i = netdev_lock_pos(dev->type);
477 	lockdep_set_class_and_name(&dev->addr_list_lock,
478 				   &netdev_addr_lock_key[i],
479 				   netdev_lock_name[i]);
480 }
481 #else
482 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
483 						 unsigned short dev_type)
484 {
485 }
486 
487 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
488 {
489 }
490 #endif
491 
492 /*******************************************************************************
493  *
494  *		Protocol management and registration routines
495  *
496  *******************************************************************************/
497 
498 
499 /*
500  *	Add a protocol ID to the list. Now that the input handler is
501  *	smarter we can dispense with all the messy stuff that used to be
502  *	here.
503  *
504  *	BEWARE!!! Protocol handlers, mangling input packets,
505  *	MUST BE last in hash buckets and checking protocol handlers
506  *	MUST start from promiscuous ptype_all chain in net_bh.
507  *	It is true now, do not change it.
508  *	Explanation follows: if protocol handler, mangling packet, will
509  *	be the first on list, it is not able to sense, that packet
510  *	is cloned and should be copied-on-write, so that it will
511  *	change it and subsequent readers will get broken packet.
512  *							--ANK (980803)
513  */
514 
515 static inline struct list_head *ptype_head(const struct packet_type *pt)
516 {
517 	if (pt->type == htons(ETH_P_ALL))
518 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
519 	else
520 		return pt->dev ? &pt->dev->ptype_specific :
521 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
522 }
523 
524 /**
525  *	dev_add_pack - add packet handler
526  *	@pt: packet type declaration
527  *
528  *	Add a protocol handler to the networking stack. The passed &packet_type
529  *	is linked into kernel lists and may not be freed until it has been
530  *	removed from the kernel lists.
531  *
532  *	This call does not sleep therefore it can not
533  *	guarantee all CPU's that are in middle of receiving packets
534  *	will see the new packet type (until the next received packet).
535  */
536 
537 void dev_add_pack(struct packet_type *pt)
538 {
539 	struct list_head *head = ptype_head(pt);
540 
541 	spin_lock(&ptype_lock);
542 	list_add_rcu(&pt->list, head);
543 	spin_unlock(&ptype_lock);
544 }
545 EXPORT_SYMBOL(dev_add_pack);
546 
547 /**
548  *	__dev_remove_pack	 - remove packet handler
549  *	@pt: packet type declaration
550  *
551  *	Remove a protocol handler that was previously added to the kernel
552  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
553  *	from the kernel lists and can be freed or reused once this function
554  *	returns.
555  *
556  *      The packet type might still be in use by receivers
557  *	and must not be freed until after all the CPU's have gone
558  *	through a quiescent state.
559  */
560 void __dev_remove_pack(struct packet_type *pt)
561 {
562 	struct list_head *head = ptype_head(pt);
563 	struct packet_type *pt1;
564 
565 	spin_lock(&ptype_lock);
566 
567 	list_for_each_entry(pt1, head, list) {
568 		if (pt == pt1) {
569 			list_del_rcu(&pt->list);
570 			goto out;
571 		}
572 	}
573 
574 	pr_warn("dev_remove_pack: %p not found\n", pt);
575 out:
576 	spin_unlock(&ptype_lock);
577 }
578 EXPORT_SYMBOL(__dev_remove_pack);
579 
580 /**
581  *	dev_remove_pack	 - remove packet handler
582  *	@pt: packet type declaration
583  *
584  *	Remove a protocol handler that was previously added to the kernel
585  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
586  *	from the kernel lists and can be freed or reused once this function
587  *	returns.
588  *
589  *	This call sleeps to guarantee that no CPU is looking at the packet
590  *	type after return.
591  */
592 void dev_remove_pack(struct packet_type *pt)
593 {
594 	__dev_remove_pack(pt);
595 
596 	synchronize_net();
597 }
598 EXPORT_SYMBOL(dev_remove_pack);
599 
600 
601 /*******************************************************************************
602  *
603  *			    Device Interface Subroutines
604  *
605  *******************************************************************************/
606 
607 /**
608  *	dev_get_iflink	- get 'iflink' value of a interface
609  *	@dev: targeted interface
610  *
611  *	Indicates the ifindex the interface is linked to.
612  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
613  */
614 
615 int dev_get_iflink(const struct net_device *dev)
616 {
617 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
618 		return dev->netdev_ops->ndo_get_iflink(dev);
619 
620 	return dev->ifindex;
621 }
622 EXPORT_SYMBOL(dev_get_iflink);
623 
624 /**
625  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
626  *	@dev: targeted interface
627  *	@skb: The packet.
628  *
629  *	For better visibility of tunnel traffic OVS needs to retrieve
630  *	egress tunnel information for a packet. Following API allows
631  *	user to get this info.
632  */
633 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
634 {
635 	struct ip_tunnel_info *info;
636 
637 	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
638 		return -EINVAL;
639 
640 	info = skb_tunnel_info_unclone(skb);
641 	if (!info)
642 		return -ENOMEM;
643 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
644 		return -EINVAL;
645 
646 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
647 }
648 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
649 
650 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
651 {
652 	int k = stack->num_paths++;
653 
654 	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
655 		return NULL;
656 
657 	return &stack->path[k];
658 }
659 
660 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
661 			  struct net_device_path_stack *stack)
662 {
663 	const struct net_device *last_dev;
664 	struct net_device_path_ctx ctx = {
665 		.dev	= dev,
666 		.daddr	= daddr,
667 	};
668 	struct net_device_path *path;
669 	int ret = 0;
670 
671 	stack->num_paths = 0;
672 	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
673 		last_dev = ctx.dev;
674 		path = dev_fwd_path(stack);
675 		if (!path)
676 			return -1;
677 
678 		memset(path, 0, sizeof(struct net_device_path));
679 		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
680 		if (ret < 0)
681 			return -1;
682 
683 		if (WARN_ON_ONCE(last_dev == ctx.dev))
684 			return -1;
685 	}
686 	path = dev_fwd_path(stack);
687 	if (!path)
688 		return -1;
689 	path->type = DEV_PATH_ETHERNET;
690 	path->dev = ctx.dev;
691 
692 	return ret;
693 }
694 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
695 
696 /**
697  *	__dev_get_by_name	- find a device by its name
698  *	@net: the applicable net namespace
699  *	@name: name to find
700  *
701  *	Find an interface by name. Must be called under RTNL semaphore
702  *	or @dev_base_lock. If the name is found a pointer to the device
703  *	is returned. If the name is not found then %NULL is returned. The
704  *	reference counters are not incremented so the caller must be
705  *	careful with locks.
706  */
707 
708 struct net_device *__dev_get_by_name(struct net *net, const char *name)
709 {
710 	struct netdev_name_node *node_name;
711 
712 	node_name = netdev_name_node_lookup(net, name);
713 	return node_name ? node_name->dev : NULL;
714 }
715 EXPORT_SYMBOL(__dev_get_by_name);
716 
717 /**
718  * dev_get_by_name_rcu	- find a device by its name
719  * @net: the applicable net namespace
720  * @name: name to find
721  *
722  * Find an interface by name.
723  * If the name is found a pointer to the device is returned.
724  * If the name is not found then %NULL is returned.
725  * The reference counters are not incremented so the caller must be
726  * careful with locks. The caller must hold RCU lock.
727  */
728 
729 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
730 {
731 	struct netdev_name_node *node_name;
732 
733 	node_name = netdev_name_node_lookup_rcu(net, name);
734 	return node_name ? node_name->dev : NULL;
735 }
736 EXPORT_SYMBOL(dev_get_by_name_rcu);
737 
738 /**
739  *	dev_get_by_name		- find a device by its name
740  *	@net: the applicable net namespace
741  *	@name: name to find
742  *
743  *	Find an interface by name. This can be called from any
744  *	context and does its own locking. The returned handle has
745  *	the usage count incremented and the caller must use dev_put() to
746  *	release it when it is no longer needed. %NULL is returned if no
747  *	matching device is found.
748  */
749 
750 struct net_device *dev_get_by_name(struct net *net, const char *name)
751 {
752 	struct net_device *dev;
753 
754 	rcu_read_lock();
755 	dev = dev_get_by_name_rcu(net, name);
756 	dev_hold(dev);
757 	rcu_read_unlock();
758 	return dev;
759 }
760 EXPORT_SYMBOL(dev_get_by_name);
761 
762 /**
763  *	__dev_get_by_index - find a device by its ifindex
764  *	@net: the applicable net namespace
765  *	@ifindex: index of device
766  *
767  *	Search for an interface by index. Returns %NULL if the device
768  *	is not found or a pointer to the device. The device has not
769  *	had its reference counter increased so the caller must be careful
770  *	about locking. The caller must hold either the RTNL semaphore
771  *	or @dev_base_lock.
772  */
773 
774 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
775 {
776 	struct net_device *dev;
777 	struct hlist_head *head = dev_index_hash(net, ifindex);
778 
779 	hlist_for_each_entry(dev, head, index_hlist)
780 		if (dev->ifindex == ifindex)
781 			return dev;
782 
783 	return NULL;
784 }
785 EXPORT_SYMBOL(__dev_get_by_index);
786 
787 /**
788  *	dev_get_by_index_rcu - find a device by its ifindex
789  *	@net: the applicable net namespace
790  *	@ifindex: index of device
791  *
792  *	Search for an interface by index. Returns %NULL if the device
793  *	is not found or a pointer to the device. The device has not
794  *	had its reference counter increased so the caller must be careful
795  *	about locking. The caller must hold RCU lock.
796  */
797 
798 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
799 {
800 	struct net_device *dev;
801 	struct hlist_head *head = dev_index_hash(net, ifindex);
802 
803 	hlist_for_each_entry_rcu(dev, head, index_hlist)
804 		if (dev->ifindex == ifindex)
805 			return dev;
806 
807 	return NULL;
808 }
809 EXPORT_SYMBOL(dev_get_by_index_rcu);
810 
811 
812 /**
813  *	dev_get_by_index - find a device by its ifindex
814  *	@net: the applicable net namespace
815  *	@ifindex: index of device
816  *
817  *	Search for an interface by index. Returns NULL if the device
818  *	is not found or a pointer to the device. The device returned has
819  *	had a reference added and the pointer is safe until the user calls
820  *	dev_put to indicate they have finished with it.
821  */
822 
823 struct net_device *dev_get_by_index(struct net *net, int ifindex)
824 {
825 	struct net_device *dev;
826 
827 	rcu_read_lock();
828 	dev = dev_get_by_index_rcu(net, ifindex);
829 	dev_hold(dev);
830 	rcu_read_unlock();
831 	return dev;
832 }
833 EXPORT_SYMBOL(dev_get_by_index);
834 
835 /**
836  *	dev_get_by_napi_id - find a device by napi_id
837  *	@napi_id: ID of the NAPI struct
838  *
839  *	Search for an interface by NAPI ID. Returns %NULL if the device
840  *	is not found or a pointer to the device. The device has not had
841  *	its reference counter increased so the caller must be careful
842  *	about locking. The caller must hold RCU lock.
843  */
844 
845 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
846 {
847 	struct napi_struct *napi;
848 
849 	WARN_ON_ONCE(!rcu_read_lock_held());
850 
851 	if (napi_id < MIN_NAPI_ID)
852 		return NULL;
853 
854 	napi = napi_by_id(napi_id);
855 
856 	return napi ? napi->dev : NULL;
857 }
858 EXPORT_SYMBOL(dev_get_by_napi_id);
859 
860 /**
861  *	netdev_get_name - get a netdevice name, knowing its ifindex.
862  *	@net: network namespace
863  *	@name: a pointer to the buffer where the name will be stored.
864  *	@ifindex: the ifindex of the interface to get the name from.
865  */
866 int netdev_get_name(struct net *net, char *name, int ifindex)
867 {
868 	struct net_device *dev;
869 	int ret;
870 
871 	down_read(&devnet_rename_sem);
872 	rcu_read_lock();
873 
874 	dev = dev_get_by_index_rcu(net, ifindex);
875 	if (!dev) {
876 		ret = -ENODEV;
877 		goto out;
878 	}
879 
880 	strcpy(name, dev->name);
881 
882 	ret = 0;
883 out:
884 	rcu_read_unlock();
885 	up_read(&devnet_rename_sem);
886 	return ret;
887 }
888 
889 /**
890  *	dev_getbyhwaddr_rcu - find a device by its hardware address
891  *	@net: the applicable net namespace
892  *	@type: media type of device
893  *	@ha: hardware address
894  *
895  *	Search for an interface by MAC address. Returns NULL if the device
896  *	is not found or a pointer to the device.
897  *	The caller must hold RCU or RTNL.
898  *	The returned device has not had its ref count increased
899  *	and the caller must therefore be careful about locking
900  *
901  */
902 
903 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
904 				       const char *ha)
905 {
906 	struct net_device *dev;
907 
908 	for_each_netdev_rcu(net, dev)
909 		if (dev->type == type &&
910 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
911 			return dev;
912 
913 	return NULL;
914 }
915 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
916 
917 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
918 {
919 	struct net_device *dev, *ret = NULL;
920 
921 	rcu_read_lock();
922 	for_each_netdev_rcu(net, dev)
923 		if (dev->type == type) {
924 			dev_hold(dev);
925 			ret = dev;
926 			break;
927 		}
928 	rcu_read_unlock();
929 	return ret;
930 }
931 EXPORT_SYMBOL(dev_getfirstbyhwtype);
932 
933 /**
934  *	__dev_get_by_flags - find any device with given flags
935  *	@net: the applicable net namespace
936  *	@if_flags: IFF_* values
937  *	@mask: bitmask of bits in if_flags to check
938  *
939  *	Search for any interface with the given flags. Returns NULL if a device
940  *	is not found or a pointer to the device. Must be called inside
941  *	rtnl_lock(), and result refcount is unchanged.
942  */
943 
944 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
945 				      unsigned short mask)
946 {
947 	struct net_device *dev, *ret;
948 
949 	ASSERT_RTNL();
950 
951 	ret = NULL;
952 	for_each_netdev(net, dev) {
953 		if (((dev->flags ^ if_flags) & mask) == 0) {
954 			ret = dev;
955 			break;
956 		}
957 	}
958 	return ret;
959 }
960 EXPORT_SYMBOL(__dev_get_by_flags);
961 
962 /**
963  *	dev_valid_name - check if name is okay for network device
964  *	@name: name string
965  *
966  *	Network device names need to be valid file names to
967  *	allow sysfs to work.  We also disallow any kind of
968  *	whitespace.
969  */
970 bool dev_valid_name(const char *name)
971 {
972 	if (*name == '\0')
973 		return false;
974 	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
975 		return false;
976 	if (!strcmp(name, ".") || !strcmp(name, ".."))
977 		return false;
978 
979 	while (*name) {
980 		if (*name == '/' || *name == ':' || isspace(*name))
981 			return false;
982 		name++;
983 	}
984 	return true;
985 }
986 EXPORT_SYMBOL(dev_valid_name);
987 
988 /**
989  *	__dev_alloc_name - allocate a name for a device
990  *	@net: network namespace to allocate the device name in
991  *	@name: name format string
992  *	@buf:  scratch buffer and result name string
993  *
994  *	Passed a format string - eg "lt%d" it will try and find a suitable
995  *	id. It scans list of devices to build up a free map, then chooses
996  *	the first empty slot. The caller must hold the dev_base or rtnl lock
997  *	while allocating the name and adding the device in order to avoid
998  *	duplicates.
999  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1000  *	Returns the number of the unit assigned or a negative errno code.
1001  */
1002 
1003 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1004 {
1005 	int i = 0;
1006 	const char *p;
1007 	const int max_netdevices = 8*PAGE_SIZE;
1008 	unsigned long *inuse;
1009 	struct net_device *d;
1010 
1011 	if (!dev_valid_name(name))
1012 		return -EINVAL;
1013 
1014 	p = strchr(name, '%');
1015 	if (p) {
1016 		/*
1017 		 * Verify the string as this thing may have come from
1018 		 * the user.  There must be either one "%d" and no other "%"
1019 		 * characters.
1020 		 */
1021 		if (p[1] != 'd' || strchr(p + 2, '%'))
1022 			return -EINVAL;
1023 
1024 		/* Use one page as a bit array of possible slots */
1025 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1026 		if (!inuse)
1027 			return -ENOMEM;
1028 
1029 		for_each_netdev(net, d) {
1030 			struct netdev_name_node *name_node;
1031 			list_for_each_entry(name_node, &d->name_node->list, list) {
1032 				if (!sscanf(name_node->name, name, &i))
1033 					continue;
1034 				if (i < 0 || i >= max_netdevices)
1035 					continue;
1036 
1037 				/*  avoid cases where sscanf is not exact inverse of printf */
1038 				snprintf(buf, IFNAMSIZ, name, i);
1039 				if (!strncmp(buf, name_node->name, IFNAMSIZ))
1040 					set_bit(i, inuse);
1041 			}
1042 			if (!sscanf(d->name, name, &i))
1043 				continue;
1044 			if (i < 0 || i >= max_netdevices)
1045 				continue;
1046 
1047 			/*  avoid cases where sscanf is not exact inverse of printf */
1048 			snprintf(buf, IFNAMSIZ, name, i);
1049 			if (!strncmp(buf, d->name, IFNAMSIZ))
1050 				set_bit(i, inuse);
1051 		}
1052 
1053 		i = find_first_zero_bit(inuse, max_netdevices);
1054 		free_page((unsigned long) inuse);
1055 	}
1056 
1057 	snprintf(buf, IFNAMSIZ, name, i);
1058 	if (!netdev_name_in_use(net, buf))
1059 		return i;
1060 
1061 	/* It is possible to run out of possible slots
1062 	 * when the name is long and there isn't enough space left
1063 	 * for the digits, or if all bits are used.
1064 	 */
1065 	return -ENFILE;
1066 }
1067 
1068 static int dev_alloc_name_ns(struct net *net,
1069 			     struct net_device *dev,
1070 			     const char *name)
1071 {
1072 	char buf[IFNAMSIZ];
1073 	int ret;
1074 
1075 	BUG_ON(!net);
1076 	ret = __dev_alloc_name(net, name, buf);
1077 	if (ret >= 0)
1078 		strlcpy(dev->name, buf, IFNAMSIZ);
1079 	return ret;
1080 }
1081 
1082 /**
1083  *	dev_alloc_name - allocate a name for a device
1084  *	@dev: device
1085  *	@name: name format string
1086  *
1087  *	Passed a format string - eg "lt%d" it will try and find a suitable
1088  *	id. It scans list of devices to build up a free map, then chooses
1089  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1090  *	while allocating the name and adding the device in order to avoid
1091  *	duplicates.
1092  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1093  *	Returns the number of the unit assigned or a negative errno code.
1094  */
1095 
1096 int dev_alloc_name(struct net_device *dev, const char *name)
1097 {
1098 	return dev_alloc_name_ns(dev_net(dev), dev, name);
1099 }
1100 EXPORT_SYMBOL(dev_alloc_name);
1101 
1102 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1103 			      const char *name)
1104 {
1105 	BUG_ON(!net);
1106 
1107 	if (!dev_valid_name(name))
1108 		return -EINVAL;
1109 
1110 	if (strchr(name, '%'))
1111 		return dev_alloc_name_ns(net, dev, name);
1112 	else if (netdev_name_in_use(net, name))
1113 		return -EEXIST;
1114 	else if (dev->name != name)
1115 		strlcpy(dev->name, name, IFNAMSIZ);
1116 
1117 	return 0;
1118 }
1119 
1120 /**
1121  *	dev_change_name - change name of a device
1122  *	@dev: device
1123  *	@newname: name (or format string) must be at least IFNAMSIZ
1124  *
1125  *	Change name of a device, can pass format strings "eth%d".
1126  *	for wildcarding.
1127  */
1128 int dev_change_name(struct net_device *dev, const char *newname)
1129 {
1130 	unsigned char old_assign_type;
1131 	char oldname[IFNAMSIZ];
1132 	int err = 0;
1133 	int ret;
1134 	struct net *net;
1135 
1136 	ASSERT_RTNL();
1137 	BUG_ON(!dev_net(dev));
1138 
1139 	net = dev_net(dev);
1140 
1141 	/* Some auto-enslaved devices e.g. failover slaves are
1142 	 * special, as userspace might rename the device after
1143 	 * the interface had been brought up and running since
1144 	 * the point kernel initiated auto-enslavement. Allow
1145 	 * live name change even when these slave devices are
1146 	 * up and running.
1147 	 *
1148 	 * Typically, users of these auto-enslaving devices
1149 	 * don't actually care about slave name change, as
1150 	 * they are supposed to operate on master interface
1151 	 * directly.
1152 	 */
1153 	if (dev->flags & IFF_UP &&
1154 	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
1155 		return -EBUSY;
1156 
1157 	down_write(&devnet_rename_sem);
1158 
1159 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1160 		up_write(&devnet_rename_sem);
1161 		return 0;
1162 	}
1163 
1164 	memcpy(oldname, dev->name, IFNAMSIZ);
1165 
1166 	err = dev_get_valid_name(net, dev, newname);
1167 	if (err < 0) {
1168 		up_write(&devnet_rename_sem);
1169 		return err;
1170 	}
1171 
1172 	if (oldname[0] && !strchr(oldname, '%'))
1173 		netdev_info(dev, "renamed from %s\n", oldname);
1174 
1175 	old_assign_type = dev->name_assign_type;
1176 	dev->name_assign_type = NET_NAME_RENAMED;
1177 
1178 rollback:
1179 	ret = device_rename(&dev->dev, dev->name);
1180 	if (ret) {
1181 		memcpy(dev->name, oldname, IFNAMSIZ);
1182 		dev->name_assign_type = old_assign_type;
1183 		up_write(&devnet_rename_sem);
1184 		return ret;
1185 	}
1186 
1187 	up_write(&devnet_rename_sem);
1188 
1189 	netdev_adjacent_rename_links(dev, oldname);
1190 
1191 	write_lock_bh(&dev_base_lock);
1192 	netdev_name_node_del(dev->name_node);
1193 	write_unlock_bh(&dev_base_lock);
1194 
1195 	synchronize_rcu();
1196 
1197 	write_lock_bh(&dev_base_lock);
1198 	netdev_name_node_add(net, dev->name_node);
1199 	write_unlock_bh(&dev_base_lock);
1200 
1201 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1202 	ret = notifier_to_errno(ret);
1203 
1204 	if (ret) {
1205 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1206 		if (err >= 0) {
1207 			err = ret;
1208 			down_write(&devnet_rename_sem);
1209 			memcpy(dev->name, oldname, IFNAMSIZ);
1210 			memcpy(oldname, newname, IFNAMSIZ);
1211 			dev->name_assign_type = old_assign_type;
1212 			old_assign_type = NET_NAME_RENAMED;
1213 			goto rollback;
1214 		} else {
1215 			netdev_err(dev, "name change rollback failed: %d\n",
1216 				   ret);
1217 		}
1218 	}
1219 
1220 	return err;
1221 }
1222 
1223 /**
1224  *	dev_set_alias - change ifalias of a device
1225  *	@dev: device
1226  *	@alias: name up to IFALIASZ
1227  *	@len: limit of bytes to copy from info
1228  *
1229  *	Set ifalias for a device,
1230  */
1231 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1232 {
1233 	struct dev_ifalias *new_alias = NULL;
1234 
1235 	if (len >= IFALIASZ)
1236 		return -EINVAL;
1237 
1238 	if (len) {
1239 		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1240 		if (!new_alias)
1241 			return -ENOMEM;
1242 
1243 		memcpy(new_alias->ifalias, alias, len);
1244 		new_alias->ifalias[len] = 0;
1245 	}
1246 
1247 	mutex_lock(&ifalias_mutex);
1248 	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1249 					mutex_is_locked(&ifalias_mutex));
1250 	mutex_unlock(&ifalias_mutex);
1251 
1252 	if (new_alias)
1253 		kfree_rcu(new_alias, rcuhead);
1254 
1255 	return len;
1256 }
1257 EXPORT_SYMBOL(dev_set_alias);
1258 
1259 /**
1260  *	dev_get_alias - get ifalias of a device
1261  *	@dev: device
1262  *	@name: buffer to store name of ifalias
1263  *	@len: size of buffer
1264  *
1265  *	get ifalias for a device.  Caller must make sure dev cannot go
1266  *	away,  e.g. rcu read lock or own a reference count to device.
1267  */
1268 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1269 {
1270 	const struct dev_ifalias *alias;
1271 	int ret = 0;
1272 
1273 	rcu_read_lock();
1274 	alias = rcu_dereference(dev->ifalias);
1275 	if (alias)
1276 		ret = snprintf(name, len, "%s", alias->ifalias);
1277 	rcu_read_unlock();
1278 
1279 	return ret;
1280 }
1281 
1282 /**
1283  *	netdev_features_change - device changes features
1284  *	@dev: device to cause notification
1285  *
1286  *	Called to indicate a device has changed features.
1287  */
1288 void netdev_features_change(struct net_device *dev)
1289 {
1290 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1291 }
1292 EXPORT_SYMBOL(netdev_features_change);
1293 
1294 /**
1295  *	netdev_state_change - device changes state
1296  *	@dev: device to cause notification
1297  *
1298  *	Called to indicate a device has changed state. This function calls
1299  *	the notifier chains for netdev_chain and sends a NEWLINK message
1300  *	to the routing socket.
1301  */
1302 void netdev_state_change(struct net_device *dev)
1303 {
1304 	if (dev->flags & IFF_UP) {
1305 		struct netdev_notifier_change_info change_info = {
1306 			.info.dev = dev,
1307 		};
1308 
1309 		call_netdevice_notifiers_info(NETDEV_CHANGE,
1310 					      &change_info.info);
1311 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1312 	}
1313 }
1314 EXPORT_SYMBOL(netdev_state_change);
1315 
1316 /**
1317  * __netdev_notify_peers - notify network peers about existence of @dev,
1318  * to be called when rtnl lock is already held.
1319  * @dev: network device
1320  *
1321  * Generate traffic such that interested network peers are aware of
1322  * @dev, such as by generating a gratuitous ARP. This may be used when
1323  * a device wants to inform the rest of the network about some sort of
1324  * reconfiguration such as a failover event or virtual machine
1325  * migration.
1326  */
1327 void __netdev_notify_peers(struct net_device *dev)
1328 {
1329 	ASSERT_RTNL();
1330 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1331 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1332 }
1333 EXPORT_SYMBOL(__netdev_notify_peers);
1334 
1335 /**
1336  * netdev_notify_peers - notify network peers about existence of @dev
1337  * @dev: network device
1338  *
1339  * Generate traffic such that interested network peers are aware of
1340  * @dev, such as by generating a gratuitous ARP. This may be used when
1341  * a device wants to inform the rest of the network about some sort of
1342  * reconfiguration such as a failover event or virtual machine
1343  * migration.
1344  */
1345 void netdev_notify_peers(struct net_device *dev)
1346 {
1347 	rtnl_lock();
1348 	__netdev_notify_peers(dev);
1349 	rtnl_unlock();
1350 }
1351 EXPORT_SYMBOL(netdev_notify_peers);
1352 
1353 static int napi_threaded_poll(void *data);
1354 
1355 static int napi_kthread_create(struct napi_struct *n)
1356 {
1357 	int err = 0;
1358 
1359 	/* Create and wake up the kthread once to put it in
1360 	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1361 	 * warning and work with loadavg.
1362 	 */
1363 	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1364 				n->dev->name, n->napi_id);
1365 	if (IS_ERR(n->thread)) {
1366 		err = PTR_ERR(n->thread);
1367 		pr_err("kthread_run failed with err %d\n", err);
1368 		n->thread = NULL;
1369 	}
1370 
1371 	return err;
1372 }
1373 
1374 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1375 {
1376 	const struct net_device_ops *ops = dev->netdev_ops;
1377 	int ret;
1378 
1379 	ASSERT_RTNL();
1380 	dev_addr_check(dev);
1381 
1382 	if (!netif_device_present(dev)) {
1383 		/* may be detached because parent is runtime-suspended */
1384 		if (dev->dev.parent)
1385 			pm_runtime_resume(dev->dev.parent);
1386 		if (!netif_device_present(dev))
1387 			return -ENODEV;
1388 	}
1389 
1390 	/* Block netpoll from trying to do any rx path servicing.
1391 	 * If we don't do this there is a chance ndo_poll_controller
1392 	 * or ndo_poll may be running while we open the device
1393 	 */
1394 	netpoll_poll_disable(dev);
1395 
1396 	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1397 	ret = notifier_to_errno(ret);
1398 	if (ret)
1399 		return ret;
1400 
1401 	set_bit(__LINK_STATE_START, &dev->state);
1402 
1403 	if (ops->ndo_validate_addr)
1404 		ret = ops->ndo_validate_addr(dev);
1405 
1406 	if (!ret && ops->ndo_open)
1407 		ret = ops->ndo_open(dev);
1408 
1409 	netpoll_poll_enable(dev);
1410 
1411 	if (ret)
1412 		clear_bit(__LINK_STATE_START, &dev->state);
1413 	else {
1414 		dev->flags |= IFF_UP;
1415 		dev_set_rx_mode(dev);
1416 		dev_activate(dev);
1417 		add_device_randomness(dev->dev_addr, dev->addr_len);
1418 	}
1419 
1420 	return ret;
1421 }
1422 
1423 /**
1424  *	dev_open	- prepare an interface for use.
1425  *	@dev: device to open
1426  *	@extack: netlink extended ack
1427  *
1428  *	Takes a device from down to up state. The device's private open
1429  *	function is invoked and then the multicast lists are loaded. Finally
1430  *	the device is moved into the up state and a %NETDEV_UP message is
1431  *	sent to the netdev notifier chain.
1432  *
1433  *	Calling this function on an active interface is a nop. On a failure
1434  *	a negative errno code is returned.
1435  */
1436 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1437 {
1438 	int ret;
1439 
1440 	if (dev->flags & IFF_UP)
1441 		return 0;
1442 
1443 	ret = __dev_open(dev, extack);
1444 	if (ret < 0)
1445 		return ret;
1446 
1447 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1448 	call_netdevice_notifiers(NETDEV_UP, dev);
1449 
1450 	return ret;
1451 }
1452 EXPORT_SYMBOL(dev_open);
1453 
1454 static void __dev_close_many(struct list_head *head)
1455 {
1456 	struct net_device *dev;
1457 
1458 	ASSERT_RTNL();
1459 	might_sleep();
1460 
1461 	list_for_each_entry(dev, head, close_list) {
1462 		/* Temporarily disable netpoll until the interface is down */
1463 		netpoll_poll_disable(dev);
1464 
1465 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1466 
1467 		clear_bit(__LINK_STATE_START, &dev->state);
1468 
1469 		/* Synchronize to scheduled poll. We cannot touch poll list, it
1470 		 * can be even on different cpu. So just clear netif_running().
1471 		 *
1472 		 * dev->stop() will invoke napi_disable() on all of it's
1473 		 * napi_struct instances on this device.
1474 		 */
1475 		smp_mb__after_atomic(); /* Commit netif_running(). */
1476 	}
1477 
1478 	dev_deactivate_many(head);
1479 
1480 	list_for_each_entry(dev, head, close_list) {
1481 		const struct net_device_ops *ops = dev->netdev_ops;
1482 
1483 		/*
1484 		 *	Call the device specific close. This cannot fail.
1485 		 *	Only if device is UP
1486 		 *
1487 		 *	We allow it to be called even after a DETACH hot-plug
1488 		 *	event.
1489 		 */
1490 		if (ops->ndo_stop)
1491 			ops->ndo_stop(dev);
1492 
1493 		dev->flags &= ~IFF_UP;
1494 		netpoll_poll_enable(dev);
1495 	}
1496 }
1497 
1498 static void __dev_close(struct net_device *dev)
1499 {
1500 	LIST_HEAD(single);
1501 
1502 	list_add(&dev->close_list, &single);
1503 	__dev_close_many(&single);
1504 	list_del(&single);
1505 }
1506 
1507 void dev_close_many(struct list_head *head, bool unlink)
1508 {
1509 	struct net_device *dev, *tmp;
1510 
1511 	/* Remove the devices that don't need to be closed */
1512 	list_for_each_entry_safe(dev, tmp, head, close_list)
1513 		if (!(dev->flags & IFF_UP))
1514 			list_del_init(&dev->close_list);
1515 
1516 	__dev_close_many(head);
1517 
1518 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1519 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1520 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1521 		if (unlink)
1522 			list_del_init(&dev->close_list);
1523 	}
1524 }
1525 EXPORT_SYMBOL(dev_close_many);
1526 
1527 /**
1528  *	dev_close - shutdown an interface.
1529  *	@dev: device to shutdown
1530  *
1531  *	This function moves an active device into down state. A
1532  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1533  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1534  *	chain.
1535  */
1536 void dev_close(struct net_device *dev)
1537 {
1538 	if (dev->flags & IFF_UP) {
1539 		LIST_HEAD(single);
1540 
1541 		list_add(&dev->close_list, &single);
1542 		dev_close_many(&single, true);
1543 		list_del(&single);
1544 	}
1545 }
1546 EXPORT_SYMBOL(dev_close);
1547 
1548 
1549 /**
1550  *	dev_disable_lro - disable Large Receive Offload on a device
1551  *	@dev: device
1552  *
1553  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1554  *	called under RTNL.  This is needed if received packets may be
1555  *	forwarded to another interface.
1556  */
1557 void dev_disable_lro(struct net_device *dev)
1558 {
1559 	struct net_device *lower_dev;
1560 	struct list_head *iter;
1561 
1562 	dev->wanted_features &= ~NETIF_F_LRO;
1563 	netdev_update_features(dev);
1564 
1565 	if (unlikely(dev->features & NETIF_F_LRO))
1566 		netdev_WARN(dev, "failed to disable LRO!\n");
1567 
1568 	netdev_for_each_lower_dev(dev, lower_dev, iter)
1569 		dev_disable_lro(lower_dev);
1570 }
1571 EXPORT_SYMBOL(dev_disable_lro);
1572 
1573 /**
1574  *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1575  *	@dev: device
1576  *
1577  *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1578  *	called under RTNL.  This is needed if Generic XDP is installed on
1579  *	the device.
1580  */
1581 static void dev_disable_gro_hw(struct net_device *dev)
1582 {
1583 	dev->wanted_features &= ~NETIF_F_GRO_HW;
1584 	netdev_update_features(dev);
1585 
1586 	if (unlikely(dev->features & NETIF_F_GRO_HW))
1587 		netdev_WARN(dev, "failed to disable GRO_HW!\n");
1588 }
1589 
1590 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1591 {
1592 #define N(val) 						\
1593 	case NETDEV_##val:				\
1594 		return "NETDEV_" __stringify(val);
1595 	switch (cmd) {
1596 	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1597 	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1598 	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1599 	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1600 	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1601 	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1602 	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1603 	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1604 	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1605 	N(PRE_CHANGEADDR)
1606 	}
1607 #undef N
1608 	return "UNKNOWN_NETDEV_EVENT";
1609 }
1610 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1611 
1612 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1613 				   struct net_device *dev)
1614 {
1615 	struct netdev_notifier_info info = {
1616 		.dev = dev,
1617 	};
1618 
1619 	return nb->notifier_call(nb, val, &info);
1620 }
1621 
1622 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1623 					     struct net_device *dev)
1624 {
1625 	int err;
1626 
1627 	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1628 	err = notifier_to_errno(err);
1629 	if (err)
1630 		return err;
1631 
1632 	if (!(dev->flags & IFF_UP))
1633 		return 0;
1634 
1635 	call_netdevice_notifier(nb, NETDEV_UP, dev);
1636 	return 0;
1637 }
1638 
1639 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1640 						struct net_device *dev)
1641 {
1642 	if (dev->flags & IFF_UP) {
1643 		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1644 					dev);
1645 		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1646 	}
1647 	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1648 }
1649 
1650 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1651 						 struct net *net)
1652 {
1653 	struct net_device *dev;
1654 	int err;
1655 
1656 	for_each_netdev(net, dev) {
1657 		err = call_netdevice_register_notifiers(nb, dev);
1658 		if (err)
1659 			goto rollback;
1660 	}
1661 	return 0;
1662 
1663 rollback:
1664 	for_each_netdev_continue_reverse(net, dev)
1665 		call_netdevice_unregister_notifiers(nb, dev);
1666 	return err;
1667 }
1668 
1669 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1670 						    struct net *net)
1671 {
1672 	struct net_device *dev;
1673 
1674 	for_each_netdev(net, dev)
1675 		call_netdevice_unregister_notifiers(nb, dev);
1676 }
1677 
1678 static int dev_boot_phase = 1;
1679 
1680 /**
1681  * register_netdevice_notifier - register a network notifier block
1682  * @nb: notifier
1683  *
1684  * Register a notifier to be called when network device events occur.
1685  * The notifier passed is linked into the kernel structures and must
1686  * not be reused until it has been unregistered. A negative errno code
1687  * is returned on a failure.
1688  *
1689  * When registered all registration and up events are replayed
1690  * to the new notifier to allow device to have a race free
1691  * view of the network device list.
1692  */
1693 
1694 int register_netdevice_notifier(struct notifier_block *nb)
1695 {
1696 	struct net *net;
1697 	int err;
1698 
1699 	/* Close race with setup_net() and cleanup_net() */
1700 	down_write(&pernet_ops_rwsem);
1701 	rtnl_lock();
1702 	err = raw_notifier_chain_register(&netdev_chain, nb);
1703 	if (err)
1704 		goto unlock;
1705 	if (dev_boot_phase)
1706 		goto unlock;
1707 	for_each_net(net) {
1708 		err = call_netdevice_register_net_notifiers(nb, net);
1709 		if (err)
1710 			goto rollback;
1711 	}
1712 
1713 unlock:
1714 	rtnl_unlock();
1715 	up_write(&pernet_ops_rwsem);
1716 	return err;
1717 
1718 rollback:
1719 	for_each_net_continue_reverse(net)
1720 		call_netdevice_unregister_net_notifiers(nb, net);
1721 
1722 	raw_notifier_chain_unregister(&netdev_chain, nb);
1723 	goto unlock;
1724 }
1725 EXPORT_SYMBOL(register_netdevice_notifier);
1726 
1727 /**
1728  * unregister_netdevice_notifier - unregister a network notifier block
1729  * @nb: notifier
1730  *
1731  * Unregister a notifier previously registered by
1732  * register_netdevice_notifier(). The notifier is unlinked into the
1733  * kernel structures and may then be reused. A negative errno code
1734  * is returned on a failure.
1735  *
1736  * After unregistering unregister and down device events are synthesized
1737  * for all devices on the device list to the removed notifier to remove
1738  * the need for special case cleanup code.
1739  */
1740 
1741 int unregister_netdevice_notifier(struct notifier_block *nb)
1742 {
1743 	struct net *net;
1744 	int err;
1745 
1746 	/* Close race with setup_net() and cleanup_net() */
1747 	down_write(&pernet_ops_rwsem);
1748 	rtnl_lock();
1749 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1750 	if (err)
1751 		goto unlock;
1752 
1753 	for_each_net(net)
1754 		call_netdevice_unregister_net_notifiers(nb, net);
1755 
1756 unlock:
1757 	rtnl_unlock();
1758 	up_write(&pernet_ops_rwsem);
1759 	return err;
1760 }
1761 EXPORT_SYMBOL(unregister_netdevice_notifier);
1762 
1763 static int __register_netdevice_notifier_net(struct net *net,
1764 					     struct notifier_block *nb,
1765 					     bool ignore_call_fail)
1766 {
1767 	int err;
1768 
1769 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1770 	if (err)
1771 		return err;
1772 	if (dev_boot_phase)
1773 		return 0;
1774 
1775 	err = call_netdevice_register_net_notifiers(nb, net);
1776 	if (err && !ignore_call_fail)
1777 		goto chain_unregister;
1778 
1779 	return 0;
1780 
1781 chain_unregister:
1782 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1783 	return err;
1784 }
1785 
1786 static int __unregister_netdevice_notifier_net(struct net *net,
1787 					       struct notifier_block *nb)
1788 {
1789 	int err;
1790 
1791 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1792 	if (err)
1793 		return err;
1794 
1795 	call_netdevice_unregister_net_notifiers(nb, net);
1796 	return 0;
1797 }
1798 
1799 /**
1800  * register_netdevice_notifier_net - register a per-netns network notifier block
1801  * @net: network namespace
1802  * @nb: notifier
1803  *
1804  * Register a notifier to be called when network device events occur.
1805  * The notifier passed is linked into the kernel structures and must
1806  * not be reused until it has been unregistered. A negative errno code
1807  * is returned on a failure.
1808  *
1809  * When registered all registration and up events are replayed
1810  * to the new notifier to allow device to have a race free
1811  * view of the network device list.
1812  */
1813 
1814 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1815 {
1816 	int err;
1817 
1818 	rtnl_lock();
1819 	err = __register_netdevice_notifier_net(net, nb, false);
1820 	rtnl_unlock();
1821 	return err;
1822 }
1823 EXPORT_SYMBOL(register_netdevice_notifier_net);
1824 
1825 /**
1826  * unregister_netdevice_notifier_net - unregister a per-netns
1827  *                                     network notifier block
1828  * @net: network namespace
1829  * @nb: notifier
1830  *
1831  * Unregister a notifier previously registered by
1832  * register_netdevice_notifier(). The notifier is unlinked into the
1833  * kernel structures and may then be reused. A negative errno code
1834  * is returned on a failure.
1835  *
1836  * After unregistering unregister and down device events are synthesized
1837  * for all devices on the device list to the removed notifier to remove
1838  * the need for special case cleanup code.
1839  */
1840 
1841 int unregister_netdevice_notifier_net(struct net *net,
1842 				      struct notifier_block *nb)
1843 {
1844 	int err;
1845 
1846 	rtnl_lock();
1847 	err = __unregister_netdevice_notifier_net(net, nb);
1848 	rtnl_unlock();
1849 	return err;
1850 }
1851 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1852 
1853 int register_netdevice_notifier_dev_net(struct net_device *dev,
1854 					struct notifier_block *nb,
1855 					struct netdev_net_notifier *nn)
1856 {
1857 	int err;
1858 
1859 	rtnl_lock();
1860 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1861 	if (!err) {
1862 		nn->nb = nb;
1863 		list_add(&nn->list, &dev->net_notifier_list);
1864 	}
1865 	rtnl_unlock();
1866 	return err;
1867 }
1868 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1869 
1870 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1871 					  struct notifier_block *nb,
1872 					  struct netdev_net_notifier *nn)
1873 {
1874 	int err;
1875 
1876 	rtnl_lock();
1877 	list_del(&nn->list);
1878 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1879 	rtnl_unlock();
1880 	return err;
1881 }
1882 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1883 
1884 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1885 					     struct net *net)
1886 {
1887 	struct netdev_net_notifier *nn;
1888 
1889 	list_for_each_entry(nn, &dev->net_notifier_list, list) {
1890 		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
1891 		__register_netdevice_notifier_net(net, nn->nb, true);
1892 	}
1893 }
1894 
1895 /**
1896  *	call_netdevice_notifiers_info - call all network notifier blocks
1897  *	@val: value passed unmodified to notifier function
1898  *	@info: notifier information data
1899  *
1900  *	Call all network notifier blocks.  Parameters and return value
1901  *	are as for raw_notifier_call_chain().
1902  */
1903 
1904 static int call_netdevice_notifiers_info(unsigned long val,
1905 					 struct netdev_notifier_info *info)
1906 {
1907 	struct net *net = dev_net(info->dev);
1908 	int ret;
1909 
1910 	ASSERT_RTNL();
1911 
1912 	/* Run per-netns notifier block chain first, then run the global one.
1913 	 * Hopefully, one day, the global one is going to be removed after
1914 	 * all notifier block registrators get converted to be per-netns.
1915 	 */
1916 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1917 	if (ret & NOTIFY_STOP_MASK)
1918 		return ret;
1919 	return raw_notifier_call_chain(&netdev_chain, val, info);
1920 }
1921 
1922 static int call_netdevice_notifiers_extack(unsigned long val,
1923 					   struct net_device *dev,
1924 					   struct netlink_ext_ack *extack)
1925 {
1926 	struct netdev_notifier_info info = {
1927 		.dev = dev,
1928 		.extack = extack,
1929 	};
1930 
1931 	return call_netdevice_notifiers_info(val, &info);
1932 }
1933 
1934 /**
1935  *	call_netdevice_notifiers - call all network notifier blocks
1936  *      @val: value passed unmodified to notifier function
1937  *      @dev: net_device pointer passed unmodified to notifier function
1938  *
1939  *	Call all network notifier blocks.  Parameters and return value
1940  *	are as for raw_notifier_call_chain().
1941  */
1942 
1943 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1944 {
1945 	return call_netdevice_notifiers_extack(val, dev, NULL);
1946 }
1947 EXPORT_SYMBOL(call_netdevice_notifiers);
1948 
1949 /**
1950  *	call_netdevice_notifiers_mtu - call all network notifier blocks
1951  *	@val: value passed unmodified to notifier function
1952  *	@dev: net_device pointer passed unmodified to notifier function
1953  *	@arg: additional u32 argument passed to the notifier function
1954  *
1955  *	Call all network notifier blocks.  Parameters and return value
1956  *	are as for raw_notifier_call_chain().
1957  */
1958 static int call_netdevice_notifiers_mtu(unsigned long val,
1959 					struct net_device *dev, u32 arg)
1960 {
1961 	struct netdev_notifier_info_ext info = {
1962 		.info.dev = dev,
1963 		.ext.mtu = arg,
1964 	};
1965 
1966 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
1967 
1968 	return call_netdevice_notifiers_info(val, &info.info);
1969 }
1970 
1971 #ifdef CONFIG_NET_INGRESS
1972 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
1973 
1974 void net_inc_ingress_queue(void)
1975 {
1976 	static_branch_inc(&ingress_needed_key);
1977 }
1978 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1979 
1980 void net_dec_ingress_queue(void)
1981 {
1982 	static_branch_dec(&ingress_needed_key);
1983 }
1984 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1985 #endif
1986 
1987 #ifdef CONFIG_NET_EGRESS
1988 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
1989 
1990 void net_inc_egress_queue(void)
1991 {
1992 	static_branch_inc(&egress_needed_key);
1993 }
1994 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1995 
1996 void net_dec_egress_queue(void)
1997 {
1998 	static_branch_dec(&egress_needed_key);
1999 }
2000 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2001 #endif
2002 
2003 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2004 #ifdef CONFIG_JUMP_LABEL
2005 static atomic_t netstamp_needed_deferred;
2006 static atomic_t netstamp_wanted;
2007 static void netstamp_clear(struct work_struct *work)
2008 {
2009 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2010 	int wanted;
2011 
2012 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2013 	if (wanted > 0)
2014 		static_branch_enable(&netstamp_needed_key);
2015 	else
2016 		static_branch_disable(&netstamp_needed_key);
2017 }
2018 static DECLARE_WORK(netstamp_work, netstamp_clear);
2019 #endif
2020 
2021 void net_enable_timestamp(void)
2022 {
2023 #ifdef CONFIG_JUMP_LABEL
2024 	int wanted;
2025 
2026 	while (1) {
2027 		wanted = atomic_read(&netstamp_wanted);
2028 		if (wanted <= 0)
2029 			break;
2030 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2031 			return;
2032 	}
2033 	atomic_inc(&netstamp_needed_deferred);
2034 	schedule_work(&netstamp_work);
2035 #else
2036 	static_branch_inc(&netstamp_needed_key);
2037 #endif
2038 }
2039 EXPORT_SYMBOL(net_enable_timestamp);
2040 
2041 void net_disable_timestamp(void)
2042 {
2043 #ifdef CONFIG_JUMP_LABEL
2044 	int wanted;
2045 
2046 	while (1) {
2047 		wanted = atomic_read(&netstamp_wanted);
2048 		if (wanted <= 1)
2049 			break;
2050 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2051 			return;
2052 	}
2053 	atomic_dec(&netstamp_needed_deferred);
2054 	schedule_work(&netstamp_work);
2055 #else
2056 	static_branch_dec(&netstamp_needed_key);
2057 #endif
2058 }
2059 EXPORT_SYMBOL(net_disable_timestamp);
2060 
2061 static inline void net_timestamp_set(struct sk_buff *skb)
2062 {
2063 	skb->tstamp = 0;
2064 	if (static_branch_unlikely(&netstamp_needed_key))
2065 		__net_timestamp(skb);
2066 }
2067 
2068 #define net_timestamp_check(COND, SKB)				\
2069 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2070 		if ((COND) && !(SKB)->tstamp)			\
2071 			__net_timestamp(SKB);			\
2072 	}							\
2073 
2074 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2075 {
2076 	return __is_skb_forwardable(dev, skb, true);
2077 }
2078 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2079 
2080 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2081 			      bool check_mtu)
2082 {
2083 	int ret = ____dev_forward_skb(dev, skb, check_mtu);
2084 
2085 	if (likely(!ret)) {
2086 		skb->protocol = eth_type_trans(skb, dev);
2087 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2088 	}
2089 
2090 	return ret;
2091 }
2092 
2093 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2094 {
2095 	return __dev_forward_skb2(dev, skb, true);
2096 }
2097 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2098 
2099 /**
2100  * dev_forward_skb - loopback an skb to another netif
2101  *
2102  * @dev: destination network device
2103  * @skb: buffer to forward
2104  *
2105  * return values:
2106  *	NET_RX_SUCCESS	(no congestion)
2107  *	NET_RX_DROP     (packet was dropped, but freed)
2108  *
2109  * dev_forward_skb can be used for injecting an skb from the
2110  * start_xmit function of one device into the receive queue
2111  * of another device.
2112  *
2113  * The receiving device may be in another namespace, so
2114  * we have to clear all information in the skb that could
2115  * impact namespace isolation.
2116  */
2117 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2118 {
2119 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2120 }
2121 EXPORT_SYMBOL_GPL(dev_forward_skb);
2122 
2123 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2124 {
2125 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2126 }
2127 
2128 static inline int deliver_skb(struct sk_buff *skb,
2129 			      struct packet_type *pt_prev,
2130 			      struct net_device *orig_dev)
2131 {
2132 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2133 		return -ENOMEM;
2134 	refcount_inc(&skb->users);
2135 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2136 }
2137 
2138 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2139 					  struct packet_type **pt,
2140 					  struct net_device *orig_dev,
2141 					  __be16 type,
2142 					  struct list_head *ptype_list)
2143 {
2144 	struct packet_type *ptype, *pt_prev = *pt;
2145 
2146 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2147 		if (ptype->type != type)
2148 			continue;
2149 		if (pt_prev)
2150 			deliver_skb(skb, pt_prev, orig_dev);
2151 		pt_prev = ptype;
2152 	}
2153 	*pt = pt_prev;
2154 }
2155 
2156 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2157 {
2158 	if (!ptype->af_packet_priv || !skb->sk)
2159 		return false;
2160 
2161 	if (ptype->id_match)
2162 		return ptype->id_match(ptype, skb->sk);
2163 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2164 		return true;
2165 
2166 	return false;
2167 }
2168 
2169 /**
2170  * dev_nit_active - return true if any network interface taps are in use
2171  *
2172  * @dev: network device to check for the presence of taps
2173  */
2174 bool dev_nit_active(struct net_device *dev)
2175 {
2176 	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2177 }
2178 EXPORT_SYMBOL_GPL(dev_nit_active);
2179 
2180 /*
2181  *	Support routine. Sends outgoing frames to any network
2182  *	taps currently in use.
2183  */
2184 
2185 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2186 {
2187 	struct packet_type *ptype;
2188 	struct sk_buff *skb2 = NULL;
2189 	struct packet_type *pt_prev = NULL;
2190 	struct list_head *ptype_list = &ptype_all;
2191 
2192 	rcu_read_lock();
2193 again:
2194 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2195 		if (ptype->ignore_outgoing)
2196 			continue;
2197 
2198 		/* Never send packets back to the socket
2199 		 * they originated from - MvS (miquels@drinkel.ow.org)
2200 		 */
2201 		if (skb_loop_sk(ptype, skb))
2202 			continue;
2203 
2204 		if (pt_prev) {
2205 			deliver_skb(skb2, pt_prev, skb->dev);
2206 			pt_prev = ptype;
2207 			continue;
2208 		}
2209 
2210 		/* need to clone skb, done only once */
2211 		skb2 = skb_clone(skb, GFP_ATOMIC);
2212 		if (!skb2)
2213 			goto out_unlock;
2214 
2215 		net_timestamp_set(skb2);
2216 
2217 		/* skb->nh should be correctly
2218 		 * set by sender, so that the second statement is
2219 		 * just protection against buggy protocols.
2220 		 */
2221 		skb_reset_mac_header(skb2);
2222 
2223 		if (skb_network_header(skb2) < skb2->data ||
2224 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2225 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2226 					     ntohs(skb2->protocol),
2227 					     dev->name);
2228 			skb_reset_network_header(skb2);
2229 		}
2230 
2231 		skb2->transport_header = skb2->network_header;
2232 		skb2->pkt_type = PACKET_OUTGOING;
2233 		pt_prev = ptype;
2234 	}
2235 
2236 	if (ptype_list == &ptype_all) {
2237 		ptype_list = &dev->ptype_all;
2238 		goto again;
2239 	}
2240 out_unlock:
2241 	if (pt_prev) {
2242 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2243 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2244 		else
2245 			kfree_skb(skb2);
2246 	}
2247 	rcu_read_unlock();
2248 }
2249 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2250 
2251 /**
2252  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2253  * @dev: Network device
2254  * @txq: number of queues available
2255  *
2256  * If real_num_tx_queues is changed the tc mappings may no longer be
2257  * valid. To resolve this verify the tc mapping remains valid and if
2258  * not NULL the mapping. With no priorities mapping to this
2259  * offset/count pair it will no longer be used. In the worst case TC0
2260  * is invalid nothing can be done so disable priority mappings. If is
2261  * expected that drivers will fix this mapping if they can before
2262  * calling netif_set_real_num_tx_queues.
2263  */
2264 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2265 {
2266 	int i;
2267 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2268 
2269 	/* If TC0 is invalidated disable TC mapping */
2270 	if (tc->offset + tc->count > txq) {
2271 		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2272 		dev->num_tc = 0;
2273 		return;
2274 	}
2275 
2276 	/* Invalidated prio to tc mappings set to TC0 */
2277 	for (i = 1; i < TC_BITMASK + 1; i++) {
2278 		int q = netdev_get_prio_tc_map(dev, i);
2279 
2280 		tc = &dev->tc_to_txq[q];
2281 		if (tc->offset + tc->count > txq) {
2282 			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2283 				    i, q);
2284 			netdev_set_prio_tc_map(dev, i, 0);
2285 		}
2286 	}
2287 }
2288 
2289 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2290 {
2291 	if (dev->num_tc) {
2292 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2293 		int i;
2294 
2295 		/* walk through the TCs and see if it falls into any of them */
2296 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2297 			if ((txq - tc->offset) < tc->count)
2298 				return i;
2299 		}
2300 
2301 		/* didn't find it, just return -1 to indicate no match */
2302 		return -1;
2303 	}
2304 
2305 	return 0;
2306 }
2307 EXPORT_SYMBOL(netdev_txq_to_tc);
2308 
2309 #ifdef CONFIG_XPS
2310 static struct static_key xps_needed __read_mostly;
2311 static struct static_key xps_rxqs_needed __read_mostly;
2312 static DEFINE_MUTEX(xps_map_mutex);
2313 #define xmap_dereference(P)		\
2314 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2315 
2316 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2317 			     struct xps_dev_maps *old_maps, int tci, u16 index)
2318 {
2319 	struct xps_map *map = NULL;
2320 	int pos;
2321 
2322 	if (dev_maps)
2323 		map = xmap_dereference(dev_maps->attr_map[tci]);
2324 	if (!map)
2325 		return false;
2326 
2327 	for (pos = map->len; pos--;) {
2328 		if (map->queues[pos] != index)
2329 			continue;
2330 
2331 		if (map->len > 1) {
2332 			map->queues[pos] = map->queues[--map->len];
2333 			break;
2334 		}
2335 
2336 		if (old_maps)
2337 			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2338 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2339 		kfree_rcu(map, rcu);
2340 		return false;
2341 	}
2342 
2343 	return true;
2344 }
2345 
2346 static bool remove_xps_queue_cpu(struct net_device *dev,
2347 				 struct xps_dev_maps *dev_maps,
2348 				 int cpu, u16 offset, u16 count)
2349 {
2350 	int num_tc = dev_maps->num_tc;
2351 	bool active = false;
2352 	int tci;
2353 
2354 	for (tci = cpu * num_tc; num_tc--; tci++) {
2355 		int i, j;
2356 
2357 		for (i = count, j = offset; i--; j++) {
2358 			if (!remove_xps_queue(dev_maps, NULL, tci, j))
2359 				break;
2360 		}
2361 
2362 		active |= i < 0;
2363 	}
2364 
2365 	return active;
2366 }
2367 
2368 static void reset_xps_maps(struct net_device *dev,
2369 			   struct xps_dev_maps *dev_maps,
2370 			   enum xps_map_type type)
2371 {
2372 	static_key_slow_dec_cpuslocked(&xps_needed);
2373 	if (type == XPS_RXQS)
2374 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2375 
2376 	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2377 
2378 	kfree_rcu(dev_maps, rcu);
2379 }
2380 
2381 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2382 			   u16 offset, u16 count)
2383 {
2384 	struct xps_dev_maps *dev_maps;
2385 	bool active = false;
2386 	int i, j;
2387 
2388 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2389 	if (!dev_maps)
2390 		return;
2391 
2392 	for (j = 0; j < dev_maps->nr_ids; j++)
2393 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2394 	if (!active)
2395 		reset_xps_maps(dev, dev_maps, type);
2396 
2397 	if (type == XPS_CPUS) {
2398 		for (i = offset + (count - 1); count--; i--)
2399 			netdev_queue_numa_node_write(
2400 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2401 	}
2402 }
2403 
2404 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2405 				   u16 count)
2406 {
2407 	if (!static_key_false(&xps_needed))
2408 		return;
2409 
2410 	cpus_read_lock();
2411 	mutex_lock(&xps_map_mutex);
2412 
2413 	if (static_key_false(&xps_rxqs_needed))
2414 		clean_xps_maps(dev, XPS_RXQS, offset, count);
2415 
2416 	clean_xps_maps(dev, XPS_CPUS, offset, count);
2417 
2418 	mutex_unlock(&xps_map_mutex);
2419 	cpus_read_unlock();
2420 }
2421 
2422 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2423 {
2424 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2425 }
2426 
2427 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2428 				      u16 index, bool is_rxqs_map)
2429 {
2430 	struct xps_map *new_map;
2431 	int alloc_len = XPS_MIN_MAP_ALLOC;
2432 	int i, pos;
2433 
2434 	for (pos = 0; map && pos < map->len; pos++) {
2435 		if (map->queues[pos] != index)
2436 			continue;
2437 		return map;
2438 	}
2439 
2440 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2441 	if (map) {
2442 		if (pos < map->alloc_len)
2443 			return map;
2444 
2445 		alloc_len = map->alloc_len * 2;
2446 	}
2447 
2448 	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2449 	 *  map
2450 	 */
2451 	if (is_rxqs_map)
2452 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2453 	else
2454 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2455 				       cpu_to_node(attr_index));
2456 	if (!new_map)
2457 		return NULL;
2458 
2459 	for (i = 0; i < pos; i++)
2460 		new_map->queues[i] = map->queues[i];
2461 	new_map->alloc_len = alloc_len;
2462 	new_map->len = pos;
2463 
2464 	return new_map;
2465 }
2466 
2467 /* Copy xps maps at a given index */
2468 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2469 			      struct xps_dev_maps *new_dev_maps, int index,
2470 			      int tc, bool skip_tc)
2471 {
2472 	int i, tci = index * dev_maps->num_tc;
2473 	struct xps_map *map;
2474 
2475 	/* copy maps belonging to foreign traffic classes */
2476 	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2477 		if (i == tc && skip_tc)
2478 			continue;
2479 
2480 		/* fill in the new device map from the old device map */
2481 		map = xmap_dereference(dev_maps->attr_map[tci]);
2482 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2483 	}
2484 }
2485 
2486 /* Must be called under cpus_read_lock */
2487 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2488 			  u16 index, enum xps_map_type type)
2489 {
2490 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2491 	const unsigned long *online_mask = NULL;
2492 	bool active = false, copy = false;
2493 	int i, j, tci, numa_node_id = -2;
2494 	int maps_sz, num_tc = 1, tc = 0;
2495 	struct xps_map *map, *new_map;
2496 	unsigned int nr_ids;
2497 
2498 	if (dev->num_tc) {
2499 		/* Do not allow XPS on subordinate device directly */
2500 		num_tc = dev->num_tc;
2501 		if (num_tc < 0)
2502 			return -EINVAL;
2503 
2504 		/* If queue belongs to subordinate dev use its map */
2505 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2506 
2507 		tc = netdev_txq_to_tc(dev, index);
2508 		if (tc < 0)
2509 			return -EINVAL;
2510 	}
2511 
2512 	mutex_lock(&xps_map_mutex);
2513 
2514 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2515 	if (type == XPS_RXQS) {
2516 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2517 		nr_ids = dev->num_rx_queues;
2518 	} else {
2519 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2520 		if (num_possible_cpus() > 1)
2521 			online_mask = cpumask_bits(cpu_online_mask);
2522 		nr_ids = nr_cpu_ids;
2523 	}
2524 
2525 	if (maps_sz < L1_CACHE_BYTES)
2526 		maps_sz = L1_CACHE_BYTES;
2527 
2528 	/* The old dev_maps could be larger or smaller than the one we're
2529 	 * setting up now, as dev->num_tc or nr_ids could have been updated in
2530 	 * between. We could try to be smart, but let's be safe instead and only
2531 	 * copy foreign traffic classes if the two map sizes match.
2532 	 */
2533 	if (dev_maps &&
2534 	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2535 		copy = true;
2536 
2537 	/* allocate memory for queue storage */
2538 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2539 	     j < nr_ids;) {
2540 		if (!new_dev_maps) {
2541 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2542 			if (!new_dev_maps) {
2543 				mutex_unlock(&xps_map_mutex);
2544 				return -ENOMEM;
2545 			}
2546 
2547 			new_dev_maps->nr_ids = nr_ids;
2548 			new_dev_maps->num_tc = num_tc;
2549 		}
2550 
2551 		tci = j * num_tc + tc;
2552 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2553 
2554 		map = expand_xps_map(map, j, index, type == XPS_RXQS);
2555 		if (!map)
2556 			goto error;
2557 
2558 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2559 	}
2560 
2561 	if (!new_dev_maps)
2562 		goto out_no_new_maps;
2563 
2564 	if (!dev_maps) {
2565 		/* Increment static keys at most once per type */
2566 		static_key_slow_inc_cpuslocked(&xps_needed);
2567 		if (type == XPS_RXQS)
2568 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2569 	}
2570 
2571 	for (j = 0; j < nr_ids; j++) {
2572 		bool skip_tc = false;
2573 
2574 		tci = j * num_tc + tc;
2575 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2576 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2577 			/* add tx-queue to CPU/rx-queue maps */
2578 			int pos = 0;
2579 
2580 			skip_tc = true;
2581 
2582 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2583 			while ((pos < map->len) && (map->queues[pos] != index))
2584 				pos++;
2585 
2586 			if (pos == map->len)
2587 				map->queues[map->len++] = index;
2588 #ifdef CONFIG_NUMA
2589 			if (type == XPS_CPUS) {
2590 				if (numa_node_id == -2)
2591 					numa_node_id = cpu_to_node(j);
2592 				else if (numa_node_id != cpu_to_node(j))
2593 					numa_node_id = -1;
2594 			}
2595 #endif
2596 		}
2597 
2598 		if (copy)
2599 			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2600 					  skip_tc);
2601 	}
2602 
2603 	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2604 
2605 	/* Cleanup old maps */
2606 	if (!dev_maps)
2607 		goto out_no_old_maps;
2608 
2609 	for (j = 0; j < dev_maps->nr_ids; j++) {
2610 		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2611 			map = xmap_dereference(dev_maps->attr_map[tci]);
2612 			if (!map)
2613 				continue;
2614 
2615 			if (copy) {
2616 				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2617 				if (map == new_map)
2618 					continue;
2619 			}
2620 
2621 			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2622 			kfree_rcu(map, rcu);
2623 		}
2624 	}
2625 
2626 	old_dev_maps = dev_maps;
2627 
2628 out_no_old_maps:
2629 	dev_maps = new_dev_maps;
2630 	active = true;
2631 
2632 out_no_new_maps:
2633 	if (type == XPS_CPUS)
2634 		/* update Tx queue numa node */
2635 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2636 					     (numa_node_id >= 0) ?
2637 					     numa_node_id : NUMA_NO_NODE);
2638 
2639 	if (!dev_maps)
2640 		goto out_no_maps;
2641 
2642 	/* removes tx-queue from unused CPUs/rx-queues */
2643 	for (j = 0; j < dev_maps->nr_ids; j++) {
2644 		tci = j * dev_maps->num_tc;
2645 
2646 		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2647 			if (i == tc &&
2648 			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2649 			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2650 				continue;
2651 
2652 			active |= remove_xps_queue(dev_maps,
2653 						   copy ? old_dev_maps : NULL,
2654 						   tci, index);
2655 		}
2656 	}
2657 
2658 	if (old_dev_maps)
2659 		kfree_rcu(old_dev_maps, rcu);
2660 
2661 	/* free map if not active */
2662 	if (!active)
2663 		reset_xps_maps(dev, dev_maps, type);
2664 
2665 out_no_maps:
2666 	mutex_unlock(&xps_map_mutex);
2667 
2668 	return 0;
2669 error:
2670 	/* remove any maps that we added */
2671 	for (j = 0; j < nr_ids; j++) {
2672 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2673 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2674 			map = copy ?
2675 			      xmap_dereference(dev_maps->attr_map[tci]) :
2676 			      NULL;
2677 			if (new_map && new_map != map)
2678 				kfree(new_map);
2679 		}
2680 	}
2681 
2682 	mutex_unlock(&xps_map_mutex);
2683 
2684 	kfree(new_dev_maps);
2685 	return -ENOMEM;
2686 }
2687 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2688 
2689 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2690 			u16 index)
2691 {
2692 	int ret;
2693 
2694 	cpus_read_lock();
2695 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2696 	cpus_read_unlock();
2697 
2698 	return ret;
2699 }
2700 EXPORT_SYMBOL(netif_set_xps_queue);
2701 
2702 #endif
2703 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2704 {
2705 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2706 
2707 	/* Unbind any subordinate channels */
2708 	while (txq-- != &dev->_tx[0]) {
2709 		if (txq->sb_dev)
2710 			netdev_unbind_sb_channel(dev, txq->sb_dev);
2711 	}
2712 }
2713 
2714 void netdev_reset_tc(struct net_device *dev)
2715 {
2716 #ifdef CONFIG_XPS
2717 	netif_reset_xps_queues_gt(dev, 0);
2718 #endif
2719 	netdev_unbind_all_sb_channels(dev);
2720 
2721 	/* Reset TC configuration of device */
2722 	dev->num_tc = 0;
2723 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2724 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2725 }
2726 EXPORT_SYMBOL(netdev_reset_tc);
2727 
2728 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2729 {
2730 	if (tc >= dev->num_tc)
2731 		return -EINVAL;
2732 
2733 #ifdef CONFIG_XPS
2734 	netif_reset_xps_queues(dev, offset, count);
2735 #endif
2736 	dev->tc_to_txq[tc].count = count;
2737 	dev->tc_to_txq[tc].offset = offset;
2738 	return 0;
2739 }
2740 EXPORT_SYMBOL(netdev_set_tc_queue);
2741 
2742 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2743 {
2744 	if (num_tc > TC_MAX_QUEUE)
2745 		return -EINVAL;
2746 
2747 #ifdef CONFIG_XPS
2748 	netif_reset_xps_queues_gt(dev, 0);
2749 #endif
2750 	netdev_unbind_all_sb_channels(dev);
2751 
2752 	dev->num_tc = num_tc;
2753 	return 0;
2754 }
2755 EXPORT_SYMBOL(netdev_set_num_tc);
2756 
2757 void netdev_unbind_sb_channel(struct net_device *dev,
2758 			      struct net_device *sb_dev)
2759 {
2760 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2761 
2762 #ifdef CONFIG_XPS
2763 	netif_reset_xps_queues_gt(sb_dev, 0);
2764 #endif
2765 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2766 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2767 
2768 	while (txq-- != &dev->_tx[0]) {
2769 		if (txq->sb_dev == sb_dev)
2770 			txq->sb_dev = NULL;
2771 	}
2772 }
2773 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2774 
2775 int netdev_bind_sb_channel_queue(struct net_device *dev,
2776 				 struct net_device *sb_dev,
2777 				 u8 tc, u16 count, u16 offset)
2778 {
2779 	/* Make certain the sb_dev and dev are already configured */
2780 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2781 		return -EINVAL;
2782 
2783 	/* We cannot hand out queues we don't have */
2784 	if ((offset + count) > dev->real_num_tx_queues)
2785 		return -EINVAL;
2786 
2787 	/* Record the mapping */
2788 	sb_dev->tc_to_txq[tc].count = count;
2789 	sb_dev->tc_to_txq[tc].offset = offset;
2790 
2791 	/* Provide a way for Tx queue to find the tc_to_txq map or
2792 	 * XPS map for itself.
2793 	 */
2794 	while (count--)
2795 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2796 
2797 	return 0;
2798 }
2799 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2800 
2801 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2802 {
2803 	/* Do not use a multiqueue device to represent a subordinate channel */
2804 	if (netif_is_multiqueue(dev))
2805 		return -ENODEV;
2806 
2807 	/* We allow channels 1 - 32767 to be used for subordinate channels.
2808 	 * Channel 0 is meant to be "native" mode and used only to represent
2809 	 * the main root device. We allow writing 0 to reset the device back
2810 	 * to normal mode after being used as a subordinate channel.
2811 	 */
2812 	if (channel > S16_MAX)
2813 		return -EINVAL;
2814 
2815 	dev->num_tc = -channel;
2816 
2817 	return 0;
2818 }
2819 EXPORT_SYMBOL(netdev_set_sb_channel);
2820 
2821 /*
2822  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2823  * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2824  */
2825 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2826 {
2827 	bool disabling;
2828 	int rc;
2829 
2830 	disabling = txq < dev->real_num_tx_queues;
2831 
2832 	if (txq < 1 || txq > dev->num_tx_queues)
2833 		return -EINVAL;
2834 
2835 	if (dev->reg_state == NETREG_REGISTERED ||
2836 	    dev->reg_state == NETREG_UNREGISTERING) {
2837 		ASSERT_RTNL();
2838 
2839 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2840 						  txq);
2841 		if (rc)
2842 			return rc;
2843 
2844 		if (dev->num_tc)
2845 			netif_setup_tc(dev, txq);
2846 
2847 		dev_qdisc_change_real_num_tx(dev, txq);
2848 
2849 		dev->real_num_tx_queues = txq;
2850 
2851 		if (disabling) {
2852 			synchronize_net();
2853 			qdisc_reset_all_tx_gt(dev, txq);
2854 #ifdef CONFIG_XPS
2855 			netif_reset_xps_queues_gt(dev, txq);
2856 #endif
2857 		}
2858 	} else {
2859 		dev->real_num_tx_queues = txq;
2860 	}
2861 
2862 	return 0;
2863 }
2864 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2865 
2866 #ifdef CONFIG_SYSFS
2867 /**
2868  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2869  *	@dev: Network device
2870  *	@rxq: Actual number of RX queues
2871  *
2872  *	This must be called either with the rtnl_lock held or before
2873  *	registration of the net device.  Returns 0 on success, or a
2874  *	negative error code.  If called before registration, it always
2875  *	succeeds.
2876  */
2877 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2878 {
2879 	int rc;
2880 
2881 	if (rxq < 1 || rxq > dev->num_rx_queues)
2882 		return -EINVAL;
2883 
2884 	if (dev->reg_state == NETREG_REGISTERED) {
2885 		ASSERT_RTNL();
2886 
2887 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2888 						  rxq);
2889 		if (rc)
2890 			return rc;
2891 	}
2892 
2893 	dev->real_num_rx_queues = rxq;
2894 	return 0;
2895 }
2896 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2897 #endif
2898 
2899 /**
2900  *	netif_set_real_num_queues - set actual number of RX and TX queues used
2901  *	@dev: Network device
2902  *	@txq: Actual number of TX queues
2903  *	@rxq: Actual number of RX queues
2904  *
2905  *	Set the real number of both TX and RX queues.
2906  *	Does nothing if the number of queues is already correct.
2907  */
2908 int netif_set_real_num_queues(struct net_device *dev,
2909 			      unsigned int txq, unsigned int rxq)
2910 {
2911 	unsigned int old_rxq = dev->real_num_rx_queues;
2912 	int err;
2913 
2914 	if (txq < 1 || txq > dev->num_tx_queues ||
2915 	    rxq < 1 || rxq > dev->num_rx_queues)
2916 		return -EINVAL;
2917 
2918 	/* Start from increases, so the error path only does decreases -
2919 	 * decreases can't fail.
2920 	 */
2921 	if (rxq > dev->real_num_rx_queues) {
2922 		err = netif_set_real_num_rx_queues(dev, rxq);
2923 		if (err)
2924 			return err;
2925 	}
2926 	if (txq > dev->real_num_tx_queues) {
2927 		err = netif_set_real_num_tx_queues(dev, txq);
2928 		if (err)
2929 			goto undo_rx;
2930 	}
2931 	if (rxq < dev->real_num_rx_queues)
2932 		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
2933 	if (txq < dev->real_num_tx_queues)
2934 		WARN_ON(netif_set_real_num_tx_queues(dev, txq));
2935 
2936 	return 0;
2937 undo_rx:
2938 	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
2939 	return err;
2940 }
2941 EXPORT_SYMBOL(netif_set_real_num_queues);
2942 
2943 /**
2944  * netif_get_num_default_rss_queues - default number of RSS queues
2945  *
2946  * This routine should set an upper limit on the number of RSS queues
2947  * used by default by multiqueue devices.
2948  */
2949 int netif_get_num_default_rss_queues(void)
2950 {
2951 	return is_kdump_kernel() ?
2952 		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2953 }
2954 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2955 
2956 static void __netif_reschedule(struct Qdisc *q)
2957 {
2958 	struct softnet_data *sd;
2959 	unsigned long flags;
2960 
2961 	local_irq_save(flags);
2962 	sd = this_cpu_ptr(&softnet_data);
2963 	q->next_sched = NULL;
2964 	*sd->output_queue_tailp = q;
2965 	sd->output_queue_tailp = &q->next_sched;
2966 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2967 	local_irq_restore(flags);
2968 }
2969 
2970 void __netif_schedule(struct Qdisc *q)
2971 {
2972 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2973 		__netif_reschedule(q);
2974 }
2975 EXPORT_SYMBOL(__netif_schedule);
2976 
2977 struct dev_kfree_skb_cb {
2978 	enum skb_free_reason reason;
2979 };
2980 
2981 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2982 {
2983 	return (struct dev_kfree_skb_cb *)skb->cb;
2984 }
2985 
2986 void netif_schedule_queue(struct netdev_queue *txq)
2987 {
2988 	rcu_read_lock();
2989 	if (!netif_xmit_stopped(txq)) {
2990 		struct Qdisc *q = rcu_dereference(txq->qdisc);
2991 
2992 		__netif_schedule(q);
2993 	}
2994 	rcu_read_unlock();
2995 }
2996 EXPORT_SYMBOL(netif_schedule_queue);
2997 
2998 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2999 {
3000 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3001 		struct Qdisc *q;
3002 
3003 		rcu_read_lock();
3004 		q = rcu_dereference(dev_queue->qdisc);
3005 		__netif_schedule(q);
3006 		rcu_read_unlock();
3007 	}
3008 }
3009 EXPORT_SYMBOL(netif_tx_wake_queue);
3010 
3011 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3012 {
3013 	unsigned long flags;
3014 
3015 	if (unlikely(!skb))
3016 		return;
3017 
3018 	if (likely(refcount_read(&skb->users) == 1)) {
3019 		smp_rmb();
3020 		refcount_set(&skb->users, 0);
3021 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3022 		return;
3023 	}
3024 	get_kfree_skb_cb(skb)->reason = reason;
3025 	local_irq_save(flags);
3026 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3027 	__this_cpu_write(softnet_data.completion_queue, skb);
3028 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3029 	local_irq_restore(flags);
3030 }
3031 EXPORT_SYMBOL(__dev_kfree_skb_irq);
3032 
3033 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3034 {
3035 	if (in_hardirq() || irqs_disabled())
3036 		__dev_kfree_skb_irq(skb, reason);
3037 	else
3038 		dev_kfree_skb(skb);
3039 }
3040 EXPORT_SYMBOL(__dev_kfree_skb_any);
3041 
3042 
3043 /**
3044  * netif_device_detach - mark device as removed
3045  * @dev: network device
3046  *
3047  * Mark device as removed from system and therefore no longer available.
3048  */
3049 void netif_device_detach(struct net_device *dev)
3050 {
3051 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3052 	    netif_running(dev)) {
3053 		netif_tx_stop_all_queues(dev);
3054 	}
3055 }
3056 EXPORT_SYMBOL(netif_device_detach);
3057 
3058 /**
3059  * netif_device_attach - mark device as attached
3060  * @dev: network device
3061  *
3062  * Mark device as attached from system and restart if needed.
3063  */
3064 void netif_device_attach(struct net_device *dev)
3065 {
3066 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3067 	    netif_running(dev)) {
3068 		netif_tx_wake_all_queues(dev);
3069 		__netdev_watchdog_up(dev);
3070 	}
3071 }
3072 EXPORT_SYMBOL(netif_device_attach);
3073 
3074 /*
3075  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
3076  * to be used as a distribution range.
3077  */
3078 static u16 skb_tx_hash(const struct net_device *dev,
3079 		       const struct net_device *sb_dev,
3080 		       struct sk_buff *skb)
3081 {
3082 	u32 hash;
3083 	u16 qoffset = 0;
3084 	u16 qcount = dev->real_num_tx_queues;
3085 
3086 	if (dev->num_tc) {
3087 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3088 
3089 		qoffset = sb_dev->tc_to_txq[tc].offset;
3090 		qcount = sb_dev->tc_to_txq[tc].count;
3091 		if (unlikely(!qcount)) {
3092 			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3093 					     sb_dev->name, qoffset, tc);
3094 			qoffset = 0;
3095 			qcount = dev->real_num_tx_queues;
3096 		}
3097 	}
3098 
3099 	if (skb_rx_queue_recorded(skb)) {
3100 		hash = skb_get_rx_queue(skb);
3101 		if (hash >= qoffset)
3102 			hash -= qoffset;
3103 		while (unlikely(hash >= qcount))
3104 			hash -= qcount;
3105 		return hash + qoffset;
3106 	}
3107 
3108 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3109 }
3110 
3111 static void skb_warn_bad_offload(const struct sk_buff *skb)
3112 {
3113 	static const netdev_features_t null_features;
3114 	struct net_device *dev = skb->dev;
3115 	const char *name = "";
3116 
3117 	if (!net_ratelimit())
3118 		return;
3119 
3120 	if (dev) {
3121 		if (dev->dev.parent)
3122 			name = dev_driver_string(dev->dev.parent);
3123 		else
3124 			name = netdev_name(dev);
3125 	}
3126 	skb_dump(KERN_WARNING, skb, false);
3127 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3128 	     name, dev ? &dev->features : &null_features,
3129 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3130 }
3131 
3132 /*
3133  * Invalidate hardware checksum when packet is to be mangled, and
3134  * complete checksum manually on outgoing path.
3135  */
3136 int skb_checksum_help(struct sk_buff *skb)
3137 {
3138 	__wsum csum;
3139 	int ret = 0, offset;
3140 
3141 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3142 		goto out_set_summed;
3143 
3144 	if (unlikely(skb_is_gso(skb))) {
3145 		skb_warn_bad_offload(skb);
3146 		return -EINVAL;
3147 	}
3148 
3149 	/* Before computing a checksum, we should make sure no frag could
3150 	 * be modified by an external entity : checksum could be wrong.
3151 	 */
3152 	if (skb_has_shared_frag(skb)) {
3153 		ret = __skb_linearize(skb);
3154 		if (ret)
3155 			goto out;
3156 	}
3157 
3158 	offset = skb_checksum_start_offset(skb);
3159 	BUG_ON(offset >= skb_headlen(skb));
3160 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3161 
3162 	offset += skb->csum_offset;
3163 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3164 
3165 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3166 	if (ret)
3167 		goto out;
3168 
3169 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3170 out_set_summed:
3171 	skb->ip_summed = CHECKSUM_NONE;
3172 out:
3173 	return ret;
3174 }
3175 EXPORT_SYMBOL(skb_checksum_help);
3176 
3177 int skb_crc32c_csum_help(struct sk_buff *skb)
3178 {
3179 	__le32 crc32c_csum;
3180 	int ret = 0, offset, start;
3181 
3182 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3183 		goto out;
3184 
3185 	if (unlikely(skb_is_gso(skb)))
3186 		goto out;
3187 
3188 	/* Before computing a checksum, we should make sure no frag could
3189 	 * be modified by an external entity : checksum could be wrong.
3190 	 */
3191 	if (unlikely(skb_has_shared_frag(skb))) {
3192 		ret = __skb_linearize(skb);
3193 		if (ret)
3194 			goto out;
3195 	}
3196 	start = skb_checksum_start_offset(skb);
3197 	offset = start + offsetof(struct sctphdr, checksum);
3198 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3199 		ret = -EINVAL;
3200 		goto out;
3201 	}
3202 
3203 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3204 	if (ret)
3205 		goto out;
3206 
3207 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3208 						  skb->len - start, ~(__u32)0,
3209 						  crc32c_csum_stub));
3210 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3211 	skb->ip_summed = CHECKSUM_NONE;
3212 	skb->csum_not_inet = 0;
3213 out:
3214 	return ret;
3215 }
3216 
3217 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3218 {
3219 	__be16 type = skb->protocol;
3220 
3221 	/* Tunnel gso handlers can set protocol to ethernet. */
3222 	if (type == htons(ETH_P_TEB)) {
3223 		struct ethhdr *eth;
3224 
3225 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3226 			return 0;
3227 
3228 		eth = (struct ethhdr *)skb->data;
3229 		type = eth->h_proto;
3230 	}
3231 
3232 	return __vlan_get_protocol(skb, type, depth);
3233 }
3234 
3235 /* openvswitch calls this on rx path, so we need a different check.
3236  */
3237 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3238 {
3239 	if (tx_path)
3240 		return skb->ip_summed != CHECKSUM_PARTIAL &&
3241 		       skb->ip_summed != CHECKSUM_UNNECESSARY;
3242 
3243 	return skb->ip_summed == CHECKSUM_NONE;
3244 }
3245 
3246 /**
3247  *	__skb_gso_segment - Perform segmentation on skb.
3248  *	@skb: buffer to segment
3249  *	@features: features for the output path (see dev->features)
3250  *	@tx_path: whether it is called in TX path
3251  *
3252  *	This function segments the given skb and returns a list of segments.
3253  *
3254  *	It may return NULL if the skb requires no segmentation.  This is
3255  *	only possible when GSO is used for verifying header integrity.
3256  *
3257  *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3258  */
3259 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3260 				  netdev_features_t features, bool tx_path)
3261 {
3262 	struct sk_buff *segs;
3263 
3264 	if (unlikely(skb_needs_check(skb, tx_path))) {
3265 		int err;
3266 
3267 		/* We're going to init ->check field in TCP or UDP header */
3268 		err = skb_cow_head(skb, 0);
3269 		if (err < 0)
3270 			return ERR_PTR(err);
3271 	}
3272 
3273 	/* Only report GSO partial support if it will enable us to
3274 	 * support segmentation on this frame without needing additional
3275 	 * work.
3276 	 */
3277 	if (features & NETIF_F_GSO_PARTIAL) {
3278 		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3279 		struct net_device *dev = skb->dev;
3280 
3281 		partial_features |= dev->features & dev->gso_partial_features;
3282 		if (!skb_gso_ok(skb, features | partial_features))
3283 			features &= ~NETIF_F_GSO_PARTIAL;
3284 	}
3285 
3286 	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3287 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3288 
3289 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3290 	SKB_GSO_CB(skb)->encap_level = 0;
3291 
3292 	skb_reset_mac_header(skb);
3293 	skb_reset_mac_len(skb);
3294 
3295 	segs = skb_mac_gso_segment(skb, features);
3296 
3297 	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3298 		skb_warn_bad_offload(skb);
3299 
3300 	return segs;
3301 }
3302 EXPORT_SYMBOL(__skb_gso_segment);
3303 
3304 /* Take action when hardware reception checksum errors are detected. */
3305 #ifdef CONFIG_BUG
3306 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3307 {
3308 	netdev_err(dev, "hw csum failure\n");
3309 	skb_dump(KERN_ERR, skb, true);
3310 	dump_stack();
3311 }
3312 
3313 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3314 {
3315 	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3316 }
3317 EXPORT_SYMBOL(netdev_rx_csum_fault);
3318 #endif
3319 
3320 /* XXX: check that highmem exists at all on the given machine. */
3321 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3322 {
3323 #ifdef CONFIG_HIGHMEM
3324 	int i;
3325 
3326 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3327 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3328 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3329 
3330 			if (PageHighMem(skb_frag_page(frag)))
3331 				return 1;
3332 		}
3333 	}
3334 #endif
3335 	return 0;
3336 }
3337 
3338 /* If MPLS offload request, verify we are testing hardware MPLS features
3339  * instead of standard features for the netdev.
3340  */
3341 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3342 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3343 					   netdev_features_t features,
3344 					   __be16 type)
3345 {
3346 	if (eth_p_mpls(type))
3347 		features &= skb->dev->mpls_features;
3348 
3349 	return features;
3350 }
3351 #else
3352 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3353 					   netdev_features_t features,
3354 					   __be16 type)
3355 {
3356 	return features;
3357 }
3358 #endif
3359 
3360 static netdev_features_t harmonize_features(struct sk_buff *skb,
3361 	netdev_features_t features)
3362 {
3363 	__be16 type;
3364 
3365 	type = skb_network_protocol(skb, NULL);
3366 	features = net_mpls_features(skb, features, type);
3367 
3368 	if (skb->ip_summed != CHECKSUM_NONE &&
3369 	    !can_checksum_protocol(features, type)) {
3370 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3371 	}
3372 	if (illegal_highdma(skb->dev, skb))
3373 		features &= ~NETIF_F_SG;
3374 
3375 	return features;
3376 }
3377 
3378 netdev_features_t passthru_features_check(struct sk_buff *skb,
3379 					  struct net_device *dev,
3380 					  netdev_features_t features)
3381 {
3382 	return features;
3383 }
3384 EXPORT_SYMBOL(passthru_features_check);
3385 
3386 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3387 					     struct net_device *dev,
3388 					     netdev_features_t features)
3389 {
3390 	return vlan_features_check(skb, features);
3391 }
3392 
3393 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3394 					    struct net_device *dev,
3395 					    netdev_features_t features)
3396 {
3397 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3398 
3399 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
3400 		return features & ~NETIF_F_GSO_MASK;
3401 
3402 	if (!skb_shinfo(skb)->gso_type) {
3403 		skb_warn_bad_offload(skb);
3404 		return features & ~NETIF_F_GSO_MASK;
3405 	}
3406 
3407 	/* Support for GSO partial features requires software
3408 	 * intervention before we can actually process the packets
3409 	 * so we need to strip support for any partial features now
3410 	 * and we can pull them back in after we have partially
3411 	 * segmented the frame.
3412 	 */
3413 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3414 		features &= ~dev->gso_partial_features;
3415 
3416 	/* Make sure to clear the IPv4 ID mangling feature if the
3417 	 * IPv4 header has the potential to be fragmented.
3418 	 */
3419 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3420 		struct iphdr *iph = skb->encapsulation ?
3421 				    inner_ip_hdr(skb) : ip_hdr(skb);
3422 
3423 		if (!(iph->frag_off & htons(IP_DF)))
3424 			features &= ~NETIF_F_TSO_MANGLEID;
3425 	}
3426 
3427 	return features;
3428 }
3429 
3430 netdev_features_t netif_skb_features(struct sk_buff *skb)
3431 {
3432 	struct net_device *dev = skb->dev;
3433 	netdev_features_t features = dev->features;
3434 
3435 	if (skb_is_gso(skb))
3436 		features = gso_features_check(skb, dev, features);
3437 
3438 	/* If encapsulation offload request, verify we are testing
3439 	 * hardware encapsulation features instead of standard
3440 	 * features for the netdev
3441 	 */
3442 	if (skb->encapsulation)
3443 		features &= dev->hw_enc_features;
3444 
3445 	if (skb_vlan_tagged(skb))
3446 		features = netdev_intersect_features(features,
3447 						     dev->vlan_features |
3448 						     NETIF_F_HW_VLAN_CTAG_TX |
3449 						     NETIF_F_HW_VLAN_STAG_TX);
3450 
3451 	if (dev->netdev_ops->ndo_features_check)
3452 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3453 								features);
3454 	else
3455 		features &= dflt_features_check(skb, dev, features);
3456 
3457 	return harmonize_features(skb, features);
3458 }
3459 EXPORT_SYMBOL(netif_skb_features);
3460 
3461 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3462 		    struct netdev_queue *txq, bool more)
3463 {
3464 	unsigned int len;
3465 	int rc;
3466 
3467 	if (dev_nit_active(dev))
3468 		dev_queue_xmit_nit(skb, dev);
3469 
3470 	len = skb->len;
3471 	PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
3472 	trace_net_dev_start_xmit(skb, dev);
3473 	rc = netdev_start_xmit(skb, dev, txq, more);
3474 	trace_net_dev_xmit(skb, rc, dev, len);
3475 
3476 	return rc;
3477 }
3478 
3479 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3480 				    struct netdev_queue *txq, int *ret)
3481 {
3482 	struct sk_buff *skb = first;
3483 	int rc = NETDEV_TX_OK;
3484 
3485 	while (skb) {
3486 		struct sk_buff *next = skb->next;
3487 
3488 		skb_mark_not_on_list(skb);
3489 		rc = xmit_one(skb, dev, txq, next != NULL);
3490 		if (unlikely(!dev_xmit_complete(rc))) {
3491 			skb->next = next;
3492 			goto out;
3493 		}
3494 
3495 		skb = next;
3496 		if (netif_tx_queue_stopped(txq) && skb) {
3497 			rc = NETDEV_TX_BUSY;
3498 			break;
3499 		}
3500 	}
3501 
3502 out:
3503 	*ret = rc;
3504 	return skb;
3505 }
3506 
3507 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3508 					  netdev_features_t features)
3509 {
3510 	if (skb_vlan_tag_present(skb) &&
3511 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3512 		skb = __vlan_hwaccel_push_inside(skb);
3513 	return skb;
3514 }
3515 
3516 int skb_csum_hwoffload_help(struct sk_buff *skb,
3517 			    const netdev_features_t features)
3518 {
3519 	if (unlikely(skb_csum_is_sctp(skb)))
3520 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3521 			skb_crc32c_csum_help(skb);
3522 
3523 	if (features & NETIF_F_HW_CSUM)
3524 		return 0;
3525 
3526 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3527 		switch (skb->csum_offset) {
3528 		case offsetof(struct tcphdr, check):
3529 		case offsetof(struct udphdr, check):
3530 			return 0;
3531 		}
3532 	}
3533 
3534 	return skb_checksum_help(skb);
3535 }
3536 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3537 
3538 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3539 {
3540 	netdev_features_t features;
3541 
3542 	features = netif_skb_features(skb);
3543 	skb = validate_xmit_vlan(skb, features);
3544 	if (unlikely(!skb))
3545 		goto out_null;
3546 
3547 	skb = sk_validate_xmit_skb(skb, dev);
3548 	if (unlikely(!skb))
3549 		goto out_null;
3550 
3551 	if (netif_needs_gso(skb, features)) {
3552 		struct sk_buff *segs;
3553 
3554 		segs = skb_gso_segment(skb, features);
3555 		if (IS_ERR(segs)) {
3556 			goto out_kfree_skb;
3557 		} else if (segs) {
3558 			consume_skb(skb);
3559 			skb = segs;
3560 		}
3561 	} else {
3562 		if (skb_needs_linearize(skb, features) &&
3563 		    __skb_linearize(skb))
3564 			goto out_kfree_skb;
3565 
3566 		/* If packet is not checksummed and device does not
3567 		 * support checksumming for this protocol, complete
3568 		 * checksumming here.
3569 		 */
3570 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3571 			if (skb->encapsulation)
3572 				skb_set_inner_transport_header(skb,
3573 							       skb_checksum_start_offset(skb));
3574 			else
3575 				skb_set_transport_header(skb,
3576 							 skb_checksum_start_offset(skb));
3577 			if (skb_csum_hwoffload_help(skb, features))
3578 				goto out_kfree_skb;
3579 		}
3580 	}
3581 
3582 	skb = validate_xmit_xfrm(skb, features, again);
3583 
3584 	return skb;
3585 
3586 out_kfree_skb:
3587 	kfree_skb(skb);
3588 out_null:
3589 	atomic_long_inc(&dev->tx_dropped);
3590 	return NULL;
3591 }
3592 
3593 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3594 {
3595 	struct sk_buff *next, *head = NULL, *tail;
3596 
3597 	for (; skb != NULL; skb = next) {
3598 		next = skb->next;
3599 		skb_mark_not_on_list(skb);
3600 
3601 		/* in case skb wont be segmented, point to itself */
3602 		skb->prev = skb;
3603 
3604 		skb = validate_xmit_skb(skb, dev, again);
3605 		if (!skb)
3606 			continue;
3607 
3608 		if (!head)
3609 			head = skb;
3610 		else
3611 			tail->next = skb;
3612 		/* If skb was segmented, skb->prev points to
3613 		 * the last segment. If not, it still contains skb.
3614 		 */
3615 		tail = skb->prev;
3616 	}
3617 	return head;
3618 }
3619 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3620 
3621 static void qdisc_pkt_len_init(struct sk_buff *skb)
3622 {
3623 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3624 
3625 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3626 
3627 	/* To get more precise estimation of bytes sent on wire,
3628 	 * we add to pkt_len the headers size of all segments
3629 	 */
3630 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3631 		unsigned int hdr_len;
3632 		u16 gso_segs = shinfo->gso_segs;
3633 
3634 		/* mac layer + network layer */
3635 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3636 
3637 		/* + transport layer */
3638 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3639 			const struct tcphdr *th;
3640 			struct tcphdr _tcphdr;
3641 
3642 			th = skb_header_pointer(skb, skb_transport_offset(skb),
3643 						sizeof(_tcphdr), &_tcphdr);
3644 			if (likely(th))
3645 				hdr_len += __tcp_hdrlen(th);
3646 		} else {
3647 			struct udphdr _udphdr;
3648 
3649 			if (skb_header_pointer(skb, skb_transport_offset(skb),
3650 					       sizeof(_udphdr), &_udphdr))
3651 				hdr_len += sizeof(struct udphdr);
3652 		}
3653 
3654 		if (shinfo->gso_type & SKB_GSO_DODGY)
3655 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3656 						shinfo->gso_size);
3657 
3658 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3659 	}
3660 }
3661 
3662 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3663 			     struct sk_buff **to_free,
3664 			     struct netdev_queue *txq)
3665 {
3666 	int rc;
3667 
3668 	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3669 	if (rc == NET_XMIT_SUCCESS)
3670 		trace_qdisc_enqueue(q, txq, skb);
3671 	return rc;
3672 }
3673 
3674 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3675 				 struct net_device *dev,
3676 				 struct netdev_queue *txq)
3677 {
3678 	spinlock_t *root_lock = qdisc_lock(q);
3679 	struct sk_buff *to_free = NULL;
3680 	bool contended;
3681 	int rc;
3682 
3683 	qdisc_calculate_pkt_len(skb, q);
3684 
3685 	if (q->flags & TCQ_F_NOLOCK) {
3686 		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3687 		    qdisc_run_begin(q)) {
3688 			/* Retest nolock_qdisc_is_empty() within the protection
3689 			 * of q->seqlock to protect from racing with requeuing.
3690 			 */
3691 			if (unlikely(!nolock_qdisc_is_empty(q))) {
3692 				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3693 				__qdisc_run(q);
3694 				qdisc_run_end(q);
3695 
3696 				goto no_lock_out;
3697 			}
3698 
3699 			qdisc_bstats_cpu_update(q, skb);
3700 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3701 			    !nolock_qdisc_is_empty(q))
3702 				__qdisc_run(q);
3703 
3704 			qdisc_run_end(q);
3705 			return NET_XMIT_SUCCESS;
3706 		}
3707 
3708 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3709 		qdisc_run(q);
3710 
3711 no_lock_out:
3712 		if (unlikely(to_free))
3713 			kfree_skb_list(to_free);
3714 		return rc;
3715 	}
3716 
3717 	/*
3718 	 * Heuristic to force contended enqueues to serialize on a
3719 	 * separate lock before trying to get qdisc main lock.
3720 	 * This permits qdisc->running owner to get the lock more
3721 	 * often and dequeue packets faster.
3722 	 */
3723 	contended = qdisc_is_running(q);
3724 	if (unlikely(contended))
3725 		spin_lock(&q->busylock);
3726 
3727 	spin_lock(root_lock);
3728 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3729 		__qdisc_drop(skb, &to_free);
3730 		rc = NET_XMIT_DROP;
3731 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3732 		   qdisc_run_begin(q)) {
3733 		/*
3734 		 * This is a work-conserving queue; there are no old skbs
3735 		 * waiting to be sent out; and the qdisc is not running -
3736 		 * xmit the skb directly.
3737 		 */
3738 
3739 		qdisc_bstats_update(q, skb);
3740 
3741 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3742 			if (unlikely(contended)) {
3743 				spin_unlock(&q->busylock);
3744 				contended = false;
3745 			}
3746 			__qdisc_run(q);
3747 		}
3748 
3749 		qdisc_run_end(q);
3750 		rc = NET_XMIT_SUCCESS;
3751 	} else {
3752 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3753 		if (qdisc_run_begin(q)) {
3754 			if (unlikely(contended)) {
3755 				spin_unlock(&q->busylock);
3756 				contended = false;
3757 			}
3758 			__qdisc_run(q);
3759 			qdisc_run_end(q);
3760 		}
3761 	}
3762 	spin_unlock(root_lock);
3763 	if (unlikely(to_free))
3764 		kfree_skb_list(to_free);
3765 	if (unlikely(contended))
3766 		spin_unlock(&q->busylock);
3767 	return rc;
3768 }
3769 
3770 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3771 static void skb_update_prio(struct sk_buff *skb)
3772 {
3773 	const struct netprio_map *map;
3774 	const struct sock *sk;
3775 	unsigned int prioidx;
3776 
3777 	if (skb->priority)
3778 		return;
3779 	map = rcu_dereference_bh(skb->dev->priomap);
3780 	if (!map)
3781 		return;
3782 	sk = skb_to_full_sk(skb);
3783 	if (!sk)
3784 		return;
3785 
3786 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3787 
3788 	if (prioidx < map->priomap_len)
3789 		skb->priority = map->priomap[prioidx];
3790 }
3791 #else
3792 #define skb_update_prio(skb)
3793 #endif
3794 
3795 /**
3796  *	dev_loopback_xmit - loop back @skb
3797  *	@net: network namespace this loopback is happening in
3798  *	@sk:  sk needed to be a netfilter okfn
3799  *	@skb: buffer to transmit
3800  */
3801 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3802 {
3803 	skb_reset_mac_header(skb);
3804 	__skb_pull(skb, skb_network_offset(skb));
3805 	skb->pkt_type = PACKET_LOOPBACK;
3806 	if (skb->ip_summed == CHECKSUM_NONE)
3807 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3808 	WARN_ON(!skb_dst(skb));
3809 	skb_dst_force(skb);
3810 	netif_rx_ni(skb);
3811 	return 0;
3812 }
3813 EXPORT_SYMBOL(dev_loopback_xmit);
3814 
3815 #ifdef CONFIG_NET_EGRESS
3816 static struct sk_buff *
3817 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3818 {
3819 #ifdef CONFIG_NET_CLS_ACT
3820 	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3821 	struct tcf_result cl_res;
3822 
3823 	if (!miniq)
3824 		return skb;
3825 
3826 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3827 	qdisc_skb_cb(skb)->mru = 0;
3828 	qdisc_skb_cb(skb)->post_ct = false;
3829 	mini_qdisc_bstats_cpu_update(miniq, skb);
3830 
3831 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
3832 	case TC_ACT_OK:
3833 	case TC_ACT_RECLASSIFY:
3834 		skb->tc_index = TC_H_MIN(cl_res.classid);
3835 		break;
3836 	case TC_ACT_SHOT:
3837 		mini_qdisc_qstats_cpu_drop(miniq);
3838 		*ret = NET_XMIT_DROP;
3839 		kfree_skb(skb);
3840 		return NULL;
3841 	case TC_ACT_STOLEN:
3842 	case TC_ACT_QUEUED:
3843 	case TC_ACT_TRAP:
3844 		*ret = NET_XMIT_SUCCESS;
3845 		consume_skb(skb);
3846 		return NULL;
3847 	case TC_ACT_REDIRECT:
3848 		/* No need to push/pop skb's mac_header here on egress! */
3849 		skb_do_redirect(skb);
3850 		*ret = NET_XMIT_SUCCESS;
3851 		return NULL;
3852 	default:
3853 		break;
3854 	}
3855 #endif /* CONFIG_NET_CLS_ACT */
3856 
3857 	return skb;
3858 }
3859 #endif /* CONFIG_NET_EGRESS */
3860 
3861 #ifdef CONFIG_XPS
3862 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3863 			       struct xps_dev_maps *dev_maps, unsigned int tci)
3864 {
3865 	int tc = netdev_get_prio_tc_map(dev, skb->priority);
3866 	struct xps_map *map;
3867 	int queue_index = -1;
3868 
3869 	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
3870 		return queue_index;
3871 
3872 	tci *= dev_maps->num_tc;
3873 	tci += tc;
3874 
3875 	map = rcu_dereference(dev_maps->attr_map[tci]);
3876 	if (map) {
3877 		if (map->len == 1)
3878 			queue_index = map->queues[0];
3879 		else
3880 			queue_index = map->queues[reciprocal_scale(
3881 						skb_get_hash(skb), map->len)];
3882 		if (unlikely(queue_index >= dev->real_num_tx_queues))
3883 			queue_index = -1;
3884 	}
3885 	return queue_index;
3886 }
3887 #endif
3888 
3889 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3890 			 struct sk_buff *skb)
3891 {
3892 #ifdef CONFIG_XPS
3893 	struct xps_dev_maps *dev_maps;
3894 	struct sock *sk = skb->sk;
3895 	int queue_index = -1;
3896 
3897 	if (!static_key_false(&xps_needed))
3898 		return -1;
3899 
3900 	rcu_read_lock();
3901 	if (!static_key_false(&xps_rxqs_needed))
3902 		goto get_cpus_map;
3903 
3904 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
3905 	if (dev_maps) {
3906 		int tci = sk_rx_queue_get(sk);
3907 
3908 		if (tci >= 0)
3909 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3910 							  tci);
3911 	}
3912 
3913 get_cpus_map:
3914 	if (queue_index < 0) {
3915 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
3916 		if (dev_maps) {
3917 			unsigned int tci = skb->sender_cpu - 1;
3918 
3919 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3920 							  tci);
3921 		}
3922 	}
3923 	rcu_read_unlock();
3924 
3925 	return queue_index;
3926 #else
3927 	return -1;
3928 #endif
3929 }
3930 
3931 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3932 		     struct net_device *sb_dev)
3933 {
3934 	return 0;
3935 }
3936 EXPORT_SYMBOL(dev_pick_tx_zero);
3937 
3938 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3939 		       struct net_device *sb_dev)
3940 {
3941 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3942 }
3943 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3944 
3945 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3946 		     struct net_device *sb_dev)
3947 {
3948 	struct sock *sk = skb->sk;
3949 	int queue_index = sk_tx_queue_get(sk);
3950 
3951 	sb_dev = sb_dev ? : dev;
3952 
3953 	if (queue_index < 0 || skb->ooo_okay ||
3954 	    queue_index >= dev->real_num_tx_queues) {
3955 		int new_index = get_xps_queue(dev, sb_dev, skb);
3956 
3957 		if (new_index < 0)
3958 			new_index = skb_tx_hash(dev, sb_dev, skb);
3959 
3960 		if (queue_index != new_index && sk &&
3961 		    sk_fullsock(sk) &&
3962 		    rcu_access_pointer(sk->sk_dst_cache))
3963 			sk_tx_queue_set(sk, new_index);
3964 
3965 		queue_index = new_index;
3966 	}
3967 
3968 	return queue_index;
3969 }
3970 EXPORT_SYMBOL(netdev_pick_tx);
3971 
3972 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
3973 					 struct sk_buff *skb,
3974 					 struct net_device *sb_dev)
3975 {
3976 	int queue_index = 0;
3977 
3978 #ifdef CONFIG_XPS
3979 	u32 sender_cpu = skb->sender_cpu - 1;
3980 
3981 	if (sender_cpu >= (u32)NR_CPUS)
3982 		skb->sender_cpu = raw_smp_processor_id() + 1;
3983 #endif
3984 
3985 	if (dev->real_num_tx_queues != 1) {
3986 		const struct net_device_ops *ops = dev->netdev_ops;
3987 
3988 		if (ops->ndo_select_queue)
3989 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
3990 		else
3991 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
3992 
3993 		queue_index = netdev_cap_txqueue(dev, queue_index);
3994 	}
3995 
3996 	skb_set_queue_mapping(skb, queue_index);
3997 	return netdev_get_tx_queue(dev, queue_index);
3998 }
3999 
4000 /**
4001  *	__dev_queue_xmit - transmit a buffer
4002  *	@skb: buffer to transmit
4003  *	@sb_dev: suboordinate device used for L2 forwarding offload
4004  *
4005  *	Queue a buffer for transmission to a network device. The caller must
4006  *	have set the device and priority and built the buffer before calling
4007  *	this function. The function can be called from an interrupt.
4008  *
4009  *	A negative errno code is returned on a failure. A success does not
4010  *	guarantee the frame will be transmitted as it may be dropped due
4011  *	to congestion or traffic shaping.
4012  *
4013  * -----------------------------------------------------------------------------------
4014  *      I notice this method can also return errors from the queue disciplines,
4015  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
4016  *      be positive.
4017  *
4018  *      Regardless of the return value, the skb is consumed, so it is currently
4019  *      difficult to retry a send to this method.  (You can bump the ref count
4020  *      before sending to hold a reference for retry if you are careful.)
4021  *
4022  *      When calling this method, interrupts MUST be enabled.  This is because
4023  *      the BH enable code must have IRQs enabled so that it will not deadlock.
4024  *          --BLG
4025  */
4026 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4027 {
4028 	struct net_device *dev = skb->dev;
4029 	struct netdev_queue *txq;
4030 	struct Qdisc *q;
4031 	int rc = -ENOMEM;
4032 	bool again = false;
4033 
4034 	skb_reset_mac_header(skb);
4035 
4036 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4037 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4038 
4039 	/* Disable soft irqs for various locks below. Also
4040 	 * stops preemption for RCU.
4041 	 */
4042 	rcu_read_lock_bh();
4043 
4044 	skb_update_prio(skb);
4045 
4046 	qdisc_pkt_len_init(skb);
4047 #ifdef CONFIG_NET_CLS_ACT
4048 	skb->tc_at_ingress = 0;
4049 #endif
4050 #ifdef CONFIG_NET_EGRESS
4051 	if (static_branch_unlikely(&egress_needed_key)) {
4052 		if (nf_hook_egress_active()) {
4053 			skb = nf_hook_egress(skb, &rc, dev);
4054 			if (!skb)
4055 				goto out;
4056 		}
4057 		nf_skip_egress(skb, true);
4058 		skb = sch_handle_egress(skb, &rc, dev);
4059 		if (!skb)
4060 			goto out;
4061 		nf_skip_egress(skb, false);
4062 	}
4063 #endif
4064 	/* If device/qdisc don't need skb->dst, release it right now while
4065 	 * its hot in this cpu cache.
4066 	 */
4067 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4068 		skb_dst_drop(skb);
4069 	else
4070 		skb_dst_force(skb);
4071 
4072 	txq = netdev_core_pick_tx(dev, skb, sb_dev);
4073 	q = rcu_dereference_bh(txq->qdisc);
4074 
4075 	trace_net_dev_queue(skb);
4076 	if (q->enqueue) {
4077 		rc = __dev_xmit_skb(skb, q, dev, txq);
4078 		goto out;
4079 	}
4080 
4081 	/* The device has no queue. Common case for software devices:
4082 	 * loopback, all the sorts of tunnels...
4083 
4084 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4085 	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
4086 	 * counters.)
4087 	 * However, it is possible, that they rely on protection
4088 	 * made by us here.
4089 
4090 	 * Check this and shot the lock. It is not prone from deadlocks.
4091 	 *Either shot noqueue qdisc, it is even simpler 8)
4092 	 */
4093 	if (dev->flags & IFF_UP) {
4094 		int cpu = smp_processor_id(); /* ok because BHs are off */
4095 
4096 		if (txq->xmit_lock_owner != cpu) {
4097 			if (dev_xmit_recursion())
4098 				goto recursion_alert;
4099 
4100 			skb = validate_xmit_skb(skb, dev, &again);
4101 			if (!skb)
4102 				goto out;
4103 
4104 			PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4105 			HARD_TX_LOCK(dev, txq, cpu);
4106 
4107 			if (!netif_xmit_stopped(txq)) {
4108 				dev_xmit_recursion_inc();
4109 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4110 				dev_xmit_recursion_dec();
4111 				if (dev_xmit_complete(rc)) {
4112 					HARD_TX_UNLOCK(dev, txq);
4113 					goto out;
4114 				}
4115 			}
4116 			HARD_TX_UNLOCK(dev, txq);
4117 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4118 					     dev->name);
4119 		} else {
4120 			/* Recursion is detected! It is possible,
4121 			 * unfortunately
4122 			 */
4123 recursion_alert:
4124 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4125 					     dev->name);
4126 		}
4127 	}
4128 
4129 	rc = -ENETDOWN;
4130 	rcu_read_unlock_bh();
4131 
4132 	atomic_long_inc(&dev->tx_dropped);
4133 	kfree_skb_list(skb);
4134 	return rc;
4135 out:
4136 	rcu_read_unlock_bh();
4137 	return rc;
4138 }
4139 
4140 int dev_queue_xmit(struct sk_buff *skb)
4141 {
4142 	return __dev_queue_xmit(skb, NULL);
4143 }
4144 EXPORT_SYMBOL(dev_queue_xmit);
4145 
4146 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4147 {
4148 	return __dev_queue_xmit(skb, sb_dev);
4149 }
4150 EXPORT_SYMBOL(dev_queue_xmit_accel);
4151 
4152 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4153 {
4154 	struct net_device *dev = skb->dev;
4155 	struct sk_buff *orig_skb = skb;
4156 	struct netdev_queue *txq;
4157 	int ret = NETDEV_TX_BUSY;
4158 	bool again = false;
4159 
4160 	if (unlikely(!netif_running(dev) ||
4161 		     !netif_carrier_ok(dev)))
4162 		goto drop;
4163 
4164 	skb = validate_xmit_skb_list(skb, dev, &again);
4165 	if (skb != orig_skb)
4166 		goto drop;
4167 
4168 	skb_set_queue_mapping(skb, queue_id);
4169 	txq = skb_get_tx_queue(dev, skb);
4170 	PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4171 
4172 	local_bh_disable();
4173 
4174 	dev_xmit_recursion_inc();
4175 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4176 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4177 		ret = netdev_start_xmit(skb, dev, txq, false);
4178 	HARD_TX_UNLOCK(dev, txq);
4179 	dev_xmit_recursion_dec();
4180 
4181 	local_bh_enable();
4182 	return ret;
4183 drop:
4184 	atomic_long_inc(&dev->tx_dropped);
4185 	kfree_skb_list(skb);
4186 	return NET_XMIT_DROP;
4187 }
4188 EXPORT_SYMBOL(__dev_direct_xmit);
4189 
4190 /*************************************************************************
4191  *			Receiver routines
4192  *************************************************************************/
4193 
4194 int netdev_max_backlog __read_mostly = 1000;
4195 EXPORT_SYMBOL(netdev_max_backlog);
4196 
4197 int netdev_tstamp_prequeue __read_mostly = 1;
4198 int netdev_budget __read_mostly = 300;
4199 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
4200 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4201 int weight_p __read_mostly = 64;           /* old backlog weight */
4202 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4203 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4204 int dev_rx_weight __read_mostly = 64;
4205 int dev_tx_weight __read_mostly = 64;
4206 
4207 /* Called with irq disabled */
4208 static inline void ____napi_schedule(struct softnet_data *sd,
4209 				     struct napi_struct *napi)
4210 {
4211 	struct task_struct *thread;
4212 
4213 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4214 		/* Paired with smp_mb__before_atomic() in
4215 		 * napi_enable()/dev_set_threaded().
4216 		 * Use READ_ONCE() to guarantee a complete
4217 		 * read on napi->thread. Only call
4218 		 * wake_up_process() when it's not NULL.
4219 		 */
4220 		thread = READ_ONCE(napi->thread);
4221 		if (thread) {
4222 			/* Avoid doing set_bit() if the thread is in
4223 			 * INTERRUPTIBLE state, cause napi_thread_wait()
4224 			 * makes sure to proceed with napi polling
4225 			 * if the thread is explicitly woken from here.
4226 			 */
4227 			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4228 				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4229 			wake_up_process(thread);
4230 			return;
4231 		}
4232 	}
4233 
4234 	list_add_tail(&napi->poll_list, &sd->poll_list);
4235 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4236 }
4237 
4238 #ifdef CONFIG_RPS
4239 
4240 /* One global table that all flow-based protocols share. */
4241 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4242 EXPORT_SYMBOL(rps_sock_flow_table);
4243 u32 rps_cpu_mask __read_mostly;
4244 EXPORT_SYMBOL(rps_cpu_mask);
4245 
4246 struct static_key_false rps_needed __read_mostly;
4247 EXPORT_SYMBOL(rps_needed);
4248 struct static_key_false rfs_needed __read_mostly;
4249 EXPORT_SYMBOL(rfs_needed);
4250 
4251 static struct rps_dev_flow *
4252 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4253 	    struct rps_dev_flow *rflow, u16 next_cpu)
4254 {
4255 	if (next_cpu < nr_cpu_ids) {
4256 #ifdef CONFIG_RFS_ACCEL
4257 		struct netdev_rx_queue *rxqueue;
4258 		struct rps_dev_flow_table *flow_table;
4259 		struct rps_dev_flow *old_rflow;
4260 		u32 flow_id;
4261 		u16 rxq_index;
4262 		int rc;
4263 
4264 		/* Should we steer this flow to a different hardware queue? */
4265 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4266 		    !(dev->features & NETIF_F_NTUPLE))
4267 			goto out;
4268 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4269 		if (rxq_index == skb_get_rx_queue(skb))
4270 			goto out;
4271 
4272 		rxqueue = dev->_rx + rxq_index;
4273 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4274 		if (!flow_table)
4275 			goto out;
4276 		flow_id = skb_get_hash(skb) & flow_table->mask;
4277 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4278 							rxq_index, flow_id);
4279 		if (rc < 0)
4280 			goto out;
4281 		old_rflow = rflow;
4282 		rflow = &flow_table->flows[flow_id];
4283 		rflow->filter = rc;
4284 		if (old_rflow->filter == rflow->filter)
4285 			old_rflow->filter = RPS_NO_FILTER;
4286 	out:
4287 #endif
4288 		rflow->last_qtail =
4289 			per_cpu(softnet_data, next_cpu).input_queue_head;
4290 	}
4291 
4292 	rflow->cpu = next_cpu;
4293 	return rflow;
4294 }
4295 
4296 /*
4297  * get_rps_cpu is called from netif_receive_skb and returns the target
4298  * CPU from the RPS map of the receiving queue for a given skb.
4299  * rcu_read_lock must be held on entry.
4300  */
4301 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4302 		       struct rps_dev_flow **rflowp)
4303 {
4304 	const struct rps_sock_flow_table *sock_flow_table;
4305 	struct netdev_rx_queue *rxqueue = dev->_rx;
4306 	struct rps_dev_flow_table *flow_table;
4307 	struct rps_map *map;
4308 	int cpu = -1;
4309 	u32 tcpu;
4310 	u32 hash;
4311 
4312 	if (skb_rx_queue_recorded(skb)) {
4313 		u16 index = skb_get_rx_queue(skb);
4314 
4315 		if (unlikely(index >= dev->real_num_rx_queues)) {
4316 			WARN_ONCE(dev->real_num_rx_queues > 1,
4317 				  "%s received packet on queue %u, but number "
4318 				  "of RX queues is %u\n",
4319 				  dev->name, index, dev->real_num_rx_queues);
4320 			goto done;
4321 		}
4322 		rxqueue += index;
4323 	}
4324 
4325 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4326 
4327 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4328 	map = rcu_dereference(rxqueue->rps_map);
4329 	if (!flow_table && !map)
4330 		goto done;
4331 
4332 	skb_reset_network_header(skb);
4333 	hash = skb_get_hash(skb);
4334 	if (!hash)
4335 		goto done;
4336 
4337 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4338 	if (flow_table && sock_flow_table) {
4339 		struct rps_dev_flow *rflow;
4340 		u32 next_cpu;
4341 		u32 ident;
4342 
4343 		/* First check into global flow table if there is a match */
4344 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4345 		if ((ident ^ hash) & ~rps_cpu_mask)
4346 			goto try_rps;
4347 
4348 		next_cpu = ident & rps_cpu_mask;
4349 
4350 		/* OK, now we know there is a match,
4351 		 * we can look at the local (per receive queue) flow table
4352 		 */
4353 		rflow = &flow_table->flows[hash & flow_table->mask];
4354 		tcpu = rflow->cpu;
4355 
4356 		/*
4357 		 * If the desired CPU (where last recvmsg was done) is
4358 		 * different from current CPU (one in the rx-queue flow
4359 		 * table entry), switch if one of the following holds:
4360 		 *   - Current CPU is unset (>= nr_cpu_ids).
4361 		 *   - Current CPU is offline.
4362 		 *   - The current CPU's queue tail has advanced beyond the
4363 		 *     last packet that was enqueued using this table entry.
4364 		 *     This guarantees that all previous packets for the flow
4365 		 *     have been dequeued, thus preserving in order delivery.
4366 		 */
4367 		if (unlikely(tcpu != next_cpu) &&
4368 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4369 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4370 		      rflow->last_qtail)) >= 0)) {
4371 			tcpu = next_cpu;
4372 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4373 		}
4374 
4375 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4376 			*rflowp = rflow;
4377 			cpu = tcpu;
4378 			goto done;
4379 		}
4380 	}
4381 
4382 try_rps:
4383 
4384 	if (map) {
4385 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4386 		if (cpu_online(tcpu)) {
4387 			cpu = tcpu;
4388 			goto done;
4389 		}
4390 	}
4391 
4392 done:
4393 	return cpu;
4394 }
4395 
4396 #ifdef CONFIG_RFS_ACCEL
4397 
4398 /**
4399  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4400  * @dev: Device on which the filter was set
4401  * @rxq_index: RX queue index
4402  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4403  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4404  *
4405  * Drivers that implement ndo_rx_flow_steer() should periodically call
4406  * this function for each installed filter and remove the filters for
4407  * which it returns %true.
4408  */
4409 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4410 			 u32 flow_id, u16 filter_id)
4411 {
4412 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4413 	struct rps_dev_flow_table *flow_table;
4414 	struct rps_dev_flow *rflow;
4415 	bool expire = true;
4416 	unsigned int cpu;
4417 
4418 	rcu_read_lock();
4419 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4420 	if (flow_table && flow_id <= flow_table->mask) {
4421 		rflow = &flow_table->flows[flow_id];
4422 		cpu = READ_ONCE(rflow->cpu);
4423 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4424 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4425 			   rflow->last_qtail) <
4426 		     (int)(10 * flow_table->mask)))
4427 			expire = false;
4428 	}
4429 	rcu_read_unlock();
4430 	return expire;
4431 }
4432 EXPORT_SYMBOL(rps_may_expire_flow);
4433 
4434 #endif /* CONFIG_RFS_ACCEL */
4435 
4436 /* Called from hardirq (IPI) context */
4437 static void rps_trigger_softirq(void *data)
4438 {
4439 	struct softnet_data *sd = data;
4440 
4441 	____napi_schedule(sd, &sd->backlog);
4442 	sd->received_rps++;
4443 }
4444 
4445 #endif /* CONFIG_RPS */
4446 
4447 /*
4448  * Check if this softnet_data structure is another cpu one
4449  * If yes, queue it to our IPI list and return 1
4450  * If no, return 0
4451  */
4452 static int rps_ipi_queued(struct softnet_data *sd)
4453 {
4454 #ifdef CONFIG_RPS
4455 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4456 
4457 	if (sd != mysd) {
4458 		sd->rps_ipi_next = mysd->rps_ipi_list;
4459 		mysd->rps_ipi_list = sd;
4460 
4461 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4462 		return 1;
4463 	}
4464 #endif /* CONFIG_RPS */
4465 	return 0;
4466 }
4467 
4468 #ifdef CONFIG_NET_FLOW_LIMIT
4469 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4470 #endif
4471 
4472 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4473 {
4474 #ifdef CONFIG_NET_FLOW_LIMIT
4475 	struct sd_flow_limit *fl;
4476 	struct softnet_data *sd;
4477 	unsigned int old_flow, new_flow;
4478 
4479 	if (qlen < (netdev_max_backlog >> 1))
4480 		return false;
4481 
4482 	sd = this_cpu_ptr(&softnet_data);
4483 
4484 	rcu_read_lock();
4485 	fl = rcu_dereference(sd->flow_limit);
4486 	if (fl) {
4487 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4488 		old_flow = fl->history[fl->history_head];
4489 		fl->history[fl->history_head] = new_flow;
4490 
4491 		fl->history_head++;
4492 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4493 
4494 		if (likely(fl->buckets[old_flow]))
4495 			fl->buckets[old_flow]--;
4496 
4497 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4498 			fl->count++;
4499 			rcu_read_unlock();
4500 			return true;
4501 		}
4502 	}
4503 	rcu_read_unlock();
4504 #endif
4505 	return false;
4506 }
4507 
4508 /*
4509  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4510  * queue (may be a remote CPU queue).
4511  */
4512 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4513 			      unsigned int *qtail)
4514 {
4515 	struct softnet_data *sd;
4516 	unsigned long flags;
4517 	unsigned int qlen;
4518 
4519 	sd = &per_cpu(softnet_data, cpu);
4520 
4521 	local_irq_save(flags);
4522 
4523 	rps_lock(sd);
4524 	if (!netif_running(skb->dev))
4525 		goto drop;
4526 	qlen = skb_queue_len(&sd->input_pkt_queue);
4527 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4528 		if (qlen) {
4529 enqueue:
4530 			__skb_queue_tail(&sd->input_pkt_queue, skb);
4531 			input_queue_tail_incr_save(sd, qtail);
4532 			rps_unlock(sd);
4533 			local_irq_restore(flags);
4534 			return NET_RX_SUCCESS;
4535 		}
4536 
4537 		/* Schedule NAPI for backlog device
4538 		 * We can use non atomic operation since we own the queue lock
4539 		 */
4540 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4541 			if (!rps_ipi_queued(sd))
4542 				____napi_schedule(sd, &sd->backlog);
4543 		}
4544 		goto enqueue;
4545 	}
4546 
4547 drop:
4548 	sd->dropped++;
4549 	rps_unlock(sd);
4550 
4551 	local_irq_restore(flags);
4552 
4553 	atomic_long_inc(&skb->dev->rx_dropped);
4554 	kfree_skb(skb);
4555 	return NET_RX_DROP;
4556 }
4557 
4558 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4559 {
4560 	struct net_device *dev = skb->dev;
4561 	struct netdev_rx_queue *rxqueue;
4562 
4563 	rxqueue = dev->_rx;
4564 
4565 	if (skb_rx_queue_recorded(skb)) {
4566 		u16 index = skb_get_rx_queue(skb);
4567 
4568 		if (unlikely(index >= dev->real_num_rx_queues)) {
4569 			WARN_ONCE(dev->real_num_rx_queues > 1,
4570 				  "%s received packet on queue %u, but number "
4571 				  "of RX queues is %u\n",
4572 				  dev->name, index, dev->real_num_rx_queues);
4573 
4574 			return rxqueue; /* Return first rxqueue */
4575 		}
4576 		rxqueue += index;
4577 	}
4578 	return rxqueue;
4579 }
4580 
4581 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4582 			     struct bpf_prog *xdp_prog)
4583 {
4584 	void *orig_data, *orig_data_end, *hard_start;
4585 	struct netdev_rx_queue *rxqueue;
4586 	bool orig_bcast, orig_host;
4587 	u32 mac_len, frame_sz;
4588 	__be16 orig_eth_type;
4589 	struct ethhdr *eth;
4590 	u32 metalen, act;
4591 	int off;
4592 
4593 	/* The XDP program wants to see the packet starting at the MAC
4594 	 * header.
4595 	 */
4596 	mac_len = skb->data - skb_mac_header(skb);
4597 	hard_start = skb->data - skb_headroom(skb);
4598 
4599 	/* SKB "head" area always have tailroom for skb_shared_info */
4600 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4601 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4602 
4603 	rxqueue = netif_get_rxqueue(skb);
4604 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4605 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4606 			 skb_headlen(skb) + mac_len, true);
4607 
4608 	orig_data_end = xdp->data_end;
4609 	orig_data = xdp->data;
4610 	eth = (struct ethhdr *)xdp->data;
4611 	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4612 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4613 	orig_eth_type = eth->h_proto;
4614 
4615 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4616 
4617 	/* check if bpf_xdp_adjust_head was used */
4618 	off = xdp->data - orig_data;
4619 	if (off) {
4620 		if (off > 0)
4621 			__skb_pull(skb, off);
4622 		else if (off < 0)
4623 			__skb_push(skb, -off);
4624 
4625 		skb->mac_header += off;
4626 		skb_reset_network_header(skb);
4627 	}
4628 
4629 	/* check if bpf_xdp_adjust_tail was used */
4630 	off = xdp->data_end - orig_data_end;
4631 	if (off != 0) {
4632 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4633 		skb->len += off; /* positive on grow, negative on shrink */
4634 	}
4635 
4636 	/* check if XDP changed eth hdr such SKB needs update */
4637 	eth = (struct ethhdr *)xdp->data;
4638 	if ((orig_eth_type != eth->h_proto) ||
4639 	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
4640 						  skb->dev->dev_addr)) ||
4641 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4642 		__skb_push(skb, ETH_HLEN);
4643 		skb->pkt_type = PACKET_HOST;
4644 		skb->protocol = eth_type_trans(skb, skb->dev);
4645 	}
4646 
4647 	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4648 	 * before calling us again on redirect path. We do not call do_redirect
4649 	 * as we leave that up to the caller.
4650 	 *
4651 	 * Caller is responsible for managing lifetime of skb (i.e. calling
4652 	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4653 	 */
4654 	switch (act) {
4655 	case XDP_REDIRECT:
4656 	case XDP_TX:
4657 		__skb_push(skb, mac_len);
4658 		break;
4659 	case XDP_PASS:
4660 		metalen = xdp->data - xdp->data_meta;
4661 		if (metalen)
4662 			skb_metadata_set(skb, metalen);
4663 		break;
4664 	}
4665 
4666 	return act;
4667 }
4668 
4669 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4670 				     struct xdp_buff *xdp,
4671 				     struct bpf_prog *xdp_prog)
4672 {
4673 	u32 act = XDP_DROP;
4674 
4675 	/* Reinjected packets coming from act_mirred or similar should
4676 	 * not get XDP generic processing.
4677 	 */
4678 	if (skb_is_redirected(skb))
4679 		return XDP_PASS;
4680 
4681 	/* XDP packets must be linear and must have sufficient headroom
4682 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4683 	 * native XDP provides, thus we need to do it here as well.
4684 	 */
4685 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4686 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4687 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4688 		int troom = skb->tail + skb->data_len - skb->end;
4689 
4690 		/* In case we have to go down the path and also linearize,
4691 		 * then lets do the pskb_expand_head() work just once here.
4692 		 */
4693 		if (pskb_expand_head(skb,
4694 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4695 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4696 			goto do_drop;
4697 		if (skb_linearize(skb))
4698 			goto do_drop;
4699 	}
4700 
4701 	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4702 	switch (act) {
4703 	case XDP_REDIRECT:
4704 	case XDP_TX:
4705 	case XDP_PASS:
4706 		break;
4707 	default:
4708 		bpf_warn_invalid_xdp_action(act);
4709 		fallthrough;
4710 	case XDP_ABORTED:
4711 		trace_xdp_exception(skb->dev, xdp_prog, act);
4712 		fallthrough;
4713 	case XDP_DROP:
4714 	do_drop:
4715 		kfree_skb(skb);
4716 		break;
4717 	}
4718 
4719 	return act;
4720 }
4721 
4722 /* When doing generic XDP we have to bypass the qdisc layer and the
4723  * network taps in order to match in-driver-XDP behavior.
4724  */
4725 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4726 {
4727 	struct net_device *dev = skb->dev;
4728 	struct netdev_queue *txq;
4729 	bool free_skb = true;
4730 	int cpu, rc;
4731 
4732 	txq = netdev_core_pick_tx(dev, skb, NULL);
4733 	cpu = smp_processor_id();
4734 	HARD_TX_LOCK(dev, txq, cpu);
4735 	if (!netif_xmit_stopped(txq)) {
4736 		rc = netdev_start_xmit(skb, dev, txq, 0);
4737 		if (dev_xmit_complete(rc))
4738 			free_skb = false;
4739 	}
4740 	HARD_TX_UNLOCK(dev, txq);
4741 	if (free_skb) {
4742 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
4743 		kfree_skb(skb);
4744 	}
4745 }
4746 
4747 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4748 
4749 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4750 {
4751 	if (xdp_prog) {
4752 		struct xdp_buff xdp;
4753 		u32 act;
4754 		int err;
4755 
4756 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4757 		if (act != XDP_PASS) {
4758 			switch (act) {
4759 			case XDP_REDIRECT:
4760 				err = xdp_do_generic_redirect(skb->dev, skb,
4761 							      &xdp, xdp_prog);
4762 				if (err)
4763 					goto out_redir;
4764 				break;
4765 			case XDP_TX:
4766 				generic_xdp_tx(skb, xdp_prog);
4767 				break;
4768 			}
4769 			return XDP_DROP;
4770 		}
4771 	}
4772 	return XDP_PASS;
4773 out_redir:
4774 	kfree_skb(skb);
4775 	return XDP_DROP;
4776 }
4777 EXPORT_SYMBOL_GPL(do_xdp_generic);
4778 
4779 static int netif_rx_internal(struct sk_buff *skb)
4780 {
4781 	int ret;
4782 
4783 	net_timestamp_check(netdev_tstamp_prequeue, skb);
4784 
4785 	trace_netif_rx(skb);
4786 
4787 #ifdef CONFIG_RPS
4788 	if (static_branch_unlikely(&rps_needed)) {
4789 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4790 		int cpu;
4791 
4792 		preempt_disable();
4793 		rcu_read_lock();
4794 
4795 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4796 		if (cpu < 0)
4797 			cpu = smp_processor_id();
4798 
4799 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4800 
4801 		rcu_read_unlock();
4802 		preempt_enable();
4803 	} else
4804 #endif
4805 	{
4806 		unsigned int qtail;
4807 
4808 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4809 		put_cpu();
4810 	}
4811 	return ret;
4812 }
4813 
4814 /**
4815  *	netif_rx	-	post buffer to the network code
4816  *	@skb: buffer to post
4817  *
4818  *	This function receives a packet from a device driver and queues it for
4819  *	the upper (protocol) levels to process.  It always succeeds. The buffer
4820  *	may be dropped during processing for congestion control or by the
4821  *	protocol layers.
4822  *
4823  *	return values:
4824  *	NET_RX_SUCCESS	(no congestion)
4825  *	NET_RX_DROP     (packet was dropped)
4826  *
4827  */
4828 
4829 int netif_rx(struct sk_buff *skb)
4830 {
4831 	int ret;
4832 
4833 	trace_netif_rx_entry(skb);
4834 
4835 	ret = netif_rx_internal(skb);
4836 	trace_netif_rx_exit(ret);
4837 
4838 	return ret;
4839 }
4840 EXPORT_SYMBOL(netif_rx);
4841 
4842 int netif_rx_ni(struct sk_buff *skb)
4843 {
4844 	int err;
4845 
4846 	trace_netif_rx_ni_entry(skb);
4847 
4848 	preempt_disable();
4849 	err = netif_rx_internal(skb);
4850 	if (local_softirq_pending())
4851 		do_softirq();
4852 	preempt_enable();
4853 	trace_netif_rx_ni_exit(err);
4854 
4855 	return err;
4856 }
4857 EXPORT_SYMBOL(netif_rx_ni);
4858 
4859 int netif_rx_any_context(struct sk_buff *skb)
4860 {
4861 	/*
4862 	 * If invoked from contexts which do not invoke bottom half
4863 	 * processing either at return from interrupt or when softrqs are
4864 	 * reenabled, use netif_rx_ni() which invokes bottomhalf processing
4865 	 * directly.
4866 	 */
4867 	if (in_interrupt())
4868 		return netif_rx(skb);
4869 	else
4870 		return netif_rx_ni(skb);
4871 }
4872 EXPORT_SYMBOL(netif_rx_any_context);
4873 
4874 static __latent_entropy void net_tx_action(struct softirq_action *h)
4875 {
4876 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4877 
4878 	if (sd->completion_queue) {
4879 		struct sk_buff *clist;
4880 
4881 		local_irq_disable();
4882 		clist = sd->completion_queue;
4883 		sd->completion_queue = NULL;
4884 		local_irq_enable();
4885 
4886 		while (clist) {
4887 			struct sk_buff *skb = clist;
4888 
4889 			clist = clist->next;
4890 
4891 			WARN_ON(refcount_read(&skb->users));
4892 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4893 				trace_consume_skb(skb);
4894 			else
4895 				trace_kfree_skb(skb, net_tx_action);
4896 
4897 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4898 				__kfree_skb(skb);
4899 			else
4900 				__kfree_skb_defer(skb);
4901 		}
4902 	}
4903 
4904 	if (sd->output_queue) {
4905 		struct Qdisc *head;
4906 
4907 		local_irq_disable();
4908 		head = sd->output_queue;
4909 		sd->output_queue = NULL;
4910 		sd->output_queue_tailp = &sd->output_queue;
4911 		local_irq_enable();
4912 
4913 		rcu_read_lock();
4914 
4915 		while (head) {
4916 			struct Qdisc *q = head;
4917 			spinlock_t *root_lock = NULL;
4918 
4919 			head = head->next_sched;
4920 
4921 			/* We need to make sure head->next_sched is read
4922 			 * before clearing __QDISC_STATE_SCHED
4923 			 */
4924 			smp_mb__before_atomic();
4925 
4926 			if (!(q->flags & TCQ_F_NOLOCK)) {
4927 				root_lock = qdisc_lock(q);
4928 				spin_lock(root_lock);
4929 			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
4930 						     &q->state))) {
4931 				/* There is a synchronize_net() between
4932 				 * STATE_DEACTIVATED flag being set and
4933 				 * qdisc_reset()/some_qdisc_is_busy() in
4934 				 * dev_deactivate(), so we can safely bail out
4935 				 * early here to avoid data race between
4936 				 * qdisc_deactivate() and some_qdisc_is_busy()
4937 				 * for lockless qdisc.
4938 				 */
4939 				clear_bit(__QDISC_STATE_SCHED, &q->state);
4940 				continue;
4941 			}
4942 
4943 			clear_bit(__QDISC_STATE_SCHED, &q->state);
4944 			qdisc_run(q);
4945 			if (root_lock)
4946 				spin_unlock(root_lock);
4947 		}
4948 
4949 		rcu_read_unlock();
4950 	}
4951 
4952 	xfrm_dev_backlog(sd);
4953 }
4954 
4955 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4956 /* This hook is defined here for ATM LANE */
4957 int (*br_fdb_test_addr_hook)(struct net_device *dev,
4958 			     unsigned char *addr) __read_mostly;
4959 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4960 #endif
4961 
4962 static inline struct sk_buff *
4963 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4964 		   struct net_device *orig_dev, bool *another)
4965 {
4966 #ifdef CONFIG_NET_CLS_ACT
4967 	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4968 	struct tcf_result cl_res;
4969 
4970 	/* If there's at least one ingress present somewhere (so
4971 	 * we get here via enabled static key), remaining devices
4972 	 * that are not configured with an ingress qdisc will bail
4973 	 * out here.
4974 	 */
4975 	if (!miniq)
4976 		return skb;
4977 
4978 	if (*pt_prev) {
4979 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
4980 		*pt_prev = NULL;
4981 	}
4982 
4983 	qdisc_skb_cb(skb)->pkt_len = skb->len;
4984 	qdisc_skb_cb(skb)->mru = 0;
4985 	qdisc_skb_cb(skb)->post_ct = false;
4986 	skb->tc_at_ingress = 1;
4987 	mini_qdisc_bstats_cpu_update(miniq, skb);
4988 
4989 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
4990 	case TC_ACT_OK:
4991 	case TC_ACT_RECLASSIFY:
4992 		skb->tc_index = TC_H_MIN(cl_res.classid);
4993 		break;
4994 	case TC_ACT_SHOT:
4995 		mini_qdisc_qstats_cpu_drop(miniq);
4996 		kfree_skb(skb);
4997 		return NULL;
4998 	case TC_ACT_STOLEN:
4999 	case TC_ACT_QUEUED:
5000 	case TC_ACT_TRAP:
5001 		consume_skb(skb);
5002 		return NULL;
5003 	case TC_ACT_REDIRECT:
5004 		/* skb_mac_header check was done by cls/act_bpf, so
5005 		 * we can safely push the L2 header back before
5006 		 * redirecting to another netdev
5007 		 */
5008 		__skb_push(skb, skb->mac_len);
5009 		if (skb_do_redirect(skb) == -EAGAIN) {
5010 			__skb_pull(skb, skb->mac_len);
5011 			*another = true;
5012 			break;
5013 		}
5014 		return NULL;
5015 	case TC_ACT_CONSUMED:
5016 		return NULL;
5017 	default:
5018 		break;
5019 	}
5020 #endif /* CONFIG_NET_CLS_ACT */
5021 	return skb;
5022 }
5023 
5024 /**
5025  *	netdev_is_rx_handler_busy - check if receive handler is registered
5026  *	@dev: device to check
5027  *
5028  *	Check if a receive handler is already registered for a given device.
5029  *	Return true if there one.
5030  *
5031  *	The caller must hold the rtnl_mutex.
5032  */
5033 bool netdev_is_rx_handler_busy(struct net_device *dev)
5034 {
5035 	ASSERT_RTNL();
5036 	return dev && rtnl_dereference(dev->rx_handler);
5037 }
5038 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5039 
5040 /**
5041  *	netdev_rx_handler_register - register receive handler
5042  *	@dev: device to register a handler for
5043  *	@rx_handler: receive handler to register
5044  *	@rx_handler_data: data pointer that is used by rx handler
5045  *
5046  *	Register a receive handler for a device. This handler will then be
5047  *	called from __netif_receive_skb. A negative errno code is returned
5048  *	on a failure.
5049  *
5050  *	The caller must hold the rtnl_mutex.
5051  *
5052  *	For a general description of rx_handler, see enum rx_handler_result.
5053  */
5054 int netdev_rx_handler_register(struct net_device *dev,
5055 			       rx_handler_func_t *rx_handler,
5056 			       void *rx_handler_data)
5057 {
5058 	if (netdev_is_rx_handler_busy(dev))
5059 		return -EBUSY;
5060 
5061 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5062 		return -EINVAL;
5063 
5064 	/* Note: rx_handler_data must be set before rx_handler */
5065 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5066 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5067 
5068 	return 0;
5069 }
5070 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
5071 
5072 /**
5073  *	netdev_rx_handler_unregister - unregister receive handler
5074  *	@dev: device to unregister a handler from
5075  *
5076  *	Unregister a receive handler from a device.
5077  *
5078  *	The caller must hold the rtnl_mutex.
5079  */
5080 void netdev_rx_handler_unregister(struct net_device *dev)
5081 {
5082 
5083 	ASSERT_RTNL();
5084 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5085 	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5086 	 * section has a guarantee to see a non NULL rx_handler_data
5087 	 * as well.
5088 	 */
5089 	synchronize_net();
5090 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5091 }
5092 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5093 
5094 /*
5095  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5096  * the special handling of PFMEMALLOC skbs.
5097  */
5098 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5099 {
5100 	switch (skb->protocol) {
5101 	case htons(ETH_P_ARP):
5102 	case htons(ETH_P_IP):
5103 	case htons(ETH_P_IPV6):
5104 	case htons(ETH_P_8021Q):
5105 	case htons(ETH_P_8021AD):
5106 		return true;
5107 	default:
5108 		return false;
5109 	}
5110 }
5111 
5112 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5113 			     int *ret, struct net_device *orig_dev)
5114 {
5115 	if (nf_hook_ingress_active(skb)) {
5116 		int ingress_retval;
5117 
5118 		if (*pt_prev) {
5119 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5120 			*pt_prev = NULL;
5121 		}
5122 
5123 		rcu_read_lock();
5124 		ingress_retval = nf_hook_ingress(skb);
5125 		rcu_read_unlock();
5126 		return ingress_retval;
5127 	}
5128 	return 0;
5129 }
5130 
5131 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5132 				    struct packet_type **ppt_prev)
5133 {
5134 	struct packet_type *ptype, *pt_prev;
5135 	rx_handler_func_t *rx_handler;
5136 	struct sk_buff *skb = *pskb;
5137 	struct net_device *orig_dev;
5138 	bool deliver_exact = false;
5139 	int ret = NET_RX_DROP;
5140 	__be16 type;
5141 
5142 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
5143 
5144 	trace_netif_receive_skb(skb);
5145 
5146 	orig_dev = skb->dev;
5147 
5148 	skb_reset_network_header(skb);
5149 	if (!skb_transport_header_was_set(skb))
5150 		skb_reset_transport_header(skb);
5151 	skb_reset_mac_len(skb);
5152 
5153 	pt_prev = NULL;
5154 
5155 another_round:
5156 	skb->skb_iif = skb->dev->ifindex;
5157 
5158 	__this_cpu_inc(softnet_data.processed);
5159 
5160 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5161 		int ret2;
5162 
5163 		migrate_disable();
5164 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5165 		migrate_enable();
5166 
5167 		if (ret2 != XDP_PASS) {
5168 			ret = NET_RX_DROP;
5169 			goto out;
5170 		}
5171 	}
5172 
5173 	if (eth_type_vlan(skb->protocol)) {
5174 		skb = skb_vlan_untag(skb);
5175 		if (unlikely(!skb))
5176 			goto out;
5177 	}
5178 
5179 	if (skb_skip_tc_classify(skb))
5180 		goto skip_classify;
5181 
5182 	if (pfmemalloc)
5183 		goto skip_taps;
5184 
5185 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5186 		if (pt_prev)
5187 			ret = deliver_skb(skb, pt_prev, orig_dev);
5188 		pt_prev = ptype;
5189 	}
5190 
5191 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5192 		if (pt_prev)
5193 			ret = deliver_skb(skb, pt_prev, orig_dev);
5194 		pt_prev = ptype;
5195 	}
5196 
5197 skip_taps:
5198 #ifdef CONFIG_NET_INGRESS
5199 	if (static_branch_unlikely(&ingress_needed_key)) {
5200 		bool another = false;
5201 
5202 		nf_skip_egress(skb, true);
5203 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5204 					 &another);
5205 		if (another)
5206 			goto another_round;
5207 		if (!skb)
5208 			goto out;
5209 
5210 		nf_skip_egress(skb, false);
5211 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5212 			goto out;
5213 	}
5214 #endif
5215 	skb_reset_redirect(skb);
5216 skip_classify:
5217 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5218 		goto drop;
5219 
5220 	if (skb_vlan_tag_present(skb)) {
5221 		if (pt_prev) {
5222 			ret = deliver_skb(skb, pt_prev, orig_dev);
5223 			pt_prev = NULL;
5224 		}
5225 		if (vlan_do_receive(&skb))
5226 			goto another_round;
5227 		else if (unlikely(!skb))
5228 			goto out;
5229 	}
5230 
5231 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5232 	if (rx_handler) {
5233 		if (pt_prev) {
5234 			ret = deliver_skb(skb, pt_prev, orig_dev);
5235 			pt_prev = NULL;
5236 		}
5237 		switch (rx_handler(&skb)) {
5238 		case RX_HANDLER_CONSUMED:
5239 			ret = NET_RX_SUCCESS;
5240 			goto out;
5241 		case RX_HANDLER_ANOTHER:
5242 			goto another_round;
5243 		case RX_HANDLER_EXACT:
5244 			deliver_exact = true;
5245 			break;
5246 		case RX_HANDLER_PASS:
5247 			break;
5248 		default:
5249 			BUG();
5250 		}
5251 	}
5252 
5253 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5254 check_vlan_id:
5255 		if (skb_vlan_tag_get_id(skb)) {
5256 			/* Vlan id is non 0 and vlan_do_receive() above couldn't
5257 			 * find vlan device.
5258 			 */
5259 			skb->pkt_type = PACKET_OTHERHOST;
5260 		} else if (eth_type_vlan(skb->protocol)) {
5261 			/* Outer header is 802.1P with vlan 0, inner header is
5262 			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5263 			 * not find vlan dev for vlan id 0.
5264 			 */
5265 			__vlan_hwaccel_clear_tag(skb);
5266 			skb = skb_vlan_untag(skb);
5267 			if (unlikely(!skb))
5268 				goto out;
5269 			if (vlan_do_receive(&skb))
5270 				/* After stripping off 802.1P header with vlan 0
5271 				 * vlan dev is found for inner header.
5272 				 */
5273 				goto another_round;
5274 			else if (unlikely(!skb))
5275 				goto out;
5276 			else
5277 				/* We have stripped outer 802.1P vlan 0 header.
5278 				 * But could not find vlan dev.
5279 				 * check again for vlan id to set OTHERHOST.
5280 				 */
5281 				goto check_vlan_id;
5282 		}
5283 		/* Note: we might in the future use prio bits
5284 		 * and set skb->priority like in vlan_do_receive()
5285 		 * For the time being, just ignore Priority Code Point
5286 		 */
5287 		__vlan_hwaccel_clear_tag(skb);
5288 	}
5289 
5290 	type = skb->protocol;
5291 
5292 	/* deliver only exact match when indicated */
5293 	if (likely(!deliver_exact)) {
5294 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5295 				       &ptype_base[ntohs(type) &
5296 						   PTYPE_HASH_MASK]);
5297 	}
5298 
5299 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5300 			       &orig_dev->ptype_specific);
5301 
5302 	if (unlikely(skb->dev != orig_dev)) {
5303 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5304 				       &skb->dev->ptype_specific);
5305 	}
5306 
5307 	if (pt_prev) {
5308 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5309 			goto drop;
5310 		*ppt_prev = pt_prev;
5311 	} else {
5312 drop:
5313 		if (!deliver_exact)
5314 			atomic_long_inc(&skb->dev->rx_dropped);
5315 		else
5316 			atomic_long_inc(&skb->dev->rx_nohandler);
5317 		kfree_skb(skb);
5318 		/* Jamal, now you will not able to escape explaining
5319 		 * me how you were going to use this. :-)
5320 		 */
5321 		ret = NET_RX_DROP;
5322 	}
5323 
5324 out:
5325 	/* The invariant here is that if *ppt_prev is not NULL
5326 	 * then skb should also be non-NULL.
5327 	 *
5328 	 * Apparently *ppt_prev assignment above holds this invariant due to
5329 	 * skb dereferencing near it.
5330 	 */
5331 	*pskb = skb;
5332 	return ret;
5333 }
5334 
5335 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5336 {
5337 	struct net_device *orig_dev = skb->dev;
5338 	struct packet_type *pt_prev = NULL;
5339 	int ret;
5340 
5341 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5342 	if (pt_prev)
5343 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5344 					 skb->dev, pt_prev, orig_dev);
5345 	return ret;
5346 }
5347 
5348 /**
5349  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5350  *	@skb: buffer to process
5351  *
5352  *	More direct receive version of netif_receive_skb().  It should
5353  *	only be used by callers that have a need to skip RPS and Generic XDP.
5354  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5355  *
5356  *	This function may only be called from softirq context and interrupts
5357  *	should be enabled.
5358  *
5359  *	Return values (usually ignored):
5360  *	NET_RX_SUCCESS: no congestion
5361  *	NET_RX_DROP: packet was dropped
5362  */
5363 int netif_receive_skb_core(struct sk_buff *skb)
5364 {
5365 	int ret;
5366 
5367 	rcu_read_lock();
5368 	ret = __netif_receive_skb_one_core(skb, false);
5369 	rcu_read_unlock();
5370 
5371 	return ret;
5372 }
5373 EXPORT_SYMBOL(netif_receive_skb_core);
5374 
5375 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5376 						  struct packet_type *pt_prev,
5377 						  struct net_device *orig_dev)
5378 {
5379 	struct sk_buff *skb, *next;
5380 
5381 	if (!pt_prev)
5382 		return;
5383 	if (list_empty(head))
5384 		return;
5385 	if (pt_prev->list_func != NULL)
5386 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5387 				   ip_list_rcv, head, pt_prev, orig_dev);
5388 	else
5389 		list_for_each_entry_safe(skb, next, head, list) {
5390 			skb_list_del_init(skb);
5391 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5392 		}
5393 }
5394 
5395 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5396 {
5397 	/* Fast-path assumptions:
5398 	 * - There is no RX handler.
5399 	 * - Only one packet_type matches.
5400 	 * If either of these fails, we will end up doing some per-packet
5401 	 * processing in-line, then handling the 'last ptype' for the whole
5402 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5403 	 * because the 'last ptype' must be constant across the sublist, and all
5404 	 * other ptypes are handled per-packet.
5405 	 */
5406 	/* Current (common) ptype of sublist */
5407 	struct packet_type *pt_curr = NULL;
5408 	/* Current (common) orig_dev of sublist */
5409 	struct net_device *od_curr = NULL;
5410 	struct list_head sublist;
5411 	struct sk_buff *skb, *next;
5412 
5413 	INIT_LIST_HEAD(&sublist);
5414 	list_for_each_entry_safe(skb, next, head, list) {
5415 		struct net_device *orig_dev = skb->dev;
5416 		struct packet_type *pt_prev = NULL;
5417 
5418 		skb_list_del_init(skb);
5419 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5420 		if (!pt_prev)
5421 			continue;
5422 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5423 			/* dispatch old sublist */
5424 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5425 			/* start new sublist */
5426 			INIT_LIST_HEAD(&sublist);
5427 			pt_curr = pt_prev;
5428 			od_curr = orig_dev;
5429 		}
5430 		list_add_tail(&skb->list, &sublist);
5431 	}
5432 
5433 	/* dispatch final sublist */
5434 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5435 }
5436 
5437 static int __netif_receive_skb(struct sk_buff *skb)
5438 {
5439 	int ret;
5440 
5441 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5442 		unsigned int noreclaim_flag;
5443 
5444 		/*
5445 		 * PFMEMALLOC skbs are special, they should
5446 		 * - be delivered to SOCK_MEMALLOC sockets only
5447 		 * - stay away from userspace
5448 		 * - have bounded memory usage
5449 		 *
5450 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5451 		 * context down to all allocation sites.
5452 		 */
5453 		noreclaim_flag = memalloc_noreclaim_save();
5454 		ret = __netif_receive_skb_one_core(skb, true);
5455 		memalloc_noreclaim_restore(noreclaim_flag);
5456 	} else
5457 		ret = __netif_receive_skb_one_core(skb, false);
5458 
5459 	return ret;
5460 }
5461 
5462 static void __netif_receive_skb_list(struct list_head *head)
5463 {
5464 	unsigned long noreclaim_flag = 0;
5465 	struct sk_buff *skb, *next;
5466 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5467 
5468 	list_for_each_entry_safe(skb, next, head, list) {
5469 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5470 			struct list_head sublist;
5471 
5472 			/* Handle the previous sublist */
5473 			list_cut_before(&sublist, head, &skb->list);
5474 			if (!list_empty(&sublist))
5475 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5476 			pfmemalloc = !pfmemalloc;
5477 			/* See comments in __netif_receive_skb */
5478 			if (pfmemalloc)
5479 				noreclaim_flag = memalloc_noreclaim_save();
5480 			else
5481 				memalloc_noreclaim_restore(noreclaim_flag);
5482 		}
5483 	}
5484 	/* Handle the remaining sublist */
5485 	if (!list_empty(head))
5486 		__netif_receive_skb_list_core(head, pfmemalloc);
5487 	/* Restore pflags */
5488 	if (pfmemalloc)
5489 		memalloc_noreclaim_restore(noreclaim_flag);
5490 }
5491 
5492 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5493 {
5494 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5495 	struct bpf_prog *new = xdp->prog;
5496 	int ret = 0;
5497 
5498 	switch (xdp->command) {
5499 	case XDP_SETUP_PROG:
5500 		rcu_assign_pointer(dev->xdp_prog, new);
5501 		if (old)
5502 			bpf_prog_put(old);
5503 
5504 		if (old && !new) {
5505 			static_branch_dec(&generic_xdp_needed_key);
5506 		} else if (new && !old) {
5507 			static_branch_inc(&generic_xdp_needed_key);
5508 			dev_disable_lro(dev);
5509 			dev_disable_gro_hw(dev);
5510 		}
5511 		break;
5512 
5513 	default:
5514 		ret = -EINVAL;
5515 		break;
5516 	}
5517 
5518 	return ret;
5519 }
5520 
5521 static int netif_receive_skb_internal(struct sk_buff *skb)
5522 {
5523 	int ret;
5524 
5525 	net_timestamp_check(netdev_tstamp_prequeue, skb);
5526 
5527 	if (skb_defer_rx_timestamp(skb))
5528 		return NET_RX_SUCCESS;
5529 
5530 	rcu_read_lock();
5531 #ifdef CONFIG_RPS
5532 	if (static_branch_unlikely(&rps_needed)) {
5533 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5534 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5535 
5536 		if (cpu >= 0) {
5537 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5538 			rcu_read_unlock();
5539 			return ret;
5540 		}
5541 	}
5542 #endif
5543 	ret = __netif_receive_skb(skb);
5544 	rcu_read_unlock();
5545 	return ret;
5546 }
5547 
5548 void netif_receive_skb_list_internal(struct list_head *head)
5549 {
5550 	struct sk_buff *skb, *next;
5551 	struct list_head sublist;
5552 
5553 	INIT_LIST_HEAD(&sublist);
5554 	list_for_each_entry_safe(skb, next, head, list) {
5555 		net_timestamp_check(netdev_tstamp_prequeue, skb);
5556 		skb_list_del_init(skb);
5557 		if (!skb_defer_rx_timestamp(skb))
5558 			list_add_tail(&skb->list, &sublist);
5559 	}
5560 	list_splice_init(&sublist, head);
5561 
5562 	rcu_read_lock();
5563 #ifdef CONFIG_RPS
5564 	if (static_branch_unlikely(&rps_needed)) {
5565 		list_for_each_entry_safe(skb, next, head, list) {
5566 			struct rps_dev_flow voidflow, *rflow = &voidflow;
5567 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5568 
5569 			if (cpu >= 0) {
5570 				/* Will be handled, remove from list */
5571 				skb_list_del_init(skb);
5572 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5573 			}
5574 		}
5575 	}
5576 #endif
5577 	__netif_receive_skb_list(head);
5578 	rcu_read_unlock();
5579 }
5580 
5581 /**
5582  *	netif_receive_skb - process receive buffer from network
5583  *	@skb: buffer to process
5584  *
5585  *	netif_receive_skb() is the main receive data processing function.
5586  *	It always succeeds. The buffer may be dropped during processing
5587  *	for congestion control or by the protocol layers.
5588  *
5589  *	This function may only be called from softirq context and interrupts
5590  *	should be enabled.
5591  *
5592  *	Return values (usually ignored):
5593  *	NET_RX_SUCCESS: no congestion
5594  *	NET_RX_DROP: packet was dropped
5595  */
5596 int netif_receive_skb(struct sk_buff *skb)
5597 {
5598 	int ret;
5599 
5600 	trace_netif_receive_skb_entry(skb);
5601 
5602 	ret = netif_receive_skb_internal(skb);
5603 	trace_netif_receive_skb_exit(ret);
5604 
5605 	return ret;
5606 }
5607 EXPORT_SYMBOL(netif_receive_skb);
5608 
5609 /**
5610  *	netif_receive_skb_list - process many receive buffers from network
5611  *	@head: list of skbs to process.
5612  *
5613  *	Since return value of netif_receive_skb() is normally ignored, and
5614  *	wouldn't be meaningful for a list, this function returns void.
5615  *
5616  *	This function may only be called from softirq context and interrupts
5617  *	should be enabled.
5618  */
5619 void netif_receive_skb_list(struct list_head *head)
5620 {
5621 	struct sk_buff *skb;
5622 
5623 	if (list_empty(head))
5624 		return;
5625 	if (trace_netif_receive_skb_list_entry_enabled()) {
5626 		list_for_each_entry(skb, head, list)
5627 			trace_netif_receive_skb_list_entry(skb);
5628 	}
5629 	netif_receive_skb_list_internal(head);
5630 	trace_netif_receive_skb_list_exit(0);
5631 }
5632 EXPORT_SYMBOL(netif_receive_skb_list);
5633 
5634 static DEFINE_PER_CPU(struct work_struct, flush_works);
5635 
5636 /* Network device is going away, flush any packets still pending */
5637 static void flush_backlog(struct work_struct *work)
5638 {
5639 	struct sk_buff *skb, *tmp;
5640 	struct softnet_data *sd;
5641 
5642 	local_bh_disable();
5643 	sd = this_cpu_ptr(&softnet_data);
5644 
5645 	local_irq_disable();
5646 	rps_lock(sd);
5647 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5648 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5649 			__skb_unlink(skb, &sd->input_pkt_queue);
5650 			dev_kfree_skb_irq(skb);
5651 			input_queue_head_incr(sd);
5652 		}
5653 	}
5654 	rps_unlock(sd);
5655 	local_irq_enable();
5656 
5657 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5658 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5659 			__skb_unlink(skb, &sd->process_queue);
5660 			kfree_skb(skb);
5661 			input_queue_head_incr(sd);
5662 		}
5663 	}
5664 	local_bh_enable();
5665 }
5666 
5667 static bool flush_required(int cpu)
5668 {
5669 #if IS_ENABLED(CONFIG_RPS)
5670 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5671 	bool do_flush;
5672 
5673 	local_irq_disable();
5674 	rps_lock(sd);
5675 
5676 	/* as insertion into process_queue happens with the rps lock held,
5677 	 * process_queue access may race only with dequeue
5678 	 */
5679 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5680 		   !skb_queue_empty_lockless(&sd->process_queue);
5681 	rps_unlock(sd);
5682 	local_irq_enable();
5683 
5684 	return do_flush;
5685 #endif
5686 	/* without RPS we can't safely check input_pkt_queue: during a
5687 	 * concurrent remote skb_queue_splice() we can detect as empty both
5688 	 * input_pkt_queue and process_queue even if the latter could end-up
5689 	 * containing a lot of packets.
5690 	 */
5691 	return true;
5692 }
5693 
5694 static void flush_all_backlogs(void)
5695 {
5696 	static cpumask_t flush_cpus;
5697 	unsigned int cpu;
5698 
5699 	/* since we are under rtnl lock protection we can use static data
5700 	 * for the cpumask and avoid allocating on stack the possibly
5701 	 * large mask
5702 	 */
5703 	ASSERT_RTNL();
5704 
5705 	cpus_read_lock();
5706 
5707 	cpumask_clear(&flush_cpus);
5708 	for_each_online_cpu(cpu) {
5709 		if (flush_required(cpu)) {
5710 			queue_work_on(cpu, system_highpri_wq,
5711 				      per_cpu_ptr(&flush_works, cpu));
5712 			cpumask_set_cpu(cpu, &flush_cpus);
5713 		}
5714 	}
5715 
5716 	/* we can have in flight packet[s] on the cpus we are not flushing,
5717 	 * synchronize_net() in unregister_netdevice_many() will take care of
5718 	 * them
5719 	 */
5720 	for_each_cpu(cpu, &flush_cpus)
5721 		flush_work(per_cpu_ptr(&flush_works, cpu));
5722 
5723 	cpus_read_unlock();
5724 }
5725 
5726 static void net_rps_send_ipi(struct softnet_data *remsd)
5727 {
5728 #ifdef CONFIG_RPS
5729 	while (remsd) {
5730 		struct softnet_data *next = remsd->rps_ipi_next;
5731 
5732 		if (cpu_online(remsd->cpu))
5733 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
5734 		remsd = next;
5735 	}
5736 #endif
5737 }
5738 
5739 /*
5740  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5741  * Note: called with local irq disabled, but exits with local irq enabled.
5742  */
5743 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5744 {
5745 #ifdef CONFIG_RPS
5746 	struct softnet_data *remsd = sd->rps_ipi_list;
5747 
5748 	if (remsd) {
5749 		sd->rps_ipi_list = NULL;
5750 
5751 		local_irq_enable();
5752 
5753 		/* Send pending IPI's to kick RPS processing on remote cpus. */
5754 		net_rps_send_ipi(remsd);
5755 	} else
5756 #endif
5757 		local_irq_enable();
5758 }
5759 
5760 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5761 {
5762 #ifdef CONFIG_RPS
5763 	return sd->rps_ipi_list != NULL;
5764 #else
5765 	return false;
5766 #endif
5767 }
5768 
5769 static int process_backlog(struct napi_struct *napi, int quota)
5770 {
5771 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5772 	bool again = true;
5773 	int work = 0;
5774 
5775 	/* Check if we have pending ipi, its better to send them now,
5776 	 * not waiting net_rx_action() end.
5777 	 */
5778 	if (sd_has_rps_ipi_waiting(sd)) {
5779 		local_irq_disable();
5780 		net_rps_action_and_irq_enable(sd);
5781 	}
5782 
5783 	napi->weight = dev_rx_weight;
5784 	while (again) {
5785 		struct sk_buff *skb;
5786 
5787 		while ((skb = __skb_dequeue(&sd->process_queue))) {
5788 			rcu_read_lock();
5789 			__netif_receive_skb(skb);
5790 			rcu_read_unlock();
5791 			input_queue_head_incr(sd);
5792 			if (++work >= quota)
5793 				return work;
5794 
5795 		}
5796 
5797 		local_irq_disable();
5798 		rps_lock(sd);
5799 		if (skb_queue_empty(&sd->input_pkt_queue)) {
5800 			/*
5801 			 * Inline a custom version of __napi_complete().
5802 			 * only current cpu owns and manipulates this napi,
5803 			 * and NAPI_STATE_SCHED is the only possible flag set
5804 			 * on backlog.
5805 			 * We can use a plain write instead of clear_bit(),
5806 			 * and we dont need an smp_mb() memory barrier.
5807 			 */
5808 			napi->state = 0;
5809 			again = false;
5810 		} else {
5811 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
5812 						   &sd->process_queue);
5813 		}
5814 		rps_unlock(sd);
5815 		local_irq_enable();
5816 	}
5817 
5818 	return work;
5819 }
5820 
5821 /**
5822  * __napi_schedule - schedule for receive
5823  * @n: entry to schedule
5824  *
5825  * The entry's receive function will be scheduled to run.
5826  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5827  */
5828 void __napi_schedule(struct napi_struct *n)
5829 {
5830 	unsigned long flags;
5831 
5832 	local_irq_save(flags);
5833 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
5834 	local_irq_restore(flags);
5835 }
5836 EXPORT_SYMBOL(__napi_schedule);
5837 
5838 /**
5839  *	napi_schedule_prep - check if napi can be scheduled
5840  *	@n: napi context
5841  *
5842  * Test if NAPI routine is already running, and if not mark
5843  * it as running.  This is used as a condition variable to
5844  * insure only one NAPI poll instance runs.  We also make
5845  * sure there is no pending NAPI disable.
5846  */
5847 bool napi_schedule_prep(struct napi_struct *n)
5848 {
5849 	unsigned long val, new;
5850 
5851 	do {
5852 		val = READ_ONCE(n->state);
5853 		if (unlikely(val & NAPIF_STATE_DISABLE))
5854 			return false;
5855 		new = val | NAPIF_STATE_SCHED;
5856 
5857 		/* Sets STATE_MISSED bit if STATE_SCHED was already set
5858 		 * This was suggested by Alexander Duyck, as compiler
5859 		 * emits better code than :
5860 		 * if (val & NAPIF_STATE_SCHED)
5861 		 *     new |= NAPIF_STATE_MISSED;
5862 		 */
5863 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5864 						   NAPIF_STATE_MISSED;
5865 	} while (cmpxchg(&n->state, val, new) != val);
5866 
5867 	return !(val & NAPIF_STATE_SCHED);
5868 }
5869 EXPORT_SYMBOL(napi_schedule_prep);
5870 
5871 /**
5872  * __napi_schedule_irqoff - schedule for receive
5873  * @n: entry to schedule
5874  *
5875  * Variant of __napi_schedule() assuming hard irqs are masked.
5876  *
5877  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
5878  * because the interrupt disabled assumption might not be true
5879  * due to force-threaded interrupts and spinlock substitution.
5880  */
5881 void __napi_schedule_irqoff(struct napi_struct *n)
5882 {
5883 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
5884 		____napi_schedule(this_cpu_ptr(&softnet_data), n);
5885 	else
5886 		__napi_schedule(n);
5887 }
5888 EXPORT_SYMBOL(__napi_schedule_irqoff);
5889 
5890 bool napi_complete_done(struct napi_struct *n, int work_done)
5891 {
5892 	unsigned long flags, val, new, timeout = 0;
5893 	bool ret = true;
5894 
5895 	/*
5896 	 * 1) Don't let napi dequeue from the cpu poll list
5897 	 *    just in case its running on a different cpu.
5898 	 * 2) If we are busy polling, do nothing here, we have
5899 	 *    the guarantee we will be called later.
5900 	 */
5901 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5902 				 NAPIF_STATE_IN_BUSY_POLL)))
5903 		return false;
5904 
5905 	if (work_done) {
5906 		if (n->gro_bitmask)
5907 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
5908 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
5909 	}
5910 	if (n->defer_hard_irqs_count > 0) {
5911 		n->defer_hard_irqs_count--;
5912 		timeout = READ_ONCE(n->dev->gro_flush_timeout);
5913 		if (timeout)
5914 			ret = false;
5915 	}
5916 	if (n->gro_bitmask) {
5917 		/* When the NAPI instance uses a timeout and keeps postponing
5918 		 * it, we need to bound somehow the time packets are kept in
5919 		 * the GRO layer
5920 		 */
5921 		napi_gro_flush(n, !!timeout);
5922 	}
5923 
5924 	gro_normal_list(n);
5925 
5926 	if (unlikely(!list_empty(&n->poll_list))) {
5927 		/* If n->poll_list is not empty, we need to mask irqs */
5928 		local_irq_save(flags);
5929 		list_del_init(&n->poll_list);
5930 		local_irq_restore(flags);
5931 	}
5932 
5933 	do {
5934 		val = READ_ONCE(n->state);
5935 
5936 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5937 
5938 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
5939 			      NAPIF_STATE_SCHED_THREADED |
5940 			      NAPIF_STATE_PREFER_BUSY_POLL);
5941 
5942 		/* If STATE_MISSED was set, leave STATE_SCHED set,
5943 		 * because we will call napi->poll() one more time.
5944 		 * This C code was suggested by Alexander Duyck to help gcc.
5945 		 */
5946 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5947 						    NAPIF_STATE_SCHED;
5948 	} while (cmpxchg(&n->state, val, new) != val);
5949 
5950 	if (unlikely(val & NAPIF_STATE_MISSED)) {
5951 		__napi_schedule(n);
5952 		return false;
5953 	}
5954 
5955 	if (timeout)
5956 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
5957 			      HRTIMER_MODE_REL_PINNED);
5958 	return ret;
5959 }
5960 EXPORT_SYMBOL(napi_complete_done);
5961 
5962 /* must be called under rcu_read_lock(), as we dont take a reference */
5963 static struct napi_struct *napi_by_id(unsigned int napi_id)
5964 {
5965 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5966 	struct napi_struct *napi;
5967 
5968 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5969 		if (napi->napi_id == napi_id)
5970 			return napi;
5971 
5972 	return NULL;
5973 }
5974 
5975 #if defined(CONFIG_NET_RX_BUSY_POLL)
5976 
5977 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
5978 {
5979 	if (!skip_schedule) {
5980 		gro_normal_list(napi);
5981 		__napi_schedule(napi);
5982 		return;
5983 	}
5984 
5985 	if (napi->gro_bitmask) {
5986 		/* flush too old packets
5987 		 * If HZ < 1000, flush all packets.
5988 		 */
5989 		napi_gro_flush(napi, HZ >= 1000);
5990 	}
5991 
5992 	gro_normal_list(napi);
5993 	clear_bit(NAPI_STATE_SCHED, &napi->state);
5994 }
5995 
5996 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
5997 			   u16 budget)
5998 {
5999 	bool skip_schedule = false;
6000 	unsigned long timeout;
6001 	int rc;
6002 
6003 	/* Busy polling means there is a high chance device driver hard irq
6004 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6005 	 * set in napi_schedule_prep().
6006 	 * Since we are about to call napi->poll() once more, we can safely
6007 	 * clear NAPI_STATE_MISSED.
6008 	 *
6009 	 * Note: x86 could use a single "lock and ..." instruction
6010 	 * to perform these two clear_bit()
6011 	 */
6012 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6013 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6014 
6015 	local_bh_disable();
6016 
6017 	if (prefer_busy_poll) {
6018 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6019 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6020 		if (napi->defer_hard_irqs_count && timeout) {
6021 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6022 			skip_schedule = true;
6023 		}
6024 	}
6025 
6026 	/* All we really want here is to re-enable device interrupts.
6027 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6028 	 */
6029 	rc = napi->poll(napi, budget);
6030 	/* We can't gro_normal_list() here, because napi->poll() might have
6031 	 * rearmed the napi (napi_complete_done()) in which case it could
6032 	 * already be running on another CPU.
6033 	 */
6034 	trace_napi_poll(napi, rc, budget);
6035 	netpoll_poll_unlock(have_poll_lock);
6036 	if (rc == budget)
6037 		__busy_poll_stop(napi, skip_schedule);
6038 	local_bh_enable();
6039 }
6040 
6041 void napi_busy_loop(unsigned int napi_id,
6042 		    bool (*loop_end)(void *, unsigned long),
6043 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6044 {
6045 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6046 	int (*napi_poll)(struct napi_struct *napi, int budget);
6047 	void *have_poll_lock = NULL;
6048 	struct napi_struct *napi;
6049 
6050 restart:
6051 	napi_poll = NULL;
6052 
6053 	rcu_read_lock();
6054 
6055 	napi = napi_by_id(napi_id);
6056 	if (!napi)
6057 		goto out;
6058 
6059 	preempt_disable();
6060 	for (;;) {
6061 		int work = 0;
6062 
6063 		local_bh_disable();
6064 		if (!napi_poll) {
6065 			unsigned long val = READ_ONCE(napi->state);
6066 
6067 			/* If multiple threads are competing for this napi,
6068 			 * we avoid dirtying napi->state as much as we can.
6069 			 */
6070 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6071 				   NAPIF_STATE_IN_BUSY_POLL)) {
6072 				if (prefer_busy_poll)
6073 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6074 				goto count;
6075 			}
6076 			if (cmpxchg(&napi->state, val,
6077 				    val | NAPIF_STATE_IN_BUSY_POLL |
6078 					  NAPIF_STATE_SCHED) != val) {
6079 				if (prefer_busy_poll)
6080 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6081 				goto count;
6082 			}
6083 			have_poll_lock = netpoll_poll_lock(napi);
6084 			napi_poll = napi->poll;
6085 		}
6086 		work = napi_poll(napi, budget);
6087 		trace_napi_poll(napi, work, budget);
6088 		gro_normal_list(napi);
6089 count:
6090 		if (work > 0)
6091 			__NET_ADD_STATS(dev_net(napi->dev),
6092 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6093 		local_bh_enable();
6094 
6095 		if (!loop_end || loop_end(loop_end_arg, start_time))
6096 			break;
6097 
6098 		if (unlikely(need_resched())) {
6099 			if (napi_poll)
6100 				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6101 			preempt_enable();
6102 			rcu_read_unlock();
6103 			cond_resched();
6104 			if (loop_end(loop_end_arg, start_time))
6105 				return;
6106 			goto restart;
6107 		}
6108 		cpu_relax();
6109 	}
6110 	if (napi_poll)
6111 		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6112 	preempt_enable();
6113 out:
6114 	rcu_read_unlock();
6115 }
6116 EXPORT_SYMBOL(napi_busy_loop);
6117 
6118 #endif /* CONFIG_NET_RX_BUSY_POLL */
6119 
6120 static void napi_hash_add(struct napi_struct *napi)
6121 {
6122 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6123 		return;
6124 
6125 	spin_lock(&napi_hash_lock);
6126 
6127 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6128 	do {
6129 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6130 			napi_gen_id = MIN_NAPI_ID;
6131 	} while (napi_by_id(napi_gen_id));
6132 	napi->napi_id = napi_gen_id;
6133 
6134 	hlist_add_head_rcu(&napi->napi_hash_node,
6135 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6136 
6137 	spin_unlock(&napi_hash_lock);
6138 }
6139 
6140 /* Warning : caller is responsible to make sure rcu grace period
6141  * is respected before freeing memory containing @napi
6142  */
6143 static void napi_hash_del(struct napi_struct *napi)
6144 {
6145 	spin_lock(&napi_hash_lock);
6146 
6147 	hlist_del_init_rcu(&napi->napi_hash_node);
6148 
6149 	spin_unlock(&napi_hash_lock);
6150 }
6151 
6152 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6153 {
6154 	struct napi_struct *napi;
6155 
6156 	napi = container_of(timer, struct napi_struct, timer);
6157 
6158 	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
6159 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6160 	 */
6161 	if (!napi_disable_pending(napi) &&
6162 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6163 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6164 		__napi_schedule_irqoff(napi);
6165 	}
6166 
6167 	return HRTIMER_NORESTART;
6168 }
6169 
6170 static void init_gro_hash(struct napi_struct *napi)
6171 {
6172 	int i;
6173 
6174 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6175 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6176 		napi->gro_hash[i].count = 0;
6177 	}
6178 	napi->gro_bitmask = 0;
6179 }
6180 
6181 int dev_set_threaded(struct net_device *dev, bool threaded)
6182 {
6183 	struct napi_struct *napi;
6184 	int err = 0;
6185 
6186 	if (dev->threaded == threaded)
6187 		return 0;
6188 
6189 	if (threaded) {
6190 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6191 			if (!napi->thread) {
6192 				err = napi_kthread_create(napi);
6193 				if (err) {
6194 					threaded = false;
6195 					break;
6196 				}
6197 			}
6198 		}
6199 	}
6200 
6201 	dev->threaded = threaded;
6202 
6203 	/* Make sure kthread is created before THREADED bit
6204 	 * is set.
6205 	 */
6206 	smp_mb__before_atomic();
6207 
6208 	/* Setting/unsetting threaded mode on a napi might not immediately
6209 	 * take effect, if the current napi instance is actively being
6210 	 * polled. In this case, the switch between threaded mode and
6211 	 * softirq mode will happen in the next round of napi_schedule().
6212 	 * This should not cause hiccups/stalls to the live traffic.
6213 	 */
6214 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
6215 		if (threaded)
6216 			set_bit(NAPI_STATE_THREADED, &napi->state);
6217 		else
6218 			clear_bit(NAPI_STATE_THREADED, &napi->state);
6219 	}
6220 
6221 	return err;
6222 }
6223 EXPORT_SYMBOL(dev_set_threaded);
6224 
6225 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6226 		    int (*poll)(struct napi_struct *, int), int weight)
6227 {
6228 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6229 		return;
6230 
6231 	INIT_LIST_HEAD(&napi->poll_list);
6232 	INIT_HLIST_NODE(&napi->napi_hash_node);
6233 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6234 	napi->timer.function = napi_watchdog;
6235 	init_gro_hash(napi);
6236 	napi->skb = NULL;
6237 	INIT_LIST_HEAD(&napi->rx_list);
6238 	napi->rx_count = 0;
6239 	napi->poll = poll;
6240 	if (weight > NAPI_POLL_WEIGHT)
6241 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6242 				weight);
6243 	napi->weight = weight;
6244 	napi->dev = dev;
6245 #ifdef CONFIG_NETPOLL
6246 	napi->poll_owner = -1;
6247 #endif
6248 	set_bit(NAPI_STATE_SCHED, &napi->state);
6249 	set_bit(NAPI_STATE_NPSVC, &napi->state);
6250 	list_add_rcu(&napi->dev_list, &dev->napi_list);
6251 	napi_hash_add(napi);
6252 	/* Create kthread for this napi if dev->threaded is set.
6253 	 * Clear dev->threaded if kthread creation failed so that
6254 	 * threaded mode will not be enabled in napi_enable().
6255 	 */
6256 	if (dev->threaded && napi_kthread_create(napi))
6257 		dev->threaded = 0;
6258 }
6259 EXPORT_SYMBOL(netif_napi_add);
6260 
6261 void napi_disable(struct napi_struct *n)
6262 {
6263 	unsigned long val, new;
6264 
6265 	might_sleep();
6266 	set_bit(NAPI_STATE_DISABLE, &n->state);
6267 
6268 	for ( ; ; ) {
6269 		val = READ_ONCE(n->state);
6270 		if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6271 			usleep_range(20, 200);
6272 			continue;
6273 		}
6274 
6275 		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6276 		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6277 
6278 		if (cmpxchg(&n->state, val, new) == val)
6279 			break;
6280 	}
6281 
6282 	hrtimer_cancel(&n->timer);
6283 
6284 	clear_bit(NAPI_STATE_DISABLE, &n->state);
6285 }
6286 EXPORT_SYMBOL(napi_disable);
6287 
6288 /**
6289  *	napi_enable - enable NAPI scheduling
6290  *	@n: NAPI context
6291  *
6292  * Resume NAPI from being scheduled on this context.
6293  * Must be paired with napi_disable.
6294  */
6295 void napi_enable(struct napi_struct *n)
6296 {
6297 	unsigned long val, new;
6298 
6299 	do {
6300 		val = READ_ONCE(n->state);
6301 		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6302 
6303 		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6304 		if (n->dev->threaded && n->thread)
6305 			new |= NAPIF_STATE_THREADED;
6306 	} while (cmpxchg(&n->state, val, new) != val);
6307 }
6308 EXPORT_SYMBOL(napi_enable);
6309 
6310 static void flush_gro_hash(struct napi_struct *napi)
6311 {
6312 	int i;
6313 
6314 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6315 		struct sk_buff *skb, *n;
6316 
6317 		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6318 			kfree_skb(skb);
6319 		napi->gro_hash[i].count = 0;
6320 	}
6321 }
6322 
6323 /* Must be called in process context */
6324 void __netif_napi_del(struct napi_struct *napi)
6325 {
6326 	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6327 		return;
6328 
6329 	napi_hash_del(napi);
6330 	list_del_rcu(&napi->dev_list);
6331 	napi_free_frags(napi);
6332 
6333 	flush_gro_hash(napi);
6334 	napi->gro_bitmask = 0;
6335 
6336 	if (napi->thread) {
6337 		kthread_stop(napi->thread);
6338 		napi->thread = NULL;
6339 	}
6340 }
6341 EXPORT_SYMBOL(__netif_napi_del);
6342 
6343 static int __napi_poll(struct napi_struct *n, bool *repoll)
6344 {
6345 	int work, weight;
6346 
6347 	weight = n->weight;
6348 
6349 	/* This NAPI_STATE_SCHED test is for avoiding a race
6350 	 * with netpoll's poll_napi().  Only the entity which
6351 	 * obtains the lock and sees NAPI_STATE_SCHED set will
6352 	 * actually make the ->poll() call.  Therefore we avoid
6353 	 * accidentally calling ->poll() when NAPI is not scheduled.
6354 	 */
6355 	work = 0;
6356 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6357 		work = n->poll(n, weight);
6358 		trace_napi_poll(n, work, weight);
6359 	}
6360 
6361 	if (unlikely(work > weight))
6362 		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6363 				n->poll, work, weight);
6364 
6365 	if (likely(work < weight))
6366 		return work;
6367 
6368 	/* Drivers must not modify the NAPI state if they
6369 	 * consume the entire weight.  In such cases this code
6370 	 * still "owns" the NAPI instance and therefore can
6371 	 * move the instance around on the list at-will.
6372 	 */
6373 	if (unlikely(napi_disable_pending(n))) {
6374 		napi_complete(n);
6375 		return work;
6376 	}
6377 
6378 	/* The NAPI context has more processing work, but busy-polling
6379 	 * is preferred. Exit early.
6380 	 */
6381 	if (napi_prefer_busy_poll(n)) {
6382 		if (napi_complete_done(n, work)) {
6383 			/* If timeout is not set, we need to make sure
6384 			 * that the NAPI is re-scheduled.
6385 			 */
6386 			napi_schedule(n);
6387 		}
6388 		return work;
6389 	}
6390 
6391 	if (n->gro_bitmask) {
6392 		/* flush too old packets
6393 		 * If HZ < 1000, flush all packets.
6394 		 */
6395 		napi_gro_flush(n, HZ >= 1000);
6396 	}
6397 
6398 	gro_normal_list(n);
6399 
6400 	/* Some drivers may have called napi_schedule
6401 	 * prior to exhausting their budget.
6402 	 */
6403 	if (unlikely(!list_empty(&n->poll_list))) {
6404 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6405 			     n->dev ? n->dev->name : "backlog");
6406 		return work;
6407 	}
6408 
6409 	*repoll = true;
6410 
6411 	return work;
6412 }
6413 
6414 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6415 {
6416 	bool do_repoll = false;
6417 	void *have;
6418 	int work;
6419 
6420 	list_del_init(&n->poll_list);
6421 
6422 	have = netpoll_poll_lock(n);
6423 
6424 	work = __napi_poll(n, &do_repoll);
6425 
6426 	if (do_repoll)
6427 		list_add_tail(&n->poll_list, repoll);
6428 
6429 	netpoll_poll_unlock(have);
6430 
6431 	return work;
6432 }
6433 
6434 static int napi_thread_wait(struct napi_struct *napi)
6435 {
6436 	bool woken = false;
6437 
6438 	set_current_state(TASK_INTERRUPTIBLE);
6439 
6440 	while (!kthread_should_stop()) {
6441 		/* Testing SCHED_THREADED bit here to make sure the current
6442 		 * kthread owns this napi and could poll on this napi.
6443 		 * Testing SCHED bit is not enough because SCHED bit might be
6444 		 * set by some other busy poll thread or by napi_disable().
6445 		 */
6446 		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6447 			WARN_ON(!list_empty(&napi->poll_list));
6448 			__set_current_state(TASK_RUNNING);
6449 			return 0;
6450 		}
6451 
6452 		schedule();
6453 		/* woken being true indicates this thread owns this napi. */
6454 		woken = true;
6455 		set_current_state(TASK_INTERRUPTIBLE);
6456 	}
6457 	__set_current_state(TASK_RUNNING);
6458 
6459 	return -1;
6460 }
6461 
6462 static int napi_threaded_poll(void *data)
6463 {
6464 	struct napi_struct *napi = data;
6465 	void *have;
6466 
6467 	while (!napi_thread_wait(napi)) {
6468 		for (;;) {
6469 			bool repoll = false;
6470 
6471 			local_bh_disable();
6472 
6473 			have = netpoll_poll_lock(napi);
6474 			__napi_poll(napi, &repoll);
6475 			netpoll_poll_unlock(have);
6476 
6477 			local_bh_enable();
6478 
6479 			if (!repoll)
6480 				break;
6481 
6482 			cond_resched();
6483 		}
6484 	}
6485 	return 0;
6486 }
6487 
6488 static __latent_entropy void net_rx_action(struct softirq_action *h)
6489 {
6490 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6491 	unsigned long time_limit = jiffies +
6492 		usecs_to_jiffies(netdev_budget_usecs);
6493 	int budget = netdev_budget;
6494 	LIST_HEAD(list);
6495 	LIST_HEAD(repoll);
6496 
6497 	local_irq_disable();
6498 	list_splice_init(&sd->poll_list, &list);
6499 	local_irq_enable();
6500 
6501 	for (;;) {
6502 		struct napi_struct *n;
6503 
6504 		if (list_empty(&list)) {
6505 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6506 				return;
6507 			break;
6508 		}
6509 
6510 		n = list_first_entry(&list, struct napi_struct, poll_list);
6511 		budget -= napi_poll(n, &repoll);
6512 
6513 		/* If softirq window is exhausted then punt.
6514 		 * Allow this to run for 2 jiffies since which will allow
6515 		 * an average latency of 1.5/HZ.
6516 		 */
6517 		if (unlikely(budget <= 0 ||
6518 			     time_after_eq(jiffies, time_limit))) {
6519 			sd->time_squeeze++;
6520 			break;
6521 		}
6522 	}
6523 
6524 	local_irq_disable();
6525 
6526 	list_splice_tail_init(&sd->poll_list, &list);
6527 	list_splice_tail(&repoll, &list);
6528 	list_splice(&list, &sd->poll_list);
6529 	if (!list_empty(&sd->poll_list))
6530 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
6531 
6532 	net_rps_action_and_irq_enable(sd);
6533 }
6534 
6535 struct netdev_adjacent {
6536 	struct net_device *dev;
6537 
6538 	/* upper master flag, there can only be one master device per list */
6539 	bool master;
6540 
6541 	/* lookup ignore flag */
6542 	bool ignore;
6543 
6544 	/* counter for the number of times this device was added to us */
6545 	u16 ref_nr;
6546 
6547 	/* private field for the users */
6548 	void *private;
6549 
6550 	struct list_head list;
6551 	struct rcu_head rcu;
6552 };
6553 
6554 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6555 						 struct list_head *adj_list)
6556 {
6557 	struct netdev_adjacent *adj;
6558 
6559 	list_for_each_entry(adj, adj_list, list) {
6560 		if (adj->dev == adj_dev)
6561 			return adj;
6562 	}
6563 	return NULL;
6564 }
6565 
6566 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6567 				    struct netdev_nested_priv *priv)
6568 {
6569 	struct net_device *dev = (struct net_device *)priv->data;
6570 
6571 	return upper_dev == dev;
6572 }
6573 
6574 /**
6575  * netdev_has_upper_dev - Check if device is linked to an upper device
6576  * @dev: device
6577  * @upper_dev: upper device to check
6578  *
6579  * Find out if a device is linked to specified upper device and return true
6580  * in case it is. Note that this checks only immediate upper device,
6581  * not through a complete stack of devices. The caller must hold the RTNL lock.
6582  */
6583 bool netdev_has_upper_dev(struct net_device *dev,
6584 			  struct net_device *upper_dev)
6585 {
6586 	struct netdev_nested_priv priv = {
6587 		.data = (void *)upper_dev,
6588 	};
6589 
6590 	ASSERT_RTNL();
6591 
6592 	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6593 					     &priv);
6594 }
6595 EXPORT_SYMBOL(netdev_has_upper_dev);
6596 
6597 /**
6598  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6599  * @dev: device
6600  * @upper_dev: upper device to check
6601  *
6602  * Find out if a device is linked to specified upper device and return true
6603  * in case it is. Note that this checks the entire upper device chain.
6604  * The caller must hold rcu lock.
6605  */
6606 
6607 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6608 				  struct net_device *upper_dev)
6609 {
6610 	struct netdev_nested_priv priv = {
6611 		.data = (void *)upper_dev,
6612 	};
6613 
6614 	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6615 					       &priv);
6616 }
6617 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6618 
6619 /**
6620  * netdev_has_any_upper_dev - Check if device is linked to some device
6621  * @dev: device
6622  *
6623  * Find out if a device is linked to an upper device and return true in case
6624  * it is. The caller must hold the RTNL lock.
6625  */
6626 bool netdev_has_any_upper_dev(struct net_device *dev)
6627 {
6628 	ASSERT_RTNL();
6629 
6630 	return !list_empty(&dev->adj_list.upper);
6631 }
6632 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6633 
6634 /**
6635  * netdev_master_upper_dev_get - Get master upper device
6636  * @dev: device
6637  *
6638  * Find a master upper device and return pointer to it or NULL in case
6639  * it's not there. The caller must hold the RTNL lock.
6640  */
6641 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6642 {
6643 	struct netdev_adjacent *upper;
6644 
6645 	ASSERT_RTNL();
6646 
6647 	if (list_empty(&dev->adj_list.upper))
6648 		return NULL;
6649 
6650 	upper = list_first_entry(&dev->adj_list.upper,
6651 				 struct netdev_adjacent, list);
6652 	if (likely(upper->master))
6653 		return upper->dev;
6654 	return NULL;
6655 }
6656 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6657 
6658 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6659 {
6660 	struct netdev_adjacent *upper;
6661 
6662 	ASSERT_RTNL();
6663 
6664 	if (list_empty(&dev->adj_list.upper))
6665 		return NULL;
6666 
6667 	upper = list_first_entry(&dev->adj_list.upper,
6668 				 struct netdev_adjacent, list);
6669 	if (likely(upper->master) && !upper->ignore)
6670 		return upper->dev;
6671 	return NULL;
6672 }
6673 
6674 /**
6675  * netdev_has_any_lower_dev - Check if device is linked to some device
6676  * @dev: device
6677  *
6678  * Find out if a device is linked to a lower device and return true in case
6679  * it is. The caller must hold the RTNL lock.
6680  */
6681 static bool netdev_has_any_lower_dev(struct net_device *dev)
6682 {
6683 	ASSERT_RTNL();
6684 
6685 	return !list_empty(&dev->adj_list.lower);
6686 }
6687 
6688 void *netdev_adjacent_get_private(struct list_head *adj_list)
6689 {
6690 	struct netdev_adjacent *adj;
6691 
6692 	adj = list_entry(adj_list, struct netdev_adjacent, list);
6693 
6694 	return adj->private;
6695 }
6696 EXPORT_SYMBOL(netdev_adjacent_get_private);
6697 
6698 /**
6699  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6700  * @dev: device
6701  * @iter: list_head ** of the current position
6702  *
6703  * Gets the next device from the dev's upper list, starting from iter
6704  * position. The caller must hold RCU read lock.
6705  */
6706 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6707 						 struct list_head **iter)
6708 {
6709 	struct netdev_adjacent *upper;
6710 
6711 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6712 
6713 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6714 
6715 	if (&upper->list == &dev->adj_list.upper)
6716 		return NULL;
6717 
6718 	*iter = &upper->list;
6719 
6720 	return upper->dev;
6721 }
6722 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6723 
6724 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6725 						  struct list_head **iter,
6726 						  bool *ignore)
6727 {
6728 	struct netdev_adjacent *upper;
6729 
6730 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6731 
6732 	if (&upper->list == &dev->adj_list.upper)
6733 		return NULL;
6734 
6735 	*iter = &upper->list;
6736 	*ignore = upper->ignore;
6737 
6738 	return upper->dev;
6739 }
6740 
6741 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6742 						    struct list_head **iter)
6743 {
6744 	struct netdev_adjacent *upper;
6745 
6746 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6747 
6748 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6749 
6750 	if (&upper->list == &dev->adj_list.upper)
6751 		return NULL;
6752 
6753 	*iter = &upper->list;
6754 
6755 	return upper->dev;
6756 }
6757 
6758 static int __netdev_walk_all_upper_dev(struct net_device *dev,
6759 				       int (*fn)(struct net_device *dev,
6760 					 struct netdev_nested_priv *priv),
6761 				       struct netdev_nested_priv *priv)
6762 {
6763 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6764 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6765 	int ret, cur = 0;
6766 	bool ignore;
6767 
6768 	now = dev;
6769 	iter = &dev->adj_list.upper;
6770 
6771 	while (1) {
6772 		if (now != dev) {
6773 			ret = fn(now, priv);
6774 			if (ret)
6775 				return ret;
6776 		}
6777 
6778 		next = NULL;
6779 		while (1) {
6780 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
6781 			if (!udev)
6782 				break;
6783 			if (ignore)
6784 				continue;
6785 
6786 			next = udev;
6787 			niter = &udev->adj_list.upper;
6788 			dev_stack[cur] = now;
6789 			iter_stack[cur++] = iter;
6790 			break;
6791 		}
6792 
6793 		if (!next) {
6794 			if (!cur)
6795 				return 0;
6796 			next = dev_stack[--cur];
6797 			niter = iter_stack[cur];
6798 		}
6799 
6800 		now = next;
6801 		iter = niter;
6802 	}
6803 
6804 	return 0;
6805 }
6806 
6807 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6808 				  int (*fn)(struct net_device *dev,
6809 					    struct netdev_nested_priv *priv),
6810 				  struct netdev_nested_priv *priv)
6811 {
6812 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6813 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6814 	int ret, cur = 0;
6815 
6816 	now = dev;
6817 	iter = &dev->adj_list.upper;
6818 
6819 	while (1) {
6820 		if (now != dev) {
6821 			ret = fn(now, priv);
6822 			if (ret)
6823 				return ret;
6824 		}
6825 
6826 		next = NULL;
6827 		while (1) {
6828 			udev = netdev_next_upper_dev_rcu(now, &iter);
6829 			if (!udev)
6830 				break;
6831 
6832 			next = udev;
6833 			niter = &udev->adj_list.upper;
6834 			dev_stack[cur] = now;
6835 			iter_stack[cur++] = iter;
6836 			break;
6837 		}
6838 
6839 		if (!next) {
6840 			if (!cur)
6841 				return 0;
6842 			next = dev_stack[--cur];
6843 			niter = iter_stack[cur];
6844 		}
6845 
6846 		now = next;
6847 		iter = niter;
6848 	}
6849 
6850 	return 0;
6851 }
6852 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
6853 
6854 static bool __netdev_has_upper_dev(struct net_device *dev,
6855 				   struct net_device *upper_dev)
6856 {
6857 	struct netdev_nested_priv priv = {
6858 		.flags = 0,
6859 		.data = (void *)upper_dev,
6860 	};
6861 
6862 	ASSERT_RTNL();
6863 
6864 	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
6865 					   &priv);
6866 }
6867 
6868 /**
6869  * netdev_lower_get_next_private - Get the next ->private from the
6870  *				   lower neighbour list
6871  * @dev: device
6872  * @iter: list_head ** of the current position
6873  *
6874  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6875  * list, starting from iter position. The caller must hold either hold the
6876  * RTNL lock or its own locking that guarantees that the neighbour lower
6877  * list will remain unchanged.
6878  */
6879 void *netdev_lower_get_next_private(struct net_device *dev,
6880 				    struct list_head **iter)
6881 {
6882 	struct netdev_adjacent *lower;
6883 
6884 	lower = list_entry(*iter, struct netdev_adjacent, list);
6885 
6886 	if (&lower->list == &dev->adj_list.lower)
6887 		return NULL;
6888 
6889 	*iter = lower->list.next;
6890 
6891 	return lower->private;
6892 }
6893 EXPORT_SYMBOL(netdev_lower_get_next_private);
6894 
6895 /**
6896  * netdev_lower_get_next_private_rcu - Get the next ->private from the
6897  *				       lower neighbour list, RCU
6898  *				       variant
6899  * @dev: device
6900  * @iter: list_head ** of the current position
6901  *
6902  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6903  * list, starting from iter position. The caller must hold RCU read lock.
6904  */
6905 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6906 					struct list_head **iter)
6907 {
6908 	struct netdev_adjacent *lower;
6909 
6910 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
6911 
6912 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6913 
6914 	if (&lower->list == &dev->adj_list.lower)
6915 		return NULL;
6916 
6917 	*iter = &lower->list;
6918 
6919 	return lower->private;
6920 }
6921 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6922 
6923 /**
6924  * netdev_lower_get_next - Get the next device from the lower neighbour
6925  *                         list
6926  * @dev: device
6927  * @iter: list_head ** of the current position
6928  *
6929  * Gets the next netdev_adjacent from the dev's lower neighbour
6930  * list, starting from iter position. The caller must hold RTNL lock or
6931  * its own locking that guarantees that the neighbour lower
6932  * list will remain unchanged.
6933  */
6934 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6935 {
6936 	struct netdev_adjacent *lower;
6937 
6938 	lower = list_entry(*iter, struct netdev_adjacent, list);
6939 
6940 	if (&lower->list == &dev->adj_list.lower)
6941 		return NULL;
6942 
6943 	*iter = lower->list.next;
6944 
6945 	return lower->dev;
6946 }
6947 EXPORT_SYMBOL(netdev_lower_get_next);
6948 
6949 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6950 						struct list_head **iter)
6951 {
6952 	struct netdev_adjacent *lower;
6953 
6954 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
6955 
6956 	if (&lower->list == &dev->adj_list.lower)
6957 		return NULL;
6958 
6959 	*iter = &lower->list;
6960 
6961 	return lower->dev;
6962 }
6963 
6964 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
6965 						  struct list_head **iter,
6966 						  bool *ignore)
6967 {
6968 	struct netdev_adjacent *lower;
6969 
6970 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
6971 
6972 	if (&lower->list == &dev->adj_list.lower)
6973 		return NULL;
6974 
6975 	*iter = &lower->list;
6976 	*ignore = lower->ignore;
6977 
6978 	return lower->dev;
6979 }
6980 
6981 int netdev_walk_all_lower_dev(struct net_device *dev,
6982 			      int (*fn)(struct net_device *dev,
6983 					struct netdev_nested_priv *priv),
6984 			      struct netdev_nested_priv *priv)
6985 {
6986 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
6987 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
6988 	int ret, cur = 0;
6989 
6990 	now = dev;
6991 	iter = &dev->adj_list.lower;
6992 
6993 	while (1) {
6994 		if (now != dev) {
6995 			ret = fn(now, priv);
6996 			if (ret)
6997 				return ret;
6998 		}
6999 
7000 		next = NULL;
7001 		while (1) {
7002 			ldev = netdev_next_lower_dev(now, &iter);
7003 			if (!ldev)
7004 				break;
7005 
7006 			next = ldev;
7007 			niter = &ldev->adj_list.lower;
7008 			dev_stack[cur] = now;
7009 			iter_stack[cur++] = iter;
7010 			break;
7011 		}
7012 
7013 		if (!next) {
7014 			if (!cur)
7015 				return 0;
7016 			next = dev_stack[--cur];
7017 			niter = iter_stack[cur];
7018 		}
7019 
7020 		now = next;
7021 		iter = niter;
7022 	}
7023 
7024 	return 0;
7025 }
7026 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7027 
7028 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7029 				       int (*fn)(struct net_device *dev,
7030 					 struct netdev_nested_priv *priv),
7031 				       struct netdev_nested_priv *priv)
7032 {
7033 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7034 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7035 	int ret, cur = 0;
7036 	bool ignore;
7037 
7038 	now = dev;
7039 	iter = &dev->adj_list.lower;
7040 
7041 	while (1) {
7042 		if (now != dev) {
7043 			ret = fn(now, priv);
7044 			if (ret)
7045 				return ret;
7046 		}
7047 
7048 		next = NULL;
7049 		while (1) {
7050 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7051 			if (!ldev)
7052 				break;
7053 			if (ignore)
7054 				continue;
7055 
7056 			next = ldev;
7057 			niter = &ldev->adj_list.lower;
7058 			dev_stack[cur] = now;
7059 			iter_stack[cur++] = iter;
7060 			break;
7061 		}
7062 
7063 		if (!next) {
7064 			if (!cur)
7065 				return 0;
7066 			next = dev_stack[--cur];
7067 			niter = iter_stack[cur];
7068 		}
7069 
7070 		now = next;
7071 		iter = niter;
7072 	}
7073 
7074 	return 0;
7075 }
7076 
7077 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7078 					     struct list_head **iter)
7079 {
7080 	struct netdev_adjacent *lower;
7081 
7082 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7083 	if (&lower->list == &dev->adj_list.lower)
7084 		return NULL;
7085 
7086 	*iter = &lower->list;
7087 
7088 	return lower->dev;
7089 }
7090 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7091 
7092 static u8 __netdev_upper_depth(struct net_device *dev)
7093 {
7094 	struct net_device *udev;
7095 	struct list_head *iter;
7096 	u8 max_depth = 0;
7097 	bool ignore;
7098 
7099 	for (iter = &dev->adj_list.upper,
7100 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7101 	     udev;
7102 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7103 		if (ignore)
7104 			continue;
7105 		if (max_depth < udev->upper_level)
7106 			max_depth = udev->upper_level;
7107 	}
7108 
7109 	return max_depth;
7110 }
7111 
7112 static u8 __netdev_lower_depth(struct net_device *dev)
7113 {
7114 	struct net_device *ldev;
7115 	struct list_head *iter;
7116 	u8 max_depth = 0;
7117 	bool ignore;
7118 
7119 	for (iter = &dev->adj_list.lower,
7120 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7121 	     ldev;
7122 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7123 		if (ignore)
7124 			continue;
7125 		if (max_depth < ldev->lower_level)
7126 			max_depth = ldev->lower_level;
7127 	}
7128 
7129 	return max_depth;
7130 }
7131 
7132 static int __netdev_update_upper_level(struct net_device *dev,
7133 				       struct netdev_nested_priv *__unused)
7134 {
7135 	dev->upper_level = __netdev_upper_depth(dev) + 1;
7136 	return 0;
7137 }
7138 
7139 static int __netdev_update_lower_level(struct net_device *dev,
7140 				       struct netdev_nested_priv *priv)
7141 {
7142 	dev->lower_level = __netdev_lower_depth(dev) + 1;
7143 
7144 #ifdef CONFIG_LOCKDEP
7145 	if (!priv)
7146 		return 0;
7147 
7148 	if (priv->flags & NESTED_SYNC_IMM)
7149 		dev->nested_level = dev->lower_level - 1;
7150 	if (priv->flags & NESTED_SYNC_TODO)
7151 		net_unlink_todo(dev);
7152 #endif
7153 	return 0;
7154 }
7155 
7156 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7157 				  int (*fn)(struct net_device *dev,
7158 					    struct netdev_nested_priv *priv),
7159 				  struct netdev_nested_priv *priv)
7160 {
7161 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7162 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7163 	int ret, cur = 0;
7164 
7165 	now = dev;
7166 	iter = &dev->adj_list.lower;
7167 
7168 	while (1) {
7169 		if (now != dev) {
7170 			ret = fn(now, priv);
7171 			if (ret)
7172 				return ret;
7173 		}
7174 
7175 		next = NULL;
7176 		while (1) {
7177 			ldev = netdev_next_lower_dev_rcu(now, &iter);
7178 			if (!ldev)
7179 				break;
7180 
7181 			next = ldev;
7182 			niter = &ldev->adj_list.lower;
7183 			dev_stack[cur] = now;
7184 			iter_stack[cur++] = iter;
7185 			break;
7186 		}
7187 
7188 		if (!next) {
7189 			if (!cur)
7190 				return 0;
7191 			next = dev_stack[--cur];
7192 			niter = iter_stack[cur];
7193 		}
7194 
7195 		now = next;
7196 		iter = niter;
7197 	}
7198 
7199 	return 0;
7200 }
7201 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7202 
7203 /**
7204  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7205  *				       lower neighbour list, RCU
7206  *				       variant
7207  * @dev: device
7208  *
7209  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7210  * list. The caller must hold RCU read lock.
7211  */
7212 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7213 {
7214 	struct netdev_adjacent *lower;
7215 
7216 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
7217 			struct netdev_adjacent, list);
7218 	if (lower)
7219 		return lower->private;
7220 	return NULL;
7221 }
7222 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7223 
7224 /**
7225  * netdev_master_upper_dev_get_rcu - Get master upper device
7226  * @dev: device
7227  *
7228  * Find a master upper device and return pointer to it or NULL in case
7229  * it's not there. The caller must hold the RCU read lock.
7230  */
7231 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7232 {
7233 	struct netdev_adjacent *upper;
7234 
7235 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
7236 				       struct netdev_adjacent, list);
7237 	if (upper && likely(upper->master))
7238 		return upper->dev;
7239 	return NULL;
7240 }
7241 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7242 
7243 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7244 			      struct net_device *adj_dev,
7245 			      struct list_head *dev_list)
7246 {
7247 	char linkname[IFNAMSIZ+7];
7248 
7249 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7250 		"upper_%s" : "lower_%s", adj_dev->name);
7251 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7252 				 linkname);
7253 }
7254 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7255 			       char *name,
7256 			       struct list_head *dev_list)
7257 {
7258 	char linkname[IFNAMSIZ+7];
7259 
7260 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7261 		"upper_%s" : "lower_%s", name);
7262 	sysfs_remove_link(&(dev->dev.kobj), linkname);
7263 }
7264 
7265 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7266 						 struct net_device *adj_dev,
7267 						 struct list_head *dev_list)
7268 {
7269 	return (dev_list == &dev->adj_list.upper ||
7270 		dev_list == &dev->adj_list.lower) &&
7271 		net_eq(dev_net(dev), dev_net(adj_dev));
7272 }
7273 
7274 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7275 					struct net_device *adj_dev,
7276 					struct list_head *dev_list,
7277 					void *private, bool master)
7278 {
7279 	struct netdev_adjacent *adj;
7280 	int ret;
7281 
7282 	adj = __netdev_find_adj(adj_dev, dev_list);
7283 
7284 	if (adj) {
7285 		adj->ref_nr += 1;
7286 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7287 			 dev->name, adj_dev->name, adj->ref_nr);
7288 
7289 		return 0;
7290 	}
7291 
7292 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7293 	if (!adj)
7294 		return -ENOMEM;
7295 
7296 	adj->dev = adj_dev;
7297 	adj->master = master;
7298 	adj->ref_nr = 1;
7299 	adj->private = private;
7300 	adj->ignore = false;
7301 	dev_hold(adj_dev);
7302 
7303 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7304 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7305 
7306 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7307 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7308 		if (ret)
7309 			goto free_adj;
7310 	}
7311 
7312 	/* Ensure that master link is always the first item in list. */
7313 	if (master) {
7314 		ret = sysfs_create_link(&(dev->dev.kobj),
7315 					&(adj_dev->dev.kobj), "master");
7316 		if (ret)
7317 			goto remove_symlinks;
7318 
7319 		list_add_rcu(&adj->list, dev_list);
7320 	} else {
7321 		list_add_tail_rcu(&adj->list, dev_list);
7322 	}
7323 
7324 	return 0;
7325 
7326 remove_symlinks:
7327 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7328 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7329 free_adj:
7330 	kfree(adj);
7331 	dev_put(adj_dev);
7332 
7333 	return ret;
7334 }
7335 
7336 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7337 					 struct net_device *adj_dev,
7338 					 u16 ref_nr,
7339 					 struct list_head *dev_list)
7340 {
7341 	struct netdev_adjacent *adj;
7342 
7343 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7344 		 dev->name, adj_dev->name, ref_nr);
7345 
7346 	adj = __netdev_find_adj(adj_dev, dev_list);
7347 
7348 	if (!adj) {
7349 		pr_err("Adjacency does not exist for device %s from %s\n",
7350 		       dev->name, adj_dev->name);
7351 		WARN_ON(1);
7352 		return;
7353 	}
7354 
7355 	if (adj->ref_nr > ref_nr) {
7356 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7357 			 dev->name, adj_dev->name, ref_nr,
7358 			 adj->ref_nr - ref_nr);
7359 		adj->ref_nr -= ref_nr;
7360 		return;
7361 	}
7362 
7363 	if (adj->master)
7364 		sysfs_remove_link(&(dev->dev.kobj), "master");
7365 
7366 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7367 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7368 
7369 	list_del_rcu(&adj->list);
7370 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7371 		 adj_dev->name, dev->name, adj_dev->name);
7372 	dev_put(adj_dev);
7373 	kfree_rcu(adj, rcu);
7374 }
7375 
7376 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7377 					    struct net_device *upper_dev,
7378 					    struct list_head *up_list,
7379 					    struct list_head *down_list,
7380 					    void *private, bool master)
7381 {
7382 	int ret;
7383 
7384 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7385 					   private, master);
7386 	if (ret)
7387 		return ret;
7388 
7389 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7390 					   private, false);
7391 	if (ret) {
7392 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7393 		return ret;
7394 	}
7395 
7396 	return 0;
7397 }
7398 
7399 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7400 					       struct net_device *upper_dev,
7401 					       u16 ref_nr,
7402 					       struct list_head *up_list,
7403 					       struct list_head *down_list)
7404 {
7405 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7406 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7407 }
7408 
7409 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7410 						struct net_device *upper_dev,
7411 						void *private, bool master)
7412 {
7413 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7414 						&dev->adj_list.upper,
7415 						&upper_dev->adj_list.lower,
7416 						private, master);
7417 }
7418 
7419 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7420 						   struct net_device *upper_dev)
7421 {
7422 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7423 					   &dev->adj_list.upper,
7424 					   &upper_dev->adj_list.lower);
7425 }
7426 
7427 static int __netdev_upper_dev_link(struct net_device *dev,
7428 				   struct net_device *upper_dev, bool master,
7429 				   void *upper_priv, void *upper_info,
7430 				   struct netdev_nested_priv *priv,
7431 				   struct netlink_ext_ack *extack)
7432 {
7433 	struct netdev_notifier_changeupper_info changeupper_info = {
7434 		.info = {
7435 			.dev = dev,
7436 			.extack = extack,
7437 		},
7438 		.upper_dev = upper_dev,
7439 		.master = master,
7440 		.linking = true,
7441 		.upper_info = upper_info,
7442 	};
7443 	struct net_device *master_dev;
7444 	int ret = 0;
7445 
7446 	ASSERT_RTNL();
7447 
7448 	if (dev == upper_dev)
7449 		return -EBUSY;
7450 
7451 	/* To prevent loops, check if dev is not upper device to upper_dev. */
7452 	if (__netdev_has_upper_dev(upper_dev, dev))
7453 		return -EBUSY;
7454 
7455 	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7456 		return -EMLINK;
7457 
7458 	if (!master) {
7459 		if (__netdev_has_upper_dev(dev, upper_dev))
7460 			return -EEXIST;
7461 	} else {
7462 		master_dev = __netdev_master_upper_dev_get(dev);
7463 		if (master_dev)
7464 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
7465 	}
7466 
7467 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7468 					    &changeupper_info.info);
7469 	ret = notifier_to_errno(ret);
7470 	if (ret)
7471 		return ret;
7472 
7473 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7474 						   master);
7475 	if (ret)
7476 		return ret;
7477 
7478 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7479 					    &changeupper_info.info);
7480 	ret = notifier_to_errno(ret);
7481 	if (ret)
7482 		goto rollback;
7483 
7484 	__netdev_update_upper_level(dev, NULL);
7485 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7486 
7487 	__netdev_update_lower_level(upper_dev, priv);
7488 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7489 				    priv);
7490 
7491 	return 0;
7492 
7493 rollback:
7494 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7495 
7496 	return ret;
7497 }
7498 
7499 /**
7500  * netdev_upper_dev_link - Add a link to the upper device
7501  * @dev: device
7502  * @upper_dev: new upper device
7503  * @extack: netlink extended ack
7504  *
7505  * Adds a link to device which is upper to this one. The caller must hold
7506  * the RTNL lock. On a failure a negative errno code is returned.
7507  * On success the reference counts are adjusted and the function
7508  * returns zero.
7509  */
7510 int netdev_upper_dev_link(struct net_device *dev,
7511 			  struct net_device *upper_dev,
7512 			  struct netlink_ext_ack *extack)
7513 {
7514 	struct netdev_nested_priv priv = {
7515 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7516 		.data = NULL,
7517 	};
7518 
7519 	return __netdev_upper_dev_link(dev, upper_dev, false,
7520 				       NULL, NULL, &priv, extack);
7521 }
7522 EXPORT_SYMBOL(netdev_upper_dev_link);
7523 
7524 /**
7525  * netdev_master_upper_dev_link - Add a master link to the upper device
7526  * @dev: device
7527  * @upper_dev: new upper device
7528  * @upper_priv: upper device private
7529  * @upper_info: upper info to be passed down via notifier
7530  * @extack: netlink extended ack
7531  *
7532  * Adds a link to device which is upper to this one. In this case, only
7533  * one master upper device can be linked, although other non-master devices
7534  * might be linked as well. The caller must hold the RTNL lock.
7535  * On a failure a negative errno code is returned. On success the reference
7536  * counts are adjusted and the function returns zero.
7537  */
7538 int netdev_master_upper_dev_link(struct net_device *dev,
7539 				 struct net_device *upper_dev,
7540 				 void *upper_priv, void *upper_info,
7541 				 struct netlink_ext_ack *extack)
7542 {
7543 	struct netdev_nested_priv priv = {
7544 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7545 		.data = NULL,
7546 	};
7547 
7548 	return __netdev_upper_dev_link(dev, upper_dev, true,
7549 				       upper_priv, upper_info, &priv, extack);
7550 }
7551 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7552 
7553 static void __netdev_upper_dev_unlink(struct net_device *dev,
7554 				      struct net_device *upper_dev,
7555 				      struct netdev_nested_priv *priv)
7556 {
7557 	struct netdev_notifier_changeupper_info changeupper_info = {
7558 		.info = {
7559 			.dev = dev,
7560 		},
7561 		.upper_dev = upper_dev,
7562 		.linking = false,
7563 	};
7564 
7565 	ASSERT_RTNL();
7566 
7567 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7568 
7569 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7570 				      &changeupper_info.info);
7571 
7572 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7573 
7574 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7575 				      &changeupper_info.info);
7576 
7577 	__netdev_update_upper_level(dev, NULL);
7578 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7579 
7580 	__netdev_update_lower_level(upper_dev, priv);
7581 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7582 				    priv);
7583 }
7584 
7585 /**
7586  * netdev_upper_dev_unlink - Removes a link to upper device
7587  * @dev: device
7588  * @upper_dev: new upper device
7589  *
7590  * Removes a link to device which is upper to this one. The caller must hold
7591  * the RTNL lock.
7592  */
7593 void netdev_upper_dev_unlink(struct net_device *dev,
7594 			     struct net_device *upper_dev)
7595 {
7596 	struct netdev_nested_priv priv = {
7597 		.flags = NESTED_SYNC_TODO,
7598 		.data = NULL,
7599 	};
7600 
7601 	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
7602 }
7603 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7604 
7605 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7606 				      struct net_device *lower_dev,
7607 				      bool val)
7608 {
7609 	struct netdev_adjacent *adj;
7610 
7611 	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7612 	if (adj)
7613 		adj->ignore = val;
7614 
7615 	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7616 	if (adj)
7617 		adj->ignore = val;
7618 }
7619 
7620 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7621 					struct net_device *lower_dev)
7622 {
7623 	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7624 }
7625 
7626 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7627 				       struct net_device *lower_dev)
7628 {
7629 	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7630 }
7631 
7632 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7633 				   struct net_device *new_dev,
7634 				   struct net_device *dev,
7635 				   struct netlink_ext_ack *extack)
7636 {
7637 	struct netdev_nested_priv priv = {
7638 		.flags = 0,
7639 		.data = NULL,
7640 	};
7641 	int err;
7642 
7643 	if (!new_dev)
7644 		return 0;
7645 
7646 	if (old_dev && new_dev != old_dev)
7647 		netdev_adjacent_dev_disable(dev, old_dev);
7648 	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7649 				      extack);
7650 	if (err) {
7651 		if (old_dev && new_dev != old_dev)
7652 			netdev_adjacent_dev_enable(dev, old_dev);
7653 		return err;
7654 	}
7655 
7656 	return 0;
7657 }
7658 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7659 
7660 void netdev_adjacent_change_commit(struct net_device *old_dev,
7661 				   struct net_device *new_dev,
7662 				   struct net_device *dev)
7663 {
7664 	struct netdev_nested_priv priv = {
7665 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7666 		.data = NULL,
7667 	};
7668 
7669 	if (!new_dev || !old_dev)
7670 		return;
7671 
7672 	if (new_dev == old_dev)
7673 		return;
7674 
7675 	netdev_adjacent_dev_enable(dev, old_dev);
7676 	__netdev_upper_dev_unlink(old_dev, dev, &priv);
7677 }
7678 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7679 
7680 void netdev_adjacent_change_abort(struct net_device *old_dev,
7681 				  struct net_device *new_dev,
7682 				  struct net_device *dev)
7683 {
7684 	struct netdev_nested_priv priv = {
7685 		.flags = 0,
7686 		.data = NULL,
7687 	};
7688 
7689 	if (!new_dev)
7690 		return;
7691 
7692 	if (old_dev && new_dev != old_dev)
7693 		netdev_adjacent_dev_enable(dev, old_dev);
7694 
7695 	__netdev_upper_dev_unlink(new_dev, dev, &priv);
7696 }
7697 EXPORT_SYMBOL(netdev_adjacent_change_abort);
7698 
7699 /**
7700  * netdev_bonding_info_change - Dispatch event about slave change
7701  * @dev: device
7702  * @bonding_info: info to dispatch
7703  *
7704  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7705  * The caller must hold the RTNL lock.
7706  */
7707 void netdev_bonding_info_change(struct net_device *dev,
7708 				struct netdev_bonding_info *bonding_info)
7709 {
7710 	struct netdev_notifier_bonding_info info = {
7711 		.info.dev = dev,
7712 	};
7713 
7714 	memcpy(&info.bonding_info, bonding_info,
7715 	       sizeof(struct netdev_bonding_info));
7716 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7717 				      &info.info);
7718 }
7719 EXPORT_SYMBOL(netdev_bonding_info_change);
7720 
7721 /**
7722  * netdev_get_xmit_slave - Get the xmit slave of master device
7723  * @dev: device
7724  * @skb: The packet
7725  * @all_slaves: assume all the slaves are active
7726  *
7727  * The reference counters are not incremented so the caller must be
7728  * careful with locks. The caller must hold RCU lock.
7729  * %NULL is returned if no slave is found.
7730  */
7731 
7732 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
7733 					 struct sk_buff *skb,
7734 					 bool all_slaves)
7735 {
7736 	const struct net_device_ops *ops = dev->netdev_ops;
7737 
7738 	if (!ops->ndo_get_xmit_slave)
7739 		return NULL;
7740 	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
7741 }
7742 EXPORT_SYMBOL(netdev_get_xmit_slave);
7743 
7744 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
7745 						  struct sock *sk)
7746 {
7747 	const struct net_device_ops *ops = dev->netdev_ops;
7748 
7749 	if (!ops->ndo_sk_get_lower_dev)
7750 		return NULL;
7751 	return ops->ndo_sk_get_lower_dev(dev, sk);
7752 }
7753 
7754 /**
7755  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
7756  * @dev: device
7757  * @sk: the socket
7758  *
7759  * %NULL is returned if no lower device is found.
7760  */
7761 
7762 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
7763 					    struct sock *sk)
7764 {
7765 	struct net_device *lower;
7766 
7767 	lower = netdev_sk_get_lower_dev(dev, sk);
7768 	while (lower) {
7769 		dev = lower;
7770 		lower = netdev_sk_get_lower_dev(dev, sk);
7771 	}
7772 
7773 	return dev;
7774 }
7775 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
7776 
7777 static void netdev_adjacent_add_links(struct net_device *dev)
7778 {
7779 	struct netdev_adjacent *iter;
7780 
7781 	struct net *net = dev_net(dev);
7782 
7783 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
7784 		if (!net_eq(net, dev_net(iter->dev)))
7785 			continue;
7786 		netdev_adjacent_sysfs_add(iter->dev, dev,
7787 					  &iter->dev->adj_list.lower);
7788 		netdev_adjacent_sysfs_add(dev, iter->dev,
7789 					  &dev->adj_list.upper);
7790 	}
7791 
7792 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
7793 		if (!net_eq(net, dev_net(iter->dev)))
7794 			continue;
7795 		netdev_adjacent_sysfs_add(iter->dev, dev,
7796 					  &iter->dev->adj_list.upper);
7797 		netdev_adjacent_sysfs_add(dev, iter->dev,
7798 					  &dev->adj_list.lower);
7799 	}
7800 }
7801 
7802 static void netdev_adjacent_del_links(struct net_device *dev)
7803 {
7804 	struct netdev_adjacent *iter;
7805 
7806 	struct net *net = dev_net(dev);
7807 
7808 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
7809 		if (!net_eq(net, dev_net(iter->dev)))
7810 			continue;
7811 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
7812 					  &iter->dev->adj_list.lower);
7813 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
7814 					  &dev->adj_list.upper);
7815 	}
7816 
7817 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
7818 		if (!net_eq(net, dev_net(iter->dev)))
7819 			continue;
7820 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
7821 					  &iter->dev->adj_list.upper);
7822 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
7823 					  &dev->adj_list.lower);
7824 	}
7825 }
7826 
7827 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7828 {
7829 	struct netdev_adjacent *iter;
7830 
7831 	struct net *net = dev_net(dev);
7832 
7833 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
7834 		if (!net_eq(net, dev_net(iter->dev)))
7835 			continue;
7836 		netdev_adjacent_sysfs_del(iter->dev, oldname,
7837 					  &iter->dev->adj_list.lower);
7838 		netdev_adjacent_sysfs_add(iter->dev, dev,
7839 					  &iter->dev->adj_list.lower);
7840 	}
7841 
7842 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
7843 		if (!net_eq(net, dev_net(iter->dev)))
7844 			continue;
7845 		netdev_adjacent_sysfs_del(iter->dev, oldname,
7846 					  &iter->dev->adj_list.upper);
7847 		netdev_adjacent_sysfs_add(iter->dev, dev,
7848 					  &iter->dev->adj_list.upper);
7849 	}
7850 }
7851 
7852 void *netdev_lower_dev_get_private(struct net_device *dev,
7853 				   struct net_device *lower_dev)
7854 {
7855 	struct netdev_adjacent *lower;
7856 
7857 	if (!lower_dev)
7858 		return NULL;
7859 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7860 	if (!lower)
7861 		return NULL;
7862 
7863 	return lower->private;
7864 }
7865 EXPORT_SYMBOL(netdev_lower_dev_get_private);
7866 
7867 
7868 /**
7869  * netdev_lower_state_changed - Dispatch event about lower device state change
7870  * @lower_dev: device
7871  * @lower_state_info: state to dispatch
7872  *
7873  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
7874  * The caller must hold the RTNL lock.
7875  */
7876 void netdev_lower_state_changed(struct net_device *lower_dev,
7877 				void *lower_state_info)
7878 {
7879 	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7880 		.info.dev = lower_dev,
7881 	};
7882 
7883 	ASSERT_RTNL();
7884 	changelowerstate_info.lower_state_info = lower_state_info;
7885 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
7886 				      &changelowerstate_info.info);
7887 }
7888 EXPORT_SYMBOL(netdev_lower_state_changed);
7889 
7890 static void dev_change_rx_flags(struct net_device *dev, int flags)
7891 {
7892 	const struct net_device_ops *ops = dev->netdev_ops;
7893 
7894 	if (ops->ndo_change_rx_flags)
7895 		ops->ndo_change_rx_flags(dev, flags);
7896 }
7897 
7898 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
7899 {
7900 	unsigned int old_flags = dev->flags;
7901 	kuid_t uid;
7902 	kgid_t gid;
7903 
7904 	ASSERT_RTNL();
7905 
7906 	dev->flags |= IFF_PROMISC;
7907 	dev->promiscuity += inc;
7908 	if (dev->promiscuity == 0) {
7909 		/*
7910 		 * Avoid overflow.
7911 		 * If inc causes overflow, untouch promisc and return error.
7912 		 */
7913 		if (inc < 0)
7914 			dev->flags &= ~IFF_PROMISC;
7915 		else {
7916 			dev->promiscuity -= inc;
7917 			netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
7918 			return -EOVERFLOW;
7919 		}
7920 	}
7921 	if (dev->flags != old_flags) {
7922 		pr_info("device %s %s promiscuous mode\n",
7923 			dev->name,
7924 			dev->flags & IFF_PROMISC ? "entered" : "left");
7925 		if (audit_enabled) {
7926 			current_uid_gid(&uid, &gid);
7927 			audit_log(audit_context(), GFP_ATOMIC,
7928 				  AUDIT_ANOM_PROMISCUOUS,
7929 				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7930 				  dev->name, (dev->flags & IFF_PROMISC),
7931 				  (old_flags & IFF_PROMISC),
7932 				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
7933 				  from_kuid(&init_user_ns, uid),
7934 				  from_kgid(&init_user_ns, gid),
7935 				  audit_get_sessionid(current));
7936 		}
7937 
7938 		dev_change_rx_flags(dev, IFF_PROMISC);
7939 	}
7940 	if (notify)
7941 		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
7942 	return 0;
7943 }
7944 
7945 /**
7946  *	dev_set_promiscuity	- update promiscuity count on a device
7947  *	@dev: device
7948  *	@inc: modifier
7949  *
7950  *	Add or remove promiscuity from a device. While the count in the device
7951  *	remains above zero the interface remains promiscuous. Once it hits zero
7952  *	the device reverts back to normal filtering operation. A negative inc
7953  *	value is used to drop promiscuity on the device.
7954  *	Return 0 if successful or a negative errno code on error.
7955  */
7956 int dev_set_promiscuity(struct net_device *dev, int inc)
7957 {
7958 	unsigned int old_flags = dev->flags;
7959 	int err;
7960 
7961 	err = __dev_set_promiscuity(dev, inc, true);
7962 	if (err < 0)
7963 		return err;
7964 	if (dev->flags != old_flags)
7965 		dev_set_rx_mode(dev);
7966 	return err;
7967 }
7968 EXPORT_SYMBOL(dev_set_promiscuity);
7969 
7970 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
7971 {
7972 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
7973 
7974 	ASSERT_RTNL();
7975 
7976 	dev->flags |= IFF_ALLMULTI;
7977 	dev->allmulti += inc;
7978 	if (dev->allmulti == 0) {
7979 		/*
7980 		 * Avoid overflow.
7981 		 * If inc causes overflow, untouch allmulti and return error.
7982 		 */
7983 		if (inc < 0)
7984 			dev->flags &= ~IFF_ALLMULTI;
7985 		else {
7986 			dev->allmulti -= inc;
7987 			netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
7988 			return -EOVERFLOW;
7989 		}
7990 	}
7991 	if (dev->flags ^ old_flags) {
7992 		dev_change_rx_flags(dev, IFF_ALLMULTI);
7993 		dev_set_rx_mode(dev);
7994 		if (notify)
7995 			__dev_notify_flags(dev, old_flags,
7996 					   dev->gflags ^ old_gflags);
7997 	}
7998 	return 0;
7999 }
8000 
8001 /**
8002  *	dev_set_allmulti	- update allmulti count on a device
8003  *	@dev: device
8004  *	@inc: modifier
8005  *
8006  *	Add or remove reception of all multicast frames to a device. While the
8007  *	count in the device remains above zero the interface remains listening
8008  *	to all interfaces. Once it hits zero the device reverts back to normal
8009  *	filtering operation. A negative @inc value is used to drop the counter
8010  *	when releasing a resource needing all multicasts.
8011  *	Return 0 if successful or a negative errno code on error.
8012  */
8013 
8014 int dev_set_allmulti(struct net_device *dev, int inc)
8015 {
8016 	return __dev_set_allmulti(dev, inc, true);
8017 }
8018 EXPORT_SYMBOL(dev_set_allmulti);
8019 
8020 /*
8021  *	Upload unicast and multicast address lists to device and
8022  *	configure RX filtering. When the device doesn't support unicast
8023  *	filtering it is put in promiscuous mode while unicast addresses
8024  *	are present.
8025  */
8026 void __dev_set_rx_mode(struct net_device *dev)
8027 {
8028 	const struct net_device_ops *ops = dev->netdev_ops;
8029 
8030 	/* dev_open will call this function so the list will stay sane. */
8031 	if (!(dev->flags&IFF_UP))
8032 		return;
8033 
8034 	if (!netif_device_present(dev))
8035 		return;
8036 
8037 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8038 		/* Unicast addresses changes may only happen under the rtnl,
8039 		 * therefore calling __dev_set_promiscuity here is safe.
8040 		 */
8041 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8042 			__dev_set_promiscuity(dev, 1, false);
8043 			dev->uc_promisc = true;
8044 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8045 			__dev_set_promiscuity(dev, -1, false);
8046 			dev->uc_promisc = false;
8047 		}
8048 	}
8049 
8050 	if (ops->ndo_set_rx_mode)
8051 		ops->ndo_set_rx_mode(dev);
8052 }
8053 
8054 void dev_set_rx_mode(struct net_device *dev)
8055 {
8056 	netif_addr_lock_bh(dev);
8057 	__dev_set_rx_mode(dev);
8058 	netif_addr_unlock_bh(dev);
8059 }
8060 
8061 /**
8062  *	dev_get_flags - get flags reported to userspace
8063  *	@dev: device
8064  *
8065  *	Get the combination of flag bits exported through APIs to userspace.
8066  */
8067 unsigned int dev_get_flags(const struct net_device *dev)
8068 {
8069 	unsigned int flags;
8070 
8071 	flags = (dev->flags & ~(IFF_PROMISC |
8072 				IFF_ALLMULTI |
8073 				IFF_RUNNING |
8074 				IFF_LOWER_UP |
8075 				IFF_DORMANT)) |
8076 		(dev->gflags & (IFF_PROMISC |
8077 				IFF_ALLMULTI));
8078 
8079 	if (netif_running(dev)) {
8080 		if (netif_oper_up(dev))
8081 			flags |= IFF_RUNNING;
8082 		if (netif_carrier_ok(dev))
8083 			flags |= IFF_LOWER_UP;
8084 		if (netif_dormant(dev))
8085 			flags |= IFF_DORMANT;
8086 	}
8087 
8088 	return flags;
8089 }
8090 EXPORT_SYMBOL(dev_get_flags);
8091 
8092 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8093 		       struct netlink_ext_ack *extack)
8094 {
8095 	unsigned int old_flags = dev->flags;
8096 	int ret;
8097 
8098 	ASSERT_RTNL();
8099 
8100 	/*
8101 	 *	Set the flags on our device.
8102 	 */
8103 
8104 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8105 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8106 			       IFF_AUTOMEDIA)) |
8107 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8108 				    IFF_ALLMULTI));
8109 
8110 	/*
8111 	 *	Load in the correct multicast list now the flags have changed.
8112 	 */
8113 
8114 	if ((old_flags ^ flags) & IFF_MULTICAST)
8115 		dev_change_rx_flags(dev, IFF_MULTICAST);
8116 
8117 	dev_set_rx_mode(dev);
8118 
8119 	/*
8120 	 *	Have we downed the interface. We handle IFF_UP ourselves
8121 	 *	according to user attempts to set it, rather than blindly
8122 	 *	setting it.
8123 	 */
8124 
8125 	ret = 0;
8126 	if ((old_flags ^ flags) & IFF_UP) {
8127 		if (old_flags & IFF_UP)
8128 			__dev_close(dev);
8129 		else
8130 			ret = __dev_open(dev, extack);
8131 	}
8132 
8133 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8134 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8135 		unsigned int old_flags = dev->flags;
8136 
8137 		dev->gflags ^= IFF_PROMISC;
8138 
8139 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8140 			if (dev->flags != old_flags)
8141 				dev_set_rx_mode(dev);
8142 	}
8143 
8144 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8145 	 * is important. Some (broken) drivers set IFF_PROMISC, when
8146 	 * IFF_ALLMULTI is requested not asking us and not reporting.
8147 	 */
8148 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8149 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8150 
8151 		dev->gflags ^= IFF_ALLMULTI;
8152 		__dev_set_allmulti(dev, inc, false);
8153 	}
8154 
8155 	return ret;
8156 }
8157 
8158 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8159 			unsigned int gchanges)
8160 {
8161 	unsigned int changes = dev->flags ^ old_flags;
8162 
8163 	if (gchanges)
8164 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8165 
8166 	if (changes & IFF_UP) {
8167 		if (dev->flags & IFF_UP)
8168 			call_netdevice_notifiers(NETDEV_UP, dev);
8169 		else
8170 			call_netdevice_notifiers(NETDEV_DOWN, dev);
8171 	}
8172 
8173 	if (dev->flags & IFF_UP &&
8174 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8175 		struct netdev_notifier_change_info change_info = {
8176 			.info = {
8177 				.dev = dev,
8178 			},
8179 			.flags_changed = changes,
8180 		};
8181 
8182 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8183 	}
8184 }
8185 
8186 /**
8187  *	dev_change_flags - change device settings
8188  *	@dev: device
8189  *	@flags: device state flags
8190  *	@extack: netlink extended ack
8191  *
8192  *	Change settings on device based state flags. The flags are
8193  *	in the userspace exported format.
8194  */
8195 int dev_change_flags(struct net_device *dev, unsigned int flags,
8196 		     struct netlink_ext_ack *extack)
8197 {
8198 	int ret;
8199 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8200 
8201 	ret = __dev_change_flags(dev, flags, extack);
8202 	if (ret < 0)
8203 		return ret;
8204 
8205 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8206 	__dev_notify_flags(dev, old_flags, changes);
8207 	return ret;
8208 }
8209 EXPORT_SYMBOL(dev_change_flags);
8210 
8211 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8212 {
8213 	const struct net_device_ops *ops = dev->netdev_ops;
8214 
8215 	if (ops->ndo_change_mtu)
8216 		return ops->ndo_change_mtu(dev, new_mtu);
8217 
8218 	/* Pairs with all the lockless reads of dev->mtu in the stack */
8219 	WRITE_ONCE(dev->mtu, new_mtu);
8220 	return 0;
8221 }
8222 EXPORT_SYMBOL(__dev_set_mtu);
8223 
8224 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8225 		     struct netlink_ext_ack *extack)
8226 {
8227 	/* MTU must be positive, and in range */
8228 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8229 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8230 		return -EINVAL;
8231 	}
8232 
8233 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8234 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8235 		return -EINVAL;
8236 	}
8237 	return 0;
8238 }
8239 
8240 /**
8241  *	dev_set_mtu_ext - Change maximum transfer unit
8242  *	@dev: device
8243  *	@new_mtu: new transfer unit
8244  *	@extack: netlink extended ack
8245  *
8246  *	Change the maximum transfer size of the network device.
8247  */
8248 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8249 		    struct netlink_ext_ack *extack)
8250 {
8251 	int err, orig_mtu;
8252 
8253 	if (new_mtu == dev->mtu)
8254 		return 0;
8255 
8256 	err = dev_validate_mtu(dev, new_mtu, extack);
8257 	if (err)
8258 		return err;
8259 
8260 	if (!netif_device_present(dev))
8261 		return -ENODEV;
8262 
8263 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8264 	err = notifier_to_errno(err);
8265 	if (err)
8266 		return err;
8267 
8268 	orig_mtu = dev->mtu;
8269 	err = __dev_set_mtu(dev, new_mtu);
8270 
8271 	if (!err) {
8272 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8273 						   orig_mtu);
8274 		err = notifier_to_errno(err);
8275 		if (err) {
8276 			/* setting mtu back and notifying everyone again,
8277 			 * so that they have a chance to revert changes.
8278 			 */
8279 			__dev_set_mtu(dev, orig_mtu);
8280 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8281 						     new_mtu);
8282 		}
8283 	}
8284 	return err;
8285 }
8286 
8287 int dev_set_mtu(struct net_device *dev, int new_mtu)
8288 {
8289 	struct netlink_ext_ack extack;
8290 	int err;
8291 
8292 	memset(&extack, 0, sizeof(extack));
8293 	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8294 	if (err && extack._msg)
8295 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8296 	return err;
8297 }
8298 EXPORT_SYMBOL(dev_set_mtu);
8299 
8300 /**
8301  *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8302  *	@dev: device
8303  *	@new_len: new tx queue length
8304  */
8305 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8306 {
8307 	unsigned int orig_len = dev->tx_queue_len;
8308 	int res;
8309 
8310 	if (new_len != (unsigned int)new_len)
8311 		return -ERANGE;
8312 
8313 	if (new_len != orig_len) {
8314 		dev->tx_queue_len = new_len;
8315 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8316 		res = notifier_to_errno(res);
8317 		if (res)
8318 			goto err_rollback;
8319 		res = dev_qdisc_change_tx_queue_len(dev);
8320 		if (res)
8321 			goto err_rollback;
8322 	}
8323 
8324 	return 0;
8325 
8326 err_rollback:
8327 	netdev_err(dev, "refused to change device tx_queue_len\n");
8328 	dev->tx_queue_len = orig_len;
8329 	return res;
8330 }
8331 
8332 /**
8333  *	dev_set_group - Change group this device belongs to
8334  *	@dev: device
8335  *	@new_group: group this device should belong to
8336  */
8337 void dev_set_group(struct net_device *dev, int new_group)
8338 {
8339 	dev->group = new_group;
8340 }
8341 EXPORT_SYMBOL(dev_set_group);
8342 
8343 /**
8344  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8345  *	@dev: device
8346  *	@addr: new address
8347  *	@extack: netlink extended ack
8348  */
8349 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8350 			      struct netlink_ext_ack *extack)
8351 {
8352 	struct netdev_notifier_pre_changeaddr_info info = {
8353 		.info.dev = dev,
8354 		.info.extack = extack,
8355 		.dev_addr = addr,
8356 	};
8357 	int rc;
8358 
8359 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8360 	return notifier_to_errno(rc);
8361 }
8362 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8363 
8364 /**
8365  *	dev_set_mac_address - Change Media Access Control Address
8366  *	@dev: device
8367  *	@sa: new address
8368  *	@extack: netlink extended ack
8369  *
8370  *	Change the hardware (MAC) address of the device
8371  */
8372 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8373 			struct netlink_ext_ack *extack)
8374 {
8375 	const struct net_device_ops *ops = dev->netdev_ops;
8376 	int err;
8377 
8378 	if (!ops->ndo_set_mac_address)
8379 		return -EOPNOTSUPP;
8380 	if (sa->sa_family != dev->type)
8381 		return -EINVAL;
8382 	if (!netif_device_present(dev))
8383 		return -ENODEV;
8384 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8385 	if (err)
8386 		return err;
8387 	err = ops->ndo_set_mac_address(dev, sa);
8388 	if (err)
8389 		return err;
8390 	dev->addr_assign_type = NET_ADDR_SET;
8391 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8392 	add_device_randomness(dev->dev_addr, dev->addr_len);
8393 	return 0;
8394 }
8395 EXPORT_SYMBOL(dev_set_mac_address);
8396 
8397 static DECLARE_RWSEM(dev_addr_sem);
8398 
8399 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8400 			     struct netlink_ext_ack *extack)
8401 {
8402 	int ret;
8403 
8404 	down_write(&dev_addr_sem);
8405 	ret = dev_set_mac_address(dev, sa, extack);
8406 	up_write(&dev_addr_sem);
8407 	return ret;
8408 }
8409 EXPORT_SYMBOL(dev_set_mac_address_user);
8410 
8411 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8412 {
8413 	size_t size = sizeof(sa->sa_data);
8414 	struct net_device *dev;
8415 	int ret = 0;
8416 
8417 	down_read(&dev_addr_sem);
8418 	rcu_read_lock();
8419 
8420 	dev = dev_get_by_name_rcu(net, dev_name);
8421 	if (!dev) {
8422 		ret = -ENODEV;
8423 		goto unlock;
8424 	}
8425 	if (!dev->addr_len)
8426 		memset(sa->sa_data, 0, size);
8427 	else
8428 		memcpy(sa->sa_data, dev->dev_addr,
8429 		       min_t(size_t, size, dev->addr_len));
8430 	sa->sa_family = dev->type;
8431 
8432 unlock:
8433 	rcu_read_unlock();
8434 	up_read(&dev_addr_sem);
8435 	return ret;
8436 }
8437 EXPORT_SYMBOL(dev_get_mac_address);
8438 
8439 /**
8440  *	dev_change_carrier - Change device carrier
8441  *	@dev: device
8442  *	@new_carrier: new value
8443  *
8444  *	Change device carrier
8445  */
8446 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8447 {
8448 	const struct net_device_ops *ops = dev->netdev_ops;
8449 
8450 	if (!ops->ndo_change_carrier)
8451 		return -EOPNOTSUPP;
8452 	if (!netif_device_present(dev))
8453 		return -ENODEV;
8454 	return ops->ndo_change_carrier(dev, new_carrier);
8455 }
8456 EXPORT_SYMBOL(dev_change_carrier);
8457 
8458 /**
8459  *	dev_get_phys_port_id - Get device physical port ID
8460  *	@dev: device
8461  *	@ppid: port ID
8462  *
8463  *	Get device physical port ID
8464  */
8465 int dev_get_phys_port_id(struct net_device *dev,
8466 			 struct netdev_phys_item_id *ppid)
8467 {
8468 	const struct net_device_ops *ops = dev->netdev_ops;
8469 
8470 	if (!ops->ndo_get_phys_port_id)
8471 		return -EOPNOTSUPP;
8472 	return ops->ndo_get_phys_port_id(dev, ppid);
8473 }
8474 EXPORT_SYMBOL(dev_get_phys_port_id);
8475 
8476 /**
8477  *	dev_get_phys_port_name - Get device physical port name
8478  *	@dev: device
8479  *	@name: port name
8480  *	@len: limit of bytes to copy to name
8481  *
8482  *	Get device physical port name
8483  */
8484 int dev_get_phys_port_name(struct net_device *dev,
8485 			   char *name, size_t len)
8486 {
8487 	const struct net_device_ops *ops = dev->netdev_ops;
8488 	int err;
8489 
8490 	if (ops->ndo_get_phys_port_name) {
8491 		err = ops->ndo_get_phys_port_name(dev, name, len);
8492 		if (err != -EOPNOTSUPP)
8493 			return err;
8494 	}
8495 	return devlink_compat_phys_port_name_get(dev, name, len);
8496 }
8497 EXPORT_SYMBOL(dev_get_phys_port_name);
8498 
8499 /**
8500  *	dev_get_port_parent_id - Get the device's port parent identifier
8501  *	@dev: network device
8502  *	@ppid: pointer to a storage for the port's parent identifier
8503  *	@recurse: allow/disallow recursion to lower devices
8504  *
8505  *	Get the devices's port parent identifier
8506  */
8507 int dev_get_port_parent_id(struct net_device *dev,
8508 			   struct netdev_phys_item_id *ppid,
8509 			   bool recurse)
8510 {
8511 	const struct net_device_ops *ops = dev->netdev_ops;
8512 	struct netdev_phys_item_id first = { };
8513 	struct net_device *lower_dev;
8514 	struct list_head *iter;
8515 	int err;
8516 
8517 	if (ops->ndo_get_port_parent_id) {
8518 		err = ops->ndo_get_port_parent_id(dev, ppid);
8519 		if (err != -EOPNOTSUPP)
8520 			return err;
8521 	}
8522 
8523 	err = devlink_compat_switch_id_get(dev, ppid);
8524 	if (!recurse || err != -EOPNOTSUPP)
8525 		return err;
8526 
8527 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
8528 		err = dev_get_port_parent_id(lower_dev, ppid, true);
8529 		if (err)
8530 			break;
8531 		if (!first.id_len)
8532 			first = *ppid;
8533 		else if (memcmp(&first, ppid, sizeof(*ppid)))
8534 			return -EOPNOTSUPP;
8535 	}
8536 
8537 	return err;
8538 }
8539 EXPORT_SYMBOL(dev_get_port_parent_id);
8540 
8541 /**
8542  *	netdev_port_same_parent_id - Indicate if two network devices have
8543  *	the same port parent identifier
8544  *	@a: first network device
8545  *	@b: second network device
8546  */
8547 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8548 {
8549 	struct netdev_phys_item_id a_id = { };
8550 	struct netdev_phys_item_id b_id = { };
8551 
8552 	if (dev_get_port_parent_id(a, &a_id, true) ||
8553 	    dev_get_port_parent_id(b, &b_id, true))
8554 		return false;
8555 
8556 	return netdev_phys_item_id_same(&a_id, &b_id);
8557 }
8558 EXPORT_SYMBOL(netdev_port_same_parent_id);
8559 
8560 /**
8561  *	dev_change_proto_down - set carrier according to proto_down.
8562  *
8563  *	@dev: device
8564  *	@proto_down: new value
8565  */
8566 int dev_change_proto_down(struct net_device *dev, bool proto_down)
8567 {
8568 	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
8569 		return -EOPNOTSUPP;
8570 	if (!netif_device_present(dev))
8571 		return -ENODEV;
8572 	if (proto_down)
8573 		netif_carrier_off(dev);
8574 	else
8575 		netif_carrier_on(dev);
8576 	dev->proto_down = proto_down;
8577 	return 0;
8578 }
8579 EXPORT_SYMBOL(dev_change_proto_down);
8580 
8581 /**
8582  *	dev_change_proto_down_reason - proto down reason
8583  *
8584  *	@dev: device
8585  *	@mask: proto down mask
8586  *	@value: proto down value
8587  */
8588 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
8589 				  u32 value)
8590 {
8591 	int b;
8592 
8593 	if (!mask) {
8594 		dev->proto_down_reason = value;
8595 	} else {
8596 		for_each_set_bit(b, &mask, 32) {
8597 			if (value & (1 << b))
8598 				dev->proto_down_reason |= BIT(b);
8599 			else
8600 				dev->proto_down_reason &= ~BIT(b);
8601 		}
8602 	}
8603 }
8604 EXPORT_SYMBOL(dev_change_proto_down_reason);
8605 
8606 struct bpf_xdp_link {
8607 	struct bpf_link link;
8608 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
8609 	int flags;
8610 };
8611 
8612 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
8613 {
8614 	if (flags & XDP_FLAGS_HW_MODE)
8615 		return XDP_MODE_HW;
8616 	if (flags & XDP_FLAGS_DRV_MODE)
8617 		return XDP_MODE_DRV;
8618 	if (flags & XDP_FLAGS_SKB_MODE)
8619 		return XDP_MODE_SKB;
8620 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
8621 }
8622 
8623 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
8624 {
8625 	switch (mode) {
8626 	case XDP_MODE_SKB:
8627 		return generic_xdp_install;
8628 	case XDP_MODE_DRV:
8629 	case XDP_MODE_HW:
8630 		return dev->netdev_ops->ndo_bpf;
8631 	default:
8632 		return NULL;
8633 	}
8634 }
8635 
8636 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
8637 					 enum bpf_xdp_mode mode)
8638 {
8639 	return dev->xdp_state[mode].link;
8640 }
8641 
8642 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
8643 				     enum bpf_xdp_mode mode)
8644 {
8645 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
8646 
8647 	if (link)
8648 		return link->link.prog;
8649 	return dev->xdp_state[mode].prog;
8650 }
8651 
8652 u8 dev_xdp_prog_count(struct net_device *dev)
8653 {
8654 	u8 count = 0;
8655 	int i;
8656 
8657 	for (i = 0; i < __MAX_XDP_MODE; i++)
8658 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
8659 			count++;
8660 	return count;
8661 }
8662 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
8663 
8664 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
8665 {
8666 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
8667 
8668 	return prog ? prog->aux->id : 0;
8669 }
8670 
8671 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
8672 			     struct bpf_xdp_link *link)
8673 {
8674 	dev->xdp_state[mode].link = link;
8675 	dev->xdp_state[mode].prog = NULL;
8676 }
8677 
8678 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
8679 			     struct bpf_prog *prog)
8680 {
8681 	dev->xdp_state[mode].link = NULL;
8682 	dev->xdp_state[mode].prog = prog;
8683 }
8684 
8685 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
8686 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
8687 			   u32 flags, struct bpf_prog *prog)
8688 {
8689 	struct netdev_bpf xdp;
8690 	int err;
8691 
8692 	memset(&xdp, 0, sizeof(xdp));
8693 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
8694 	xdp.extack = extack;
8695 	xdp.flags = flags;
8696 	xdp.prog = prog;
8697 
8698 	/* Drivers assume refcnt is already incremented (i.e, prog pointer is
8699 	 * "moved" into driver), so they don't increment it on their own, but
8700 	 * they do decrement refcnt when program is detached or replaced.
8701 	 * Given net_device also owns link/prog, we need to bump refcnt here
8702 	 * to prevent drivers from underflowing it.
8703 	 */
8704 	if (prog)
8705 		bpf_prog_inc(prog);
8706 	err = bpf_op(dev, &xdp);
8707 	if (err) {
8708 		if (prog)
8709 			bpf_prog_put(prog);
8710 		return err;
8711 	}
8712 
8713 	if (mode != XDP_MODE_HW)
8714 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
8715 
8716 	return 0;
8717 }
8718 
8719 static void dev_xdp_uninstall(struct net_device *dev)
8720 {
8721 	struct bpf_xdp_link *link;
8722 	struct bpf_prog *prog;
8723 	enum bpf_xdp_mode mode;
8724 	bpf_op_t bpf_op;
8725 
8726 	ASSERT_RTNL();
8727 
8728 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
8729 		prog = dev_xdp_prog(dev, mode);
8730 		if (!prog)
8731 			continue;
8732 
8733 		bpf_op = dev_xdp_bpf_op(dev, mode);
8734 		if (!bpf_op)
8735 			continue;
8736 
8737 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
8738 
8739 		/* auto-detach link from net device */
8740 		link = dev_xdp_link(dev, mode);
8741 		if (link)
8742 			link->dev = NULL;
8743 		else
8744 			bpf_prog_put(prog);
8745 
8746 		dev_xdp_set_link(dev, mode, NULL);
8747 	}
8748 }
8749 
8750 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
8751 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
8752 			  struct bpf_prog *old_prog, u32 flags)
8753 {
8754 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
8755 	struct bpf_prog *cur_prog;
8756 	struct net_device *upper;
8757 	struct list_head *iter;
8758 	enum bpf_xdp_mode mode;
8759 	bpf_op_t bpf_op;
8760 	int err;
8761 
8762 	ASSERT_RTNL();
8763 
8764 	/* either link or prog attachment, never both */
8765 	if (link && (new_prog || old_prog))
8766 		return -EINVAL;
8767 	/* link supports only XDP mode flags */
8768 	if (link && (flags & ~XDP_FLAGS_MODES)) {
8769 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
8770 		return -EINVAL;
8771 	}
8772 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
8773 	if (num_modes > 1) {
8774 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
8775 		return -EINVAL;
8776 	}
8777 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
8778 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
8779 		NL_SET_ERR_MSG(extack,
8780 			       "More than one program loaded, unset mode is ambiguous");
8781 		return -EINVAL;
8782 	}
8783 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
8784 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
8785 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
8786 		return -EINVAL;
8787 	}
8788 
8789 	mode = dev_xdp_mode(dev, flags);
8790 	/* can't replace attached link */
8791 	if (dev_xdp_link(dev, mode)) {
8792 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
8793 		return -EBUSY;
8794 	}
8795 
8796 	/* don't allow if an upper device already has a program */
8797 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
8798 		if (dev_xdp_prog_count(upper) > 0) {
8799 			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
8800 			return -EEXIST;
8801 		}
8802 	}
8803 
8804 	cur_prog = dev_xdp_prog(dev, mode);
8805 	/* can't replace attached prog with link */
8806 	if (link && cur_prog) {
8807 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
8808 		return -EBUSY;
8809 	}
8810 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
8811 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
8812 		return -EEXIST;
8813 	}
8814 
8815 	/* put effective new program into new_prog */
8816 	if (link)
8817 		new_prog = link->link.prog;
8818 
8819 	if (new_prog) {
8820 		bool offload = mode == XDP_MODE_HW;
8821 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
8822 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
8823 
8824 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
8825 			NL_SET_ERR_MSG(extack, "XDP program already attached");
8826 			return -EBUSY;
8827 		}
8828 		if (!offload && dev_xdp_prog(dev, other_mode)) {
8829 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
8830 			return -EEXIST;
8831 		}
8832 		if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
8833 			NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
8834 			return -EINVAL;
8835 		}
8836 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
8837 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
8838 			return -EINVAL;
8839 		}
8840 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
8841 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
8842 			return -EINVAL;
8843 		}
8844 	}
8845 
8846 	/* don't call drivers if the effective program didn't change */
8847 	if (new_prog != cur_prog) {
8848 		bpf_op = dev_xdp_bpf_op(dev, mode);
8849 		if (!bpf_op) {
8850 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
8851 			return -EOPNOTSUPP;
8852 		}
8853 
8854 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
8855 		if (err)
8856 			return err;
8857 	}
8858 
8859 	if (link)
8860 		dev_xdp_set_link(dev, mode, link);
8861 	else
8862 		dev_xdp_set_prog(dev, mode, new_prog);
8863 	if (cur_prog)
8864 		bpf_prog_put(cur_prog);
8865 
8866 	return 0;
8867 }
8868 
8869 static int dev_xdp_attach_link(struct net_device *dev,
8870 			       struct netlink_ext_ack *extack,
8871 			       struct bpf_xdp_link *link)
8872 {
8873 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
8874 }
8875 
8876 static int dev_xdp_detach_link(struct net_device *dev,
8877 			       struct netlink_ext_ack *extack,
8878 			       struct bpf_xdp_link *link)
8879 {
8880 	enum bpf_xdp_mode mode;
8881 	bpf_op_t bpf_op;
8882 
8883 	ASSERT_RTNL();
8884 
8885 	mode = dev_xdp_mode(dev, link->flags);
8886 	if (dev_xdp_link(dev, mode) != link)
8887 		return -EINVAL;
8888 
8889 	bpf_op = dev_xdp_bpf_op(dev, mode);
8890 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
8891 	dev_xdp_set_link(dev, mode, NULL);
8892 	return 0;
8893 }
8894 
8895 static void bpf_xdp_link_release(struct bpf_link *link)
8896 {
8897 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
8898 
8899 	rtnl_lock();
8900 
8901 	/* if racing with net_device's tear down, xdp_link->dev might be
8902 	 * already NULL, in which case link was already auto-detached
8903 	 */
8904 	if (xdp_link->dev) {
8905 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
8906 		xdp_link->dev = NULL;
8907 	}
8908 
8909 	rtnl_unlock();
8910 }
8911 
8912 static int bpf_xdp_link_detach(struct bpf_link *link)
8913 {
8914 	bpf_xdp_link_release(link);
8915 	return 0;
8916 }
8917 
8918 static void bpf_xdp_link_dealloc(struct bpf_link *link)
8919 {
8920 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
8921 
8922 	kfree(xdp_link);
8923 }
8924 
8925 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
8926 				     struct seq_file *seq)
8927 {
8928 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
8929 	u32 ifindex = 0;
8930 
8931 	rtnl_lock();
8932 	if (xdp_link->dev)
8933 		ifindex = xdp_link->dev->ifindex;
8934 	rtnl_unlock();
8935 
8936 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
8937 }
8938 
8939 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
8940 				       struct bpf_link_info *info)
8941 {
8942 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
8943 	u32 ifindex = 0;
8944 
8945 	rtnl_lock();
8946 	if (xdp_link->dev)
8947 		ifindex = xdp_link->dev->ifindex;
8948 	rtnl_unlock();
8949 
8950 	info->xdp.ifindex = ifindex;
8951 	return 0;
8952 }
8953 
8954 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
8955 			       struct bpf_prog *old_prog)
8956 {
8957 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
8958 	enum bpf_xdp_mode mode;
8959 	bpf_op_t bpf_op;
8960 	int err = 0;
8961 
8962 	rtnl_lock();
8963 
8964 	/* link might have been auto-released already, so fail */
8965 	if (!xdp_link->dev) {
8966 		err = -ENOLINK;
8967 		goto out_unlock;
8968 	}
8969 
8970 	if (old_prog && link->prog != old_prog) {
8971 		err = -EPERM;
8972 		goto out_unlock;
8973 	}
8974 	old_prog = link->prog;
8975 	if (old_prog == new_prog) {
8976 		/* no-op, don't disturb drivers */
8977 		bpf_prog_put(new_prog);
8978 		goto out_unlock;
8979 	}
8980 
8981 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
8982 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
8983 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
8984 			      xdp_link->flags, new_prog);
8985 	if (err)
8986 		goto out_unlock;
8987 
8988 	old_prog = xchg(&link->prog, new_prog);
8989 	bpf_prog_put(old_prog);
8990 
8991 out_unlock:
8992 	rtnl_unlock();
8993 	return err;
8994 }
8995 
8996 static const struct bpf_link_ops bpf_xdp_link_lops = {
8997 	.release = bpf_xdp_link_release,
8998 	.dealloc = bpf_xdp_link_dealloc,
8999 	.detach = bpf_xdp_link_detach,
9000 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
9001 	.fill_link_info = bpf_xdp_link_fill_link_info,
9002 	.update_prog = bpf_xdp_link_update,
9003 };
9004 
9005 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9006 {
9007 	struct net *net = current->nsproxy->net_ns;
9008 	struct bpf_link_primer link_primer;
9009 	struct bpf_xdp_link *link;
9010 	struct net_device *dev;
9011 	int err, fd;
9012 
9013 	rtnl_lock();
9014 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9015 	if (!dev) {
9016 		rtnl_unlock();
9017 		return -EINVAL;
9018 	}
9019 
9020 	link = kzalloc(sizeof(*link), GFP_USER);
9021 	if (!link) {
9022 		err = -ENOMEM;
9023 		goto unlock;
9024 	}
9025 
9026 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9027 	link->dev = dev;
9028 	link->flags = attr->link_create.flags;
9029 
9030 	err = bpf_link_prime(&link->link, &link_primer);
9031 	if (err) {
9032 		kfree(link);
9033 		goto unlock;
9034 	}
9035 
9036 	err = dev_xdp_attach_link(dev, NULL, link);
9037 	rtnl_unlock();
9038 
9039 	if (err) {
9040 		link->dev = NULL;
9041 		bpf_link_cleanup(&link_primer);
9042 		goto out_put_dev;
9043 	}
9044 
9045 	fd = bpf_link_settle(&link_primer);
9046 	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
9047 	dev_put(dev);
9048 	return fd;
9049 
9050 unlock:
9051 	rtnl_unlock();
9052 
9053 out_put_dev:
9054 	dev_put(dev);
9055 	return err;
9056 }
9057 
9058 /**
9059  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
9060  *	@dev: device
9061  *	@extack: netlink extended ack
9062  *	@fd: new program fd or negative value to clear
9063  *	@expected_fd: old program fd that userspace expects to replace or clear
9064  *	@flags: xdp-related flags
9065  *
9066  *	Set or clear a bpf program for a device
9067  */
9068 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9069 		      int fd, int expected_fd, u32 flags)
9070 {
9071 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9072 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9073 	int err;
9074 
9075 	ASSERT_RTNL();
9076 
9077 	if (fd >= 0) {
9078 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9079 						 mode != XDP_MODE_SKB);
9080 		if (IS_ERR(new_prog))
9081 			return PTR_ERR(new_prog);
9082 	}
9083 
9084 	if (expected_fd >= 0) {
9085 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9086 						 mode != XDP_MODE_SKB);
9087 		if (IS_ERR(old_prog)) {
9088 			err = PTR_ERR(old_prog);
9089 			old_prog = NULL;
9090 			goto err_out;
9091 		}
9092 	}
9093 
9094 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9095 
9096 err_out:
9097 	if (err && new_prog)
9098 		bpf_prog_put(new_prog);
9099 	if (old_prog)
9100 		bpf_prog_put(old_prog);
9101 	return err;
9102 }
9103 
9104 /**
9105  *	dev_new_index	-	allocate an ifindex
9106  *	@net: the applicable net namespace
9107  *
9108  *	Returns a suitable unique value for a new device interface
9109  *	number.  The caller must hold the rtnl semaphore or the
9110  *	dev_base_lock to be sure it remains unique.
9111  */
9112 static int dev_new_index(struct net *net)
9113 {
9114 	int ifindex = net->ifindex;
9115 
9116 	for (;;) {
9117 		if (++ifindex <= 0)
9118 			ifindex = 1;
9119 		if (!__dev_get_by_index(net, ifindex))
9120 			return net->ifindex = ifindex;
9121 	}
9122 }
9123 
9124 /* Delayed registration/unregisteration */
9125 static LIST_HEAD(net_todo_list);
9126 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9127 
9128 static void net_set_todo(struct net_device *dev)
9129 {
9130 	list_add_tail(&dev->todo_list, &net_todo_list);
9131 	dev_net(dev)->dev_unreg_count++;
9132 }
9133 
9134 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9135 	struct net_device *upper, netdev_features_t features)
9136 {
9137 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9138 	netdev_features_t feature;
9139 	int feature_bit;
9140 
9141 	for_each_netdev_feature(upper_disables, feature_bit) {
9142 		feature = __NETIF_F_BIT(feature_bit);
9143 		if (!(upper->wanted_features & feature)
9144 		    && (features & feature)) {
9145 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9146 				   &feature, upper->name);
9147 			features &= ~feature;
9148 		}
9149 	}
9150 
9151 	return features;
9152 }
9153 
9154 static void netdev_sync_lower_features(struct net_device *upper,
9155 	struct net_device *lower, netdev_features_t features)
9156 {
9157 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9158 	netdev_features_t feature;
9159 	int feature_bit;
9160 
9161 	for_each_netdev_feature(upper_disables, feature_bit) {
9162 		feature = __NETIF_F_BIT(feature_bit);
9163 		if (!(features & feature) && (lower->features & feature)) {
9164 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9165 				   &feature, lower->name);
9166 			lower->wanted_features &= ~feature;
9167 			__netdev_update_features(lower);
9168 
9169 			if (unlikely(lower->features & feature))
9170 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9171 					    &feature, lower->name);
9172 			else
9173 				netdev_features_change(lower);
9174 		}
9175 	}
9176 }
9177 
9178 static netdev_features_t netdev_fix_features(struct net_device *dev,
9179 	netdev_features_t features)
9180 {
9181 	/* Fix illegal checksum combinations */
9182 	if ((features & NETIF_F_HW_CSUM) &&
9183 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9184 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9185 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9186 	}
9187 
9188 	/* TSO requires that SG is present as well. */
9189 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9190 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9191 		features &= ~NETIF_F_ALL_TSO;
9192 	}
9193 
9194 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9195 					!(features & NETIF_F_IP_CSUM)) {
9196 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9197 		features &= ~NETIF_F_TSO;
9198 		features &= ~NETIF_F_TSO_ECN;
9199 	}
9200 
9201 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9202 					 !(features & NETIF_F_IPV6_CSUM)) {
9203 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9204 		features &= ~NETIF_F_TSO6;
9205 	}
9206 
9207 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9208 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9209 		features &= ~NETIF_F_TSO_MANGLEID;
9210 
9211 	/* TSO ECN requires that TSO is present as well. */
9212 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9213 		features &= ~NETIF_F_TSO_ECN;
9214 
9215 	/* Software GSO depends on SG. */
9216 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9217 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9218 		features &= ~NETIF_F_GSO;
9219 	}
9220 
9221 	/* GSO partial features require GSO partial be set */
9222 	if ((features & dev->gso_partial_features) &&
9223 	    !(features & NETIF_F_GSO_PARTIAL)) {
9224 		netdev_dbg(dev,
9225 			   "Dropping partially supported GSO features since no GSO partial.\n");
9226 		features &= ~dev->gso_partial_features;
9227 	}
9228 
9229 	if (!(features & NETIF_F_RXCSUM)) {
9230 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9231 		 * successfully merged by hardware must also have the
9232 		 * checksum verified by hardware.  If the user does not
9233 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
9234 		 */
9235 		if (features & NETIF_F_GRO_HW) {
9236 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9237 			features &= ~NETIF_F_GRO_HW;
9238 		}
9239 	}
9240 
9241 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
9242 	if (features & NETIF_F_RXFCS) {
9243 		if (features & NETIF_F_LRO) {
9244 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9245 			features &= ~NETIF_F_LRO;
9246 		}
9247 
9248 		if (features & NETIF_F_GRO_HW) {
9249 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9250 			features &= ~NETIF_F_GRO_HW;
9251 		}
9252 	}
9253 
9254 	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9255 		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9256 		features &= ~NETIF_F_LRO;
9257 	}
9258 
9259 	if (features & NETIF_F_HW_TLS_TX) {
9260 		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9261 			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9262 		bool hw_csum = features & NETIF_F_HW_CSUM;
9263 
9264 		if (!ip_csum && !hw_csum) {
9265 			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9266 			features &= ~NETIF_F_HW_TLS_TX;
9267 		}
9268 	}
9269 
9270 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9271 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9272 		features &= ~NETIF_F_HW_TLS_RX;
9273 	}
9274 
9275 	return features;
9276 }
9277 
9278 int __netdev_update_features(struct net_device *dev)
9279 {
9280 	struct net_device *upper, *lower;
9281 	netdev_features_t features;
9282 	struct list_head *iter;
9283 	int err = -1;
9284 
9285 	ASSERT_RTNL();
9286 
9287 	features = netdev_get_wanted_features(dev);
9288 
9289 	if (dev->netdev_ops->ndo_fix_features)
9290 		features = dev->netdev_ops->ndo_fix_features(dev, features);
9291 
9292 	/* driver might be less strict about feature dependencies */
9293 	features = netdev_fix_features(dev, features);
9294 
9295 	/* some features can't be enabled if they're off on an upper device */
9296 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9297 		features = netdev_sync_upper_features(dev, upper, features);
9298 
9299 	if (dev->features == features)
9300 		goto sync_lower;
9301 
9302 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9303 		&dev->features, &features);
9304 
9305 	if (dev->netdev_ops->ndo_set_features)
9306 		err = dev->netdev_ops->ndo_set_features(dev, features);
9307 	else
9308 		err = 0;
9309 
9310 	if (unlikely(err < 0)) {
9311 		netdev_err(dev,
9312 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9313 			err, &features, &dev->features);
9314 		/* return non-0 since some features might have changed and
9315 		 * it's better to fire a spurious notification than miss it
9316 		 */
9317 		return -1;
9318 	}
9319 
9320 sync_lower:
9321 	/* some features must be disabled on lower devices when disabled
9322 	 * on an upper device (think: bonding master or bridge)
9323 	 */
9324 	netdev_for_each_lower_dev(dev, lower, iter)
9325 		netdev_sync_lower_features(dev, lower, features);
9326 
9327 	if (!err) {
9328 		netdev_features_t diff = features ^ dev->features;
9329 
9330 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9331 			/* udp_tunnel_{get,drop}_rx_info both need
9332 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9333 			 * device, or they won't do anything.
9334 			 * Thus we need to update dev->features
9335 			 * *before* calling udp_tunnel_get_rx_info,
9336 			 * but *after* calling udp_tunnel_drop_rx_info.
9337 			 */
9338 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9339 				dev->features = features;
9340 				udp_tunnel_get_rx_info(dev);
9341 			} else {
9342 				udp_tunnel_drop_rx_info(dev);
9343 			}
9344 		}
9345 
9346 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9347 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9348 				dev->features = features;
9349 				err |= vlan_get_rx_ctag_filter_info(dev);
9350 			} else {
9351 				vlan_drop_rx_ctag_filter_info(dev);
9352 			}
9353 		}
9354 
9355 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9356 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9357 				dev->features = features;
9358 				err |= vlan_get_rx_stag_filter_info(dev);
9359 			} else {
9360 				vlan_drop_rx_stag_filter_info(dev);
9361 			}
9362 		}
9363 
9364 		dev->features = features;
9365 	}
9366 
9367 	return err < 0 ? 0 : 1;
9368 }
9369 
9370 /**
9371  *	netdev_update_features - recalculate device features
9372  *	@dev: the device to check
9373  *
9374  *	Recalculate dev->features set and send notifications if it
9375  *	has changed. Should be called after driver or hardware dependent
9376  *	conditions might have changed that influence the features.
9377  */
9378 void netdev_update_features(struct net_device *dev)
9379 {
9380 	if (__netdev_update_features(dev))
9381 		netdev_features_change(dev);
9382 }
9383 EXPORT_SYMBOL(netdev_update_features);
9384 
9385 /**
9386  *	netdev_change_features - recalculate device features
9387  *	@dev: the device to check
9388  *
9389  *	Recalculate dev->features set and send notifications even
9390  *	if they have not changed. Should be called instead of
9391  *	netdev_update_features() if also dev->vlan_features might
9392  *	have changed to allow the changes to be propagated to stacked
9393  *	VLAN devices.
9394  */
9395 void netdev_change_features(struct net_device *dev)
9396 {
9397 	__netdev_update_features(dev);
9398 	netdev_features_change(dev);
9399 }
9400 EXPORT_SYMBOL(netdev_change_features);
9401 
9402 /**
9403  *	netif_stacked_transfer_operstate -	transfer operstate
9404  *	@rootdev: the root or lower level device to transfer state from
9405  *	@dev: the device to transfer operstate to
9406  *
9407  *	Transfer operational state from root to device. This is normally
9408  *	called when a stacking relationship exists between the root
9409  *	device and the device(a leaf device).
9410  */
9411 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9412 					struct net_device *dev)
9413 {
9414 	if (rootdev->operstate == IF_OPER_DORMANT)
9415 		netif_dormant_on(dev);
9416 	else
9417 		netif_dormant_off(dev);
9418 
9419 	if (rootdev->operstate == IF_OPER_TESTING)
9420 		netif_testing_on(dev);
9421 	else
9422 		netif_testing_off(dev);
9423 
9424 	if (netif_carrier_ok(rootdev))
9425 		netif_carrier_on(dev);
9426 	else
9427 		netif_carrier_off(dev);
9428 }
9429 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9430 
9431 static int netif_alloc_rx_queues(struct net_device *dev)
9432 {
9433 	unsigned int i, count = dev->num_rx_queues;
9434 	struct netdev_rx_queue *rx;
9435 	size_t sz = count * sizeof(*rx);
9436 	int err = 0;
9437 
9438 	BUG_ON(count < 1);
9439 
9440 	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9441 	if (!rx)
9442 		return -ENOMEM;
9443 
9444 	dev->_rx = rx;
9445 
9446 	for (i = 0; i < count; i++) {
9447 		rx[i].dev = dev;
9448 
9449 		/* XDP RX-queue setup */
9450 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9451 		if (err < 0)
9452 			goto err_rxq_info;
9453 	}
9454 	return 0;
9455 
9456 err_rxq_info:
9457 	/* Rollback successful reg's and free other resources */
9458 	while (i--)
9459 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9460 	kvfree(dev->_rx);
9461 	dev->_rx = NULL;
9462 	return err;
9463 }
9464 
9465 static void netif_free_rx_queues(struct net_device *dev)
9466 {
9467 	unsigned int i, count = dev->num_rx_queues;
9468 
9469 	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
9470 	if (!dev->_rx)
9471 		return;
9472 
9473 	for (i = 0; i < count; i++)
9474 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9475 
9476 	kvfree(dev->_rx);
9477 }
9478 
9479 static void netdev_init_one_queue(struct net_device *dev,
9480 				  struct netdev_queue *queue, void *_unused)
9481 {
9482 	/* Initialize queue lock */
9483 	spin_lock_init(&queue->_xmit_lock);
9484 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
9485 	queue->xmit_lock_owner = -1;
9486 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
9487 	queue->dev = dev;
9488 #ifdef CONFIG_BQL
9489 	dql_init(&queue->dql, HZ);
9490 #endif
9491 }
9492 
9493 static void netif_free_tx_queues(struct net_device *dev)
9494 {
9495 	kvfree(dev->_tx);
9496 }
9497 
9498 static int netif_alloc_netdev_queues(struct net_device *dev)
9499 {
9500 	unsigned int count = dev->num_tx_queues;
9501 	struct netdev_queue *tx;
9502 	size_t sz = count * sizeof(*tx);
9503 
9504 	if (count < 1 || count > 0xffff)
9505 		return -EINVAL;
9506 
9507 	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9508 	if (!tx)
9509 		return -ENOMEM;
9510 
9511 	dev->_tx = tx;
9512 
9513 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9514 	spin_lock_init(&dev->tx_global_lock);
9515 
9516 	return 0;
9517 }
9518 
9519 void netif_tx_stop_all_queues(struct net_device *dev)
9520 {
9521 	unsigned int i;
9522 
9523 	for (i = 0; i < dev->num_tx_queues; i++) {
9524 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
9525 
9526 		netif_tx_stop_queue(txq);
9527 	}
9528 }
9529 EXPORT_SYMBOL(netif_tx_stop_all_queues);
9530 
9531 /**
9532  *	register_netdevice	- register a network device
9533  *	@dev: device to register
9534  *
9535  *	Take a completed network device structure and add it to the kernel
9536  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9537  *	chain. 0 is returned on success. A negative errno code is returned
9538  *	on a failure to set up the device, or if the name is a duplicate.
9539  *
9540  *	Callers must hold the rtnl semaphore. You may want
9541  *	register_netdev() instead of this.
9542  *
9543  *	BUGS:
9544  *	The locking appears insufficient to guarantee two parallel registers
9545  *	will not get the same name.
9546  */
9547 
9548 int register_netdevice(struct net_device *dev)
9549 {
9550 	int ret;
9551 	struct net *net = dev_net(dev);
9552 
9553 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9554 		     NETDEV_FEATURE_COUNT);
9555 	BUG_ON(dev_boot_phase);
9556 	ASSERT_RTNL();
9557 
9558 	might_sleep();
9559 
9560 	/* When net_device's are persistent, this will be fatal. */
9561 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
9562 	BUG_ON(!net);
9563 
9564 	ret = ethtool_check_ops(dev->ethtool_ops);
9565 	if (ret)
9566 		return ret;
9567 
9568 	spin_lock_init(&dev->addr_list_lock);
9569 	netdev_set_addr_lockdep_class(dev);
9570 
9571 	ret = dev_get_valid_name(net, dev, dev->name);
9572 	if (ret < 0)
9573 		goto out;
9574 
9575 	ret = -ENOMEM;
9576 	dev->name_node = netdev_name_node_head_alloc(dev);
9577 	if (!dev->name_node)
9578 		goto out;
9579 
9580 	/* Init, if this function is available */
9581 	if (dev->netdev_ops->ndo_init) {
9582 		ret = dev->netdev_ops->ndo_init(dev);
9583 		if (ret) {
9584 			if (ret > 0)
9585 				ret = -EIO;
9586 			goto err_free_name;
9587 		}
9588 	}
9589 
9590 	if (((dev->hw_features | dev->features) &
9591 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
9592 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
9593 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
9594 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
9595 		ret = -EINVAL;
9596 		goto err_uninit;
9597 	}
9598 
9599 	ret = -EBUSY;
9600 	if (!dev->ifindex)
9601 		dev->ifindex = dev_new_index(net);
9602 	else if (__dev_get_by_index(net, dev->ifindex))
9603 		goto err_uninit;
9604 
9605 	/* Transfer changeable features to wanted_features and enable
9606 	 * software offloads (GSO and GRO).
9607 	 */
9608 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
9609 	dev->features |= NETIF_F_SOFT_FEATURES;
9610 
9611 	if (dev->udp_tunnel_nic_info) {
9612 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9613 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9614 	}
9615 
9616 	dev->wanted_features = dev->features & dev->hw_features;
9617 
9618 	if (!(dev->flags & IFF_LOOPBACK))
9619 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
9620 
9621 	/* If IPv4 TCP segmentation offload is supported we should also
9622 	 * allow the device to enable segmenting the frame with the option
9623 	 * of ignoring a static IP ID value.  This doesn't enable the
9624 	 * feature itself but allows the user to enable it later.
9625 	 */
9626 	if (dev->hw_features & NETIF_F_TSO)
9627 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
9628 	if (dev->vlan_features & NETIF_F_TSO)
9629 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
9630 	if (dev->mpls_features & NETIF_F_TSO)
9631 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
9632 	if (dev->hw_enc_features & NETIF_F_TSO)
9633 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9634 
9635 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
9636 	 */
9637 	dev->vlan_features |= NETIF_F_HIGHDMA;
9638 
9639 	/* Make NETIF_F_SG inheritable to tunnel devices.
9640 	 */
9641 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
9642 
9643 	/* Make NETIF_F_SG inheritable to MPLS.
9644 	 */
9645 	dev->mpls_features |= NETIF_F_SG;
9646 
9647 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
9648 	ret = notifier_to_errno(ret);
9649 	if (ret)
9650 		goto err_uninit;
9651 
9652 	ret = netdev_register_kobject(dev);
9653 	if (ret) {
9654 		dev->reg_state = NETREG_UNREGISTERED;
9655 		goto err_uninit;
9656 	}
9657 	dev->reg_state = NETREG_REGISTERED;
9658 
9659 	__netdev_update_features(dev);
9660 
9661 	/*
9662 	 *	Default initial state at registry is that the
9663 	 *	device is present.
9664 	 */
9665 
9666 	set_bit(__LINK_STATE_PRESENT, &dev->state);
9667 
9668 	linkwatch_init_dev(dev);
9669 
9670 	dev_init_scheduler(dev);
9671 	dev_hold(dev);
9672 	list_netdevice(dev);
9673 	add_device_randomness(dev->dev_addr, dev->addr_len);
9674 
9675 	/* If the device has permanent device address, driver should
9676 	 * set dev_addr and also addr_assign_type should be set to
9677 	 * NET_ADDR_PERM (default value).
9678 	 */
9679 	if (dev->addr_assign_type == NET_ADDR_PERM)
9680 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9681 
9682 	/* Notify protocols, that a new device appeared. */
9683 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
9684 	ret = notifier_to_errno(ret);
9685 	if (ret) {
9686 		/* Expect explicit free_netdev() on failure */
9687 		dev->needs_free_netdev = false;
9688 		unregister_netdevice_queue(dev, NULL);
9689 		goto out;
9690 	}
9691 	/*
9692 	 *	Prevent userspace races by waiting until the network
9693 	 *	device is fully setup before sending notifications.
9694 	 */
9695 	if (!dev->rtnl_link_ops ||
9696 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
9697 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9698 
9699 out:
9700 	return ret;
9701 
9702 err_uninit:
9703 	if (dev->netdev_ops->ndo_uninit)
9704 		dev->netdev_ops->ndo_uninit(dev);
9705 	if (dev->priv_destructor)
9706 		dev->priv_destructor(dev);
9707 err_free_name:
9708 	netdev_name_node_free(dev->name_node);
9709 	goto out;
9710 }
9711 EXPORT_SYMBOL(register_netdevice);
9712 
9713 /**
9714  *	init_dummy_netdev	- init a dummy network device for NAPI
9715  *	@dev: device to init
9716  *
9717  *	This takes a network device structure and initialize the minimum
9718  *	amount of fields so it can be used to schedule NAPI polls without
9719  *	registering a full blown interface. This is to be used by drivers
9720  *	that need to tie several hardware interfaces to a single NAPI
9721  *	poll scheduler due to HW limitations.
9722  */
9723 int init_dummy_netdev(struct net_device *dev)
9724 {
9725 	/* Clear everything. Note we don't initialize spinlocks
9726 	 * are they aren't supposed to be taken by any of the
9727 	 * NAPI code and this dummy netdev is supposed to be
9728 	 * only ever used for NAPI polls
9729 	 */
9730 	memset(dev, 0, sizeof(struct net_device));
9731 
9732 	/* make sure we BUG if trying to hit standard
9733 	 * register/unregister code path
9734 	 */
9735 	dev->reg_state = NETREG_DUMMY;
9736 
9737 	/* NAPI wants this */
9738 	INIT_LIST_HEAD(&dev->napi_list);
9739 
9740 	/* a dummy interface is started by default */
9741 	set_bit(__LINK_STATE_PRESENT, &dev->state);
9742 	set_bit(__LINK_STATE_START, &dev->state);
9743 
9744 	/* napi_busy_loop stats accounting wants this */
9745 	dev_net_set(dev, &init_net);
9746 
9747 	/* Note : We dont allocate pcpu_refcnt for dummy devices,
9748 	 * because users of this 'device' dont need to change
9749 	 * its refcount.
9750 	 */
9751 
9752 	return 0;
9753 }
9754 EXPORT_SYMBOL_GPL(init_dummy_netdev);
9755 
9756 
9757 /**
9758  *	register_netdev	- register a network device
9759  *	@dev: device to register
9760  *
9761  *	Take a completed network device structure and add it to the kernel
9762  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9763  *	chain. 0 is returned on success. A negative errno code is returned
9764  *	on a failure to set up the device, or if the name is a duplicate.
9765  *
9766  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
9767  *	and expands the device name if you passed a format string to
9768  *	alloc_netdev.
9769  */
9770 int register_netdev(struct net_device *dev)
9771 {
9772 	int err;
9773 
9774 	if (rtnl_lock_killable())
9775 		return -EINTR;
9776 	err = register_netdevice(dev);
9777 	rtnl_unlock();
9778 	return err;
9779 }
9780 EXPORT_SYMBOL(register_netdev);
9781 
9782 int netdev_refcnt_read(const struct net_device *dev)
9783 {
9784 #ifdef CONFIG_PCPU_DEV_REFCNT
9785 	int i, refcnt = 0;
9786 
9787 	for_each_possible_cpu(i)
9788 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
9789 	return refcnt;
9790 #else
9791 	return refcount_read(&dev->dev_refcnt);
9792 #endif
9793 }
9794 EXPORT_SYMBOL(netdev_refcnt_read);
9795 
9796 int netdev_unregister_timeout_secs __read_mostly = 10;
9797 
9798 #define WAIT_REFS_MIN_MSECS 1
9799 #define WAIT_REFS_MAX_MSECS 250
9800 /**
9801  * netdev_wait_allrefs - wait until all references are gone.
9802  * @dev: target net_device
9803  *
9804  * This is called when unregistering network devices.
9805  *
9806  * Any protocol or device that holds a reference should register
9807  * for netdevice notification, and cleanup and put back the
9808  * reference if they receive an UNREGISTER event.
9809  * We can get stuck here if buggy protocols don't correctly
9810  * call dev_put.
9811  */
9812 static void netdev_wait_allrefs(struct net_device *dev)
9813 {
9814 	unsigned long rebroadcast_time, warning_time;
9815 	int wait = 0, refcnt;
9816 
9817 	linkwatch_forget_dev(dev);
9818 
9819 	rebroadcast_time = warning_time = jiffies;
9820 	refcnt = netdev_refcnt_read(dev);
9821 
9822 	while (refcnt != 1) {
9823 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
9824 			rtnl_lock();
9825 
9826 			/* Rebroadcast unregister notification */
9827 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9828 
9829 			__rtnl_unlock();
9830 			rcu_barrier();
9831 			rtnl_lock();
9832 
9833 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
9834 				     &dev->state)) {
9835 				/* We must not have linkwatch events
9836 				 * pending on unregister. If this
9837 				 * happens, we simply run the queue
9838 				 * unscheduled, resulting in a noop
9839 				 * for this device.
9840 				 */
9841 				linkwatch_run_queue();
9842 			}
9843 
9844 			__rtnl_unlock();
9845 
9846 			rebroadcast_time = jiffies;
9847 		}
9848 
9849 		if (!wait) {
9850 			rcu_barrier();
9851 			wait = WAIT_REFS_MIN_MSECS;
9852 		} else {
9853 			msleep(wait);
9854 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
9855 		}
9856 
9857 		refcnt = netdev_refcnt_read(dev);
9858 
9859 		if (refcnt != 1 &&
9860 		    time_after(jiffies, warning_time +
9861 			       netdev_unregister_timeout_secs * HZ)) {
9862 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
9863 				 dev->name, refcnt);
9864 			warning_time = jiffies;
9865 		}
9866 	}
9867 }
9868 
9869 /* The sequence is:
9870  *
9871  *	rtnl_lock();
9872  *	...
9873  *	register_netdevice(x1);
9874  *	register_netdevice(x2);
9875  *	...
9876  *	unregister_netdevice(y1);
9877  *	unregister_netdevice(y2);
9878  *      ...
9879  *	rtnl_unlock();
9880  *	free_netdev(y1);
9881  *	free_netdev(y2);
9882  *
9883  * We are invoked by rtnl_unlock().
9884  * This allows us to deal with problems:
9885  * 1) We can delete sysfs objects which invoke hotplug
9886  *    without deadlocking with linkwatch via keventd.
9887  * 2) Since we run with the RTNL semaphore not held, we can sleep
9888  *    safely in order to wait for the netdev refcnt to drop to zero.
9889  *
9890  * We must not return until all unregister events added during
9891  * the interval the lock was held have been completed.
9892  */
9893 void netdev_run_todo(void)
9894 {
9895 	struct list_head list;
9896 #ifdef CONFIG_LOCKDEP
9897 	struct list_head unlink_list;
9898 
9899 	list_replace_init(&net_unlink_list, &unlink_list);
9900 
9901 	while (!list_empty(&unlink_list)) {
9902 		struct net_device *dev = list_first_entry(&unlink_list,
9903 							  struct net_device,
9904 							  unlink_list);
9905 		list_del_init(&dev->unlink_list);
9906 		dev->nested_level = dev->lower_level - 1;
9907 	}
9908 #endif
9909 
9910 	/* Snapshot list, allow later requests */
9911 	list_replace_init(&net_todo_list, &list);
9912 
9913 	__rtnl_unlock();
9914 
9915 
9916 	/* Wait for rcu callbacks to finish before next phase */
9917 	if (!list_empty(&list))
9918 		rcu_barrier();
9919 
9920 	while (!list_empty(&list)) {
9921 		struct net_device *dev
9922 			= list_first_entry(&list, struct net_device, todo_list);
9923 		list_del(&dev->todo_list);
9924 
9925 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
9926 			pr_err("network todo '%s' but state %d\n",
9927 			       dev->name, dev->reg_state);
9928 			dump_stack();
9929 			continue;
9930 		}
9931 
9932 		dev->reg_state = NETREG_UNREGISTERED;
9933 
9934 		netdev_wait_allrefs(dev);
9935 
9936 		/* paranoia */
9937 		BUG_ON(netdev_refcnt_read(dev) != 1);
9938 		BUG_ON(!list_empty(&dev->ptype_all));
9939 		BUG_ON(!list_empty(&dev->ptype_specific));
9940 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
9941 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
9942 #if IS_ENABLED(CONFIG_DECNET)
9943 		WARN_ON(dev->dn_ptr);
9944 #endif
9945 		if (dev->priv_destructor)
9946 			dev->priv_destructor(dev);
9947 		if (dev->needs_free_netdev)
9948 			free_netdev(dev);
9949 
9950 		/* Report a network device has been unregistered */
9951 		rtnl_lock();
9952 		dev_net(dev)->dev_unreg_count--;
9953 		__rtnl_unlock();
9954 		wake_up(&netdev_unregistering_wq);
9955 
9956 		/* Free network device */
9957 		kobject_put(&dev->dev.kobj);
9958 	}
9959 }
9960 
9961 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
9962  * all the same fields in the same order as net_device_stats, with only
9963  * the type differing, but rtnl_link_stats64 may have additional fields
9964  * at the end for newer counters.
9965  */
9966 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
9967 			     const struct net_device_stats *netdev_stats)
9968 {
9969 #if BITS_PER_LONG == 64
9970 	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9971 	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9972 	/* zero out counters that only exist in rtnl_link_stats64 */
9973 	memset((char *)stats64 + sizeof(*netdev_stats), 0,
9974 	       sizeof(*stats64) - sizeof(*netdev_stats));
9975 #else
9976 	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
9977 	const unsigned long *src = (const unsigned long *)netdev_stats;
9978 	u64 *dst = (u64 *)stats64;
9979 
9980 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
9981 	for (i = 0; i < n; i++)
9982 		dst[i] = src[i];
9983 	/* zero out counters that only exist in rtnl_link_stats64 */
9984 	memset((char *)stats64 + n * sizeof(u64), 0,
9985 	       sizeof(*stats64) - n * sizeof(u64));
9986 #endif
9987 }
9988 EXPORT_SYMBOL(netdev_stats_to_stats64);
9989 
9990 /**
9991  *	dev_get_stats	- get network device statistics
9992  *	@dev: device to get statistics from
9993  *	@storage: place to store stats
9994  *
9995  *	Get network statistics from device. Return @storage.
9996  *	The device driver may provide its own method by setting
9997  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
9998  *	otherwise the internal statistics structure is used.
9999  */
10000 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10001 					struct rtnl_link_stats64 *storage)
10002 {
10003 	const struct net_device_ops *ops = dev->netdev_ops;
10004 
10005 	if (ops->ndo_get_stats64) {
10006 		memset(storage, 0, sizeof(*storage));
10007 		ops->ndo_get_stats64(dev, storage);
10008 	} else if (ops->ndo_get_stats) {
10009 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10010 	} else {
10011 		netdev_stats_to_stats64(storage, &dev->stats);
10012 	}
10013 	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
10014 	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
10015 	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
10016 	return storage;
10017 }
10018 EXPORT_SYMBOL(dev_get_stats);
10019 
10020 /**
10021  *	dev_fetch_sw_netstats - get per-cpu network device statistics
10022  *	@s: place to store stats
10023  *	@netstats: per-cpu network stats to read from
10024  *
10025  *	Read per-cpu network statistics and populate the related fields in @s.
10026  */
10027 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10028 			   const struct pcpu_sw_netstats __percpu *netstats)
10029 {
10030 	int cpu;
10031 
10032 	for_each_possible_cpu(cpu) {
10033 		const struct pcpu_sw_netstats *stats;
10034 		struct pcpu_sw_netstats tmp;
10035 		unsigned int start;
10036 
10037 		stats = per_cpu_ptr(netstats, cpu);
10038 		do {
10039 			start = u64_stats_fetch_begin_irq(&stats->syncp);
10040 			tmp.rx_packets = stats->rx_packets;
10041 			tmp.rx_bytes   = stats->rx_bytes;
10042 			tmp.tx_packets = stats->tx_packets;
10043 			tmp.tx_bytes   = stats->tx_bytes;
10044 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
10045 
10046 		s->rx_packets += tmp.rx_packets;
10047 		s->rx_bytes   += tmp.rx_bytes;
10048 		s->tx_packets += tmp.tx_packets;
10049 		s->tx_bytes   += tmp.tx_bytes;
10050 	}
10051 }
10052 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10053 
10054 /**
10055  *	dev_get_tstats64 - ndo_get_stats64 implementation
10056  *	@dev: device to get statistics from
10057  *	@s: place to store stats
10058  *
10059  *	Populate @s from dev->stats and dev->tstats. Can be used as
10060  *	ndo_get_stats64() callback.
10061  */
10062 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10063 {
10064 	netdev_stats_to_stats64(s, &dev->stats);
10065 	dev_fetch_sw_netstats(s, dev->tstats);
10066 }
10067 EXPORT_SYMBOL_GPL(dev_get_tstats64);
10068 
10069 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10070 {
10071 	struct netdev_queue *queue = dev_ingress_queue(dev);
10072 
10073 #ifdef CONFIG_NET_CLS_ACT
10074 	if (queue)
10075 		return queue;
10076 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10077 	if (!queue)
10078 		return NULL;
10079 	netdev_init_one_queue(dev, queue, NULL);
10080 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10081 	queue->qdisc_sleeping = &noop_qdisc;
10082 	rcu_assign_pointer(dev->ingress_queue, queue);
10083 #endif
10084 	return queue;
10085 }
10086 
10087 static const struct ethtool_ops default_ethtool_ops;
10088 
10089 void netdev_set_default_ethtool_ops(struct net_device *dev,
10090 				    const struct ethtool_ops *ops)
10091 {
10092 	if (dev->ethtool_ops == &default_ethtool_ops)
10093 		dev->ethtool_ops = ops;
10094 }
10095 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10096 
10097 void netdev_freemem(struct net_device *dev)
10098 {
10099 	char *addr = (char *)dev - dev->padded;
10100 
10101 	kvfree(addr);
10102 }
10103 
10104 /**
10105  * alloc_netdev_mqs - allocate network device
10106  * @sizeof_priv: size of private data to allocate space for
10107  * @name: device name format string
10108  * @name_assign_type: origin of device name
10109  * @setup: callback to initialize device
10110  * @txqs: the number of TX subqueues to allocate
10111  * @rxqs: the number of RX subqueues to allocate
10112  *
10113  * Allocates a struct net_device with private data area for driver use
10114  * and performs basic initialization.  Also allocates subqueue structs
10115  * for each queue on the device.
10116  */
10117 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10118 		unsigned char name_assign_type,
10119 		void (*setup)(struct net_device *),
10120 		unsigned int txqs, unsigned int rxqs)
10121 {
10122 	struct net_device *dev;
10123 	unsigned int alloc_size;
10124 	struct net_device *p;
10125 
10126 	BUG_ON(strlen(name) >= sizeof(dev->name));
10127 
10128 	if (txqs < 1) {
10129 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10130 		return NULL;
10131 	}
10132 
10133 	if (rxqs < 1) {
10134 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10135 		return NULL;
10136 	}
10137 
10138 	alloc_size = sizeof(struct net_device);
10139 	if (sizeof_priv) {
10140 		/* ensure 32-byte alignment of private area */
10141 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10142 		alloc_size += sizeof_priv;
10143 	}
10144 	/* ensure 32-byte alignment of whole construct */
10145 	alloc_size += NETDEV_ALIGN - 1;
10146 
10147 	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10148 	if (!p)
10149 		return NULL;
10150 
10151 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
10152 	dev->padded = (char *)dev - (char *)p;
10153 
10154 #ifdef CONFIG_PCPU_DEV_REFCNT
10155 	dev->pcpu_refcnt = alloc_percpu(int);
10156 	if (!dev->pcpu_refcnt)
10157 		goto free_dev;
10158 	dev_hold(dev);
10159 #else
10160 	refcount_set(&dev->dev_refcnt, 1);
10161 #endif
10162 
10163 	if (dev_addr_init(dev))
10164 		goto free_pcpu;
10165 
10166 	dev_mc_init(dev);
10167 	dev_uc_init(dev);
10168 
10169 	dev_net_set(dev, &init_net);
10170 
10171 	dev->gso_max_size = GSO_MAX_SIZE;
10172 	dev->gso_max_segs = GSO_MAX_SEGS;
10173 	dev->upper_level = 1;
10174 	dev->lower_level = 1;
10175 #ifdef CONFIG_LOCKDEP
10176 	dev->nested_level = 0;
10177 	INIT_LIST_HEAD(&dev->unlink_list);
10178 #endif
10179 
10180 	INIT_LIST_HEAD(&dev->napi_list);
10181 	INIT_LIST_HEAD(&dev->unreg_list);
10182 	INIT_LIST_HEAD(&dev->close_list);
10183 	INIT_LIST_HEAD(&dev->link_watch_list);
10184 	INIT_LIST_HEAD(&dev->adj_list.upper);
10185 	INIT_LIST_HEAD(&dev->adj_list.lower);
10186 	INIT_LIST_HEAD(&dev->ptype_all);
10187 	INIT_LIST_HEAD(&dev->ptype_specific);
10188 	INIT_LIST_HEAD(&dev->net_notifier_list);
10189 #ifdef CONFIG_NET_SCHED
10190 	hash_init(dev->qdisc_hash);
10191 #endif
10192 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10193 	setup(dev);
10194 
10195 	if (!dev->tx_queue_len) {
10196 		dev->priv_flags |= IFF_NO_QUEUE;
10197 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10198 	}
10199 
10200 	dev->num_tx_queues = txqs;
10201 	dev->real_num_tx_queues = txqs;
10202 	if (netif_alloc_netdev_queues(dev))
10203 		goto free_all;
10204 
10205 	dev->num_rx_queues = rxqs;
10206 	dev->real_num_rx_queues = rxqs;
10207 	if (netif_alloc_rx_queues(dev))
10208 		goto free_all;
10209 
10210 	strcpy(dev->name, name);
10211 	dev->name_assign_type = name_assign_type;
10212 	dev->group = INIT_NETDEV_GROUP;
10213 	if (!dev->ethtool_ops)
10214 		dev->ethtool_ops = &default_ethtool_ops;
10215 
10216 	nf_hook_netdev_init(dev);
10217 
10218 	return dev;
10219 
10220 free_all:
10221 	free_netdev(dev);
10222 	return NULL;
10223 
10224 free_pcpu:
10225 #ifdef CONFIG_PCPU_DEV_REFCNT
10226 	free_percpu(dev->pcpu_refcnt);
10227 free_dev:
10228 #endif
10229 	netdev_freemem(dev);
10230 	return NULL;
10231 }
10232 EXPORT_SYMBOL(alloc_netdev_mqs);
10233 
10234 /**
10235  * free_netdev - free network device
10236  * @dev: device
10237  *
10238  * This function does the last stage of destroying an allocated device
10239  * interface. The reference to the device object is released. If this
10240  * is the last reference then it will be freed.Must be called in process
10241  * context.
10242  */
10243 void free_netdev(struct net_device *dev)
10244 {
10245 	struct napi_struct *p, *n;
10246 
10247 	might_sleep();
10248 
10249 	/* When called immediately after register_netdevice() failed the unwind
10250 	 * handling may still be dismantling the device. Handle that case by
10251 	 * deferring the free.
10252 	 */
10253 	if (dev->reg_state == NETREG_UNREGISTERING) {
10254 		ASSERT_RTNL();
10255 		dev->needs_free_netdev = true;
10256 		return;
10257 	}
10258 
10259 	netif_free_tx_queues(dev);
10260 	netif_free_rx_queues(dev);
10261 
10262 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10263 
10264 	/* Flush device addresses */
10265 	dev_addr_flush(dev);
10266 
10267 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10268 		netif_napi_del(p);
10269 
10270 #ifdef CONFIG_PCPU_DEV_REFCNT
10271 	free_percpu(dev->pcpu_refcnt);
10272 	dev->pcpu_refcnt = NULL;
10273 #endif
10274 	free_percpu(dev->xdp_bulkq);
10275 	dev->xdp_bulkq = NULL;
10276 
10277 	/*  Compatibility with error handling in drivers */
10278 	if (dev->reg_state == NETREG_UNINITIALIZED) {
10279 		netdev_freemem(dev);
10280 		return;
10281 	}
10282 
10283 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10284 	dev->reg_state = NETREG_RELEASED;
10285 
10286 	/* will free via device release */
10287 	put_device(&dev->dev);
10288 }
10289 EXPORT_SYMBOL(free_netdev);
10290 
10291 /**
10292  *	synchronize_net -  Synchronize with packet receive processing
10293  *
10294  *	Wait for packets currently being received to be done.
10295  *	Does not block later packets from starting.
10296  */
10297 void synchronize_net(void)
10298 {
10299 	might_sleep();
10300 	if (rtnl_is_locked())
10301 		synchronize_rcu_expedited();
10302 	else
10303 		synchronize_rcu();
10304 }
10305 EXPORT_SYMBOL(synchronize_net);
10306 
10307 /**
10308  *	unregister_netdevice_queue - remove device from the kernel
10309  *	@dev: device
10310  *	@head: list
10311  *
10312  *	This function shuts down a device interface and removes it
10313  *	from the kernel tables.
10314  *	If head not NULL, device is queued to be unregistered later.
10315  *
10316  *	Callers must hold the rtnl semaphore.  You may want
10317  *	unregister_netdev() instead of this.
10318  */
10319 
10320 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10321 {
10322 	ASSERT_RTNL();
10323 
10324 	if (head) {
10325 		list_move_tail(&dev->unreg_list, head);
10326 	} else {
10327 		LIST_HEAD(single);
10328 
10329 		list_add(&dev->unreg_list, &single);
10330 		unregister_netdevice_many(&single);
10331 	}
10332 }
10333 EXPORT_SYMBOL(unregister_netdevice_queue);
10334 
10335 /**
10336  *	unregister_netdevice_many - unregister many devices
10337  *	@head: list of devices
10338  *
10339  *  Note: As most callers use a stack allocated list_head,
10340  *  we force a list_del() to make sure stack wont be corrupted later.
10341  */
10342 void unregister_netdevice_many(struct list_head *head)
10343 {
10344 	struct net_device *dev, *tmp;
10345 	LIST_HEAD(close_head);
10346 
10347 	BUG_ON(dev_boot_phase);
10348 	ASSERT_RTNL();
10349 
10350 	if (list_empty(head))
10351 		return;
10352 
10353 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10354 		/* Some devices call without registering
10355 		 * for initialization unwind. Remove those
10356 		 * devices and proceed with the remaining.
10357 		 */
10358 		if (dev->reg_state == NETREG_UNINITIALIZED) {
10359 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10360 				 dev->name, dev);
10361 
10362 			WARN_ON(1);
10363 			list_del(&dev->unreg_list);
10364 			continue;
10365 		}
10366 		dev->dismantle = true;
10367 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
10368 	}
10369 
10370 	/* If device is running, close it first. */
10371 	list_for_each_entry(dev, head, unreg_list)
10372 		list_add_tail(&dev->close_list, &close_head);
10373 	dev_close_many(&close_head, true);
10374 
10375 	list_for_each_entry(dev, head, unreg_list) {
10376 		/* And unlink it from device chain. */
10377 		unlist_netdevice(dev);
10378 
10379 		dev->reg_state = NETREG_UNREGISTERING;
10380 	}
10381 	flush_all_backlogs();
10382 
10383 	synchronize_net();
10384 
10385 	list_for_each_entry(dev, head, unreg_list) {
10386 		struct sk_buff *skb = NULL;
10387 
10388 		/* Shutdown queueing discipline. */
10389 		dev_shutdown(dev);
10390 
10391 		dev_xdp_uninstall(dev);
10392 
10393 		/* Notify protocols, that we are about to destroy
10394 		 * this device. They should clean all the things.
10395 		 */
10396 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10397 
10398 		if (!dev->rtnl_link_ops ||
10399 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10400 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10401 						     GFP_KERNEL, NULL, 0);
10402 
10403 		/*
10404 		 *	Flush the unicast and multicast chains
10405 		 */
10406 		dev_uc_flush(dev);
10407 		dev_mc_flush(dev);
10408 
10409 		netdev_name_node_alt_flush(dev);
10410 		netdev_name_node_free(dev->name_node);
10411 
10412 		if (dev->netdev_ops->ndo_uninit)
10413 			dev->netdev_ops->ndo_uninit(dev);
10414 
10415 		if (skb)
10416 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
10417 
10418 		/* Notifier chain MUST detach us all upper devices. */
10419 		WARN_ON(netdev_has_any_upper_dev(dev));
10420 		WARN_ON(netdev_has_any_lower_dev(dev));
10421 
10422 		/* Remove entries from kobject tree */
10423 		netdev_unregister_kobject(dev);
10424 #ifdef CONFIG_XPS
10425 		/* Remove XPS queueing entries */
10426 		netif_reset_xps_queues_gt(dev, 0);
10427 #endif
10428 	}
10429 
10430 	synchronize_net();
10431 
10432 	list_for_each_entry(dev, head, unreg_list) {
10433 		dev_put(dev);
10434 		net_set_todo(dev);
10435 	}
10436 
10437 	list_del(head);
10438 }
10439 EXPORT_SYMBOL(unregister_netdevice_many);
10440 
10441 /**
10442  *	unregister_netdev - remove device from the kernel
10443  *	@dev: device
10444  *
10445  *	This function shuts down a device interface and removes it
10446  *	from the kernel tables.
10447  *
10448  *	This is just a wrapper for unregister_netdevice that takes
10449  *	the rtnl semaphore.  In general you want to use this and not
10450  *	unregister_netdevice.
10451  */
10452 void unregister_netdev(struct net_device *dev)
10453 {
10454 	rtnl_lock();
10455 	unregister_netdevice(dev);
10456 	rtnl_unlock();
10457 }
10458 EXPORT_SYMBOL(unregister_netdev);
10459 
10460 /**
10461  *	__dev_change_net_namespace - move device to different nethost namespace
10462  *	@dev: device
10463  *	@net: network namespace
10464  *	@pat: If not NULL name pattern to try if the current device name
10465  *	      is already taken in the destination network namespace.
10466  *	@new_ifindex: If not zero, specifies device index in the target
10467  *	              namespace.
10468  *
10469  *	This function shuts down a device interface and moves it
10470  *	to a new network namespace. On success 0 is returned, on
10471  *	a failure a netagive errno code is returned.
10472  *
10473  *	Callers must hold the rtnl semaphore.
10474  */
10475 
10476 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10477 			       const char *pat, int new_ifindex)
10478 {
10479 	struct net *net_old = dev_net(dev);
10480 	int err, new_nsid;
10481 
10482 	ASSERT_RTNL();
10483 
10484 	/* Don't allow namespace local devices to be moved. */
10485 	err = -EINVAL;
10486 	if (dev->features & NETIF_F_NETNS_LOCAL)
10487 		goto out;
10488 
10489 	/* Ensure the device has been registrered */
10490 	if (dev->reg_state != NETREG_REGISTERED)
10491 		goto out;
10492 
10493 	/* Get out if there is nothing todo */
10494 	err = 0;
10495 	if (net_eq(net_old, net))
10496 		goto out;
10497 
10498 	/* Pick the destination device name, and ensure
10499 	 * we can use it in the destination network namespace.
10500 	 */
10501 	err = -EEXIST;
10502 	if (netdev_name_in_use(net, dev->name)) {
10503 		/* We get here if we can't use the current device name */
10504 		if (!pat)
10505 			goto out;
10506 		err = dev_get_valid_name(net, dev, pat);
10507 		if (err < 0)
10508 			goto out;
10509 	}
10510 
10511 	/* Check that new_ifindex isn't used yet. */
10512 	err = -EBUSY;
10513 	if (new_ifindex && __dev_get_by_index(net, new_ifindex))
10514 		goto out;
10515 
10516 	/*
10517 	 * And now a mini version of register_netdevice unregister_netdevice.
10518 	 */
10519 
10520 	/* If device is running close it first. */
10521 	dev_close(dev);
10522 
10523 	/* And unlink it from device chain */
10524 	unlist_netdevice(dev);
10525 
10526 	synchronize_net();
10527 
10528 	/* Shutdown queueing discipline. */
10529 	dev_shutdown(dev);
10530 
10531 	/* Notify protocols, that we are about to destroy
10532 	 * this device. They should clean all the things.
10533 	 *
10534 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
10535 	 * This is wanted because this way 8021q and macvlan know
10536 	 * the device is just moving and can keep their slaves up.
10537 	 */
10538 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10539 	rcu_barrier();
10540 
10541 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
10542 	/* If there is an ifindex conflict assign a new one */
10543 	if (!new_ifindex) {
10544 		if (__dev_get_by_index(net, dev->ifindex))
10545 			new_ifindex = dev_new_index(net);
10546 		else
10547 			new_ifindex = dev->ifindex;
10548 	}
10549 
10550 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
10551 			    new_ifindex);
10552 
10553 	/*
10554 	 *	Flush the unicast and multicast chains
10555 	 */
10556 	dev_uc_flush(dev);
10557 	dev_mc_flush(dev);
10558 
10559 	/* Send a netdev-removed uevent to the old namespace */
10560 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
10561 	netdev_adjacent_del_links(dev);
10562 
10563 	/* Move per-net netdevice notifiers that are following the netdevice */
10564 	move_netdevice_notifiers_dev_net(dev, net);
10565 
10566 	/* Actually switch the network namespace */
10567 	dev_net_set(dev, net);
10568 	dev->ifindex = new_ifindex;
10569 
10570 	/* Send a netdev-add uevent to the new namespace */
10571 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
10572 	netdev_adjacent_add_links(dev);
10573 
10574 	/* Fixup kobjects */
10575 	err = device_rename(&dev->dev, dev->name);
10576 	WARN_ON(err);
10577 
10578 	/* Adapt owner in case owning user namespace of target network
10579 	 * namespace is different from the original one.
10580 	 */
10581 	err = netdev_change_owner(dev, net_old, net);
10582 	WARN_ON(err);
10583 
10584 	/* Add the device back in the hashes */
10585 	list_netdevice(dev);
10586 
10587 	/* Notify protocols, that a new device appeared. */
10588 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
10589 
10590 	/*
10591 	 *	Prevent userspace races by waiting until the network
10592 	 *	device is fully setup before sending notifications.
10593 	 */
10594 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10595 
10596 	synchronize_net();
10597 	err = 0;
10598 out:
10599 	return err;
10600 }
10601 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
10602 
10603 static int dev_cpu_dead(unsigned int oldcpu)
10604 {
10605 	struct sk_buff **list_skb;
10606 	struct sk_buff *skb;
10607 	unsigned int cpu;
10608 	struct softnet_data *sd, *oldsd, *remsd = NULL;
10609 
10610 	local_irq_disable();
10611 	cpu = smp_processor_id();
10612 	sd = &per_cpu(softnet_data, cpu);
10613 	oldsd = &per_cpu(softnet_data, oldcpu);
10614 
10615 	/* Find end of our completion_queue. */
10616 	list_skb = &sd->completion_queue;
10617 	while (*list_skb)
10618 		list_skb = &(*list_skb)->next;
10619 	/* Append completion queue from offline CPU. */
10620 	*list_skb = oldsd->completion_queue;
10621 	oldsd->completion_queue = NULL;
10622 
10623 	/* Append output queue from offline CPU. */
10624 	if (oldsd->output_queue) {
10625 		*sd->output_queue_tailp = oldsd->output_queue;
10626 		sd->output_queue_tailp = oldsd->output_queue_tailp;
10627 		oldsd->output_queue = NULL;
10628 		oldsd->output_queue_tailp = &oldsd->output_queue;
10629 	}
10630 	/* Append NAPI poll list from offline CPU, with one exception :
10631 	 * process_backlog() must be called by cpu owning percpu backlog.
10632 	 * We properly handle process_queue & input_pkt_queue later.
10633 	 */
10634 	while (!list_empty(&oldsd->poll_list)) {
10635 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
10636 							    struct napi_struct,
10637 							    poll_list);
10638 
10639 		list_del_init(&napi->poll_list);
10640 		if (napi->poll == process_backlog)
10641 			napi->state = 0;
10642 		else
10643 			____napi_schedule(sd, napi);
10644 	}
10645 
10646 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
10647 	local_irq_enable();
10648 
10649 #ifdef CONFIG_RPS
10650 	remsd = oldsd->rps_ipi_list;
10651 	oldsd->rps_ipi_list = NULL;
10652 #endif
10653 	/* send out pending IPI's on offline CPU */
10654 	net_rps_send_ipi(remsd);
10655 
10656 	/* Process offline CPU's input_pkt_queue */
10657 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
10658 		netif_rx_ni(skb);
10659 		input_queue_head_incr(oldsd);
10660 	}
10661 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
10662 		netif_rx_ni(skb);
10663 		input_queue_head_incr(oldsd);
10664 	}
10665 
10666 	return 0;
10667 }
10668 
10669 /**
10670  *	netdev_increment_features - increment feature set by one
10671  *	@all: current feature set
10672  *	@one: new feature set
10673  *	@mask: mask feature set
10674  *
10675  *	Computes a new feature set after adding a device with feature set
10676  *	@one to the master device with current feature set @all.  Will not
10677  *	enable anything that is off in @mask. Returns the new feature set.
10678  */
10679 netdev_features_t netdev_increment_features(netdev_features_t all,
10680 	netdev_features_t one, netdev_features_t mask)
10681 {
10682 	if (mask & NETIF_F_HW_CSUM)
10683 		mask |= NETIF_F_CSUM_MASK;
10684 	mask |= NETIF_F_VLAN_CHALLENGED;
10685 
10686 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
10687 	all &= one | ~NETIF_F_ALL_FOR_ALL;
10688 
10689 	/* If one device supports hw checksumming, set for all. */
10690 	if (all & NETIF_F_HW_CSUM)
10691 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
10692 
10693 	return all;
10694 }
10695 EXPORT_SYMBOL(netdev_increment_features);
10696 
10697 static struct hlist_head * __net_init netdev_create_hash(void)
10698 {
10699 	int i;
10700 	struct hlist_head *hash;
10701 
10702 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
10703 	if (hash != NULL)
10704 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
10705 			INIT_HLIST_HEAD(&hash[i]);
10706 
10707 	return hash;
10708 }
10709 
10710 /* Initialize per network namespace state */
10711 static int __net_init netdev_init(struct net *net)
10712 {
10713 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
10714 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
10715 
10716 	if (net != &init_net)
10717 		INIT_LIST_HEAD(&net->dev_base_head);
10718 
10719 	net->dev_name_head = netdev_create_hash();
10720 	if (net->dev_name_head == NULL)
10721 		goto err_name;
10722 
10723 	net->dev_index_head = netdev_create_hash();
10724 	if (net->dev_index_head == NULL)
10725 		goto err_idx;
10726 
10727 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
10728 
10729 	return 0;
10730 
10731 err_idx:
10732 	kfree(net->dev_name_head);
10733 err_name:
10734 	return -ENOMEM;
10735 }
10736 
10737 /**
10738  *	netdev_drivername - network driver for the device
10739  *	@dev: network device
10740  *
10741  *	Determine network driver for device.
10742  */
10743 const char *netdev_drivername(const struct net_device *dev)
10744 {
10745 	const struct device_driver *driver;
10746 	const struct device *parent;
10747 	const char *empty = "";
10748 
10749 	parent = dev->dev.parent;
10750 	if (!parent)
10751 		return empty;
10752 
10753 	driver = parent->driver;
10754 	if (driver && driver->name)
10755 		return driver->name;
10756 	return empty;
10757 }
10758 
10759 static void __netdev_printk(const char *level, const struct net_device *dev,
10760 			    struct va_format *vaf)
10761 {
10762 	if (dev && dev->dev.parent) {
10763 		dev_printk_emit(level[1] - '0',
10764 				dev->dev.parent,
10765 				"%s %s %s%s: %pV",
10766 				dev_driver_string(dev->dev.parent),
10767 				dev_name(dev->dev.parent),
10768 				netdev_name(dev), netdev_reg_state(dev),
10769 				vaf);
10770 	} else if (dev) {
10771 		printk("%s%s%s: %pV",
10772 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
10773 	} else {
10774 		printk("%s(NULL net_device): %pV", level, vaf);
10775 	}
10776 }
10777 
10778 void netdev_printk(const char *level, const struct net_device *dev,
10779 		   const char *format, ...)
10780 {
10781 	struct va_format vaf;
10782 	va_list args;
10783 
10784 	va_start(args, format);
10785 
10786 	vaf.fmt = format;
10787 	vaf.va = &args;
10788 
10789 	__netdev_printk(level, dev, &vaf);
10790 
10791 	va_end(args);
10792 }
10793 EXPORT_SYMBOL(netdev_printk);
10794 
10795 #define define_netdev_printk_level(func, level)			\
10796 void func(const struct net_device *dev, const char *fmt, ...)	\
10797 {								\
10798 	struct va_format vaf;					\
10799 	va_list args;						\
10800 								\
10801 	va_start(args, fmt);					\
10802 								\
10803 	vaf.fmt = fmt;						\
10804 	vaf.va = &args;						\
10805 								\
10806 	__netdev_printk(level, dev, &vaf);			\
10807 								\
10808 	va_end(args);						\
10809 }								\
10810 EXPORT_SYMBOL(func);
10811 
10812 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
10813 define_netdev_printk_level(netdev_alert, KERN_ALERT);
10814 define_netdev_printk_level(netdev_crit, KERN_CRIT);
10815 define_netdev_printk_level(netdev_err, KERN_ERR);
10816 define_netdev_printk_level(netdev_warn, KERN_WARNING);
10817 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
10818 define_netdev_printk_level(netdev_info, KERN_INFO);
10819 
10820 static void __net_exit netdev_exit(struct net *net)
10821 {
10822 	kfree(net->dev_name_head);
10823 	kfree(net->dev_index_head);
10824 	if (net != &init_net)
10825 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
10826 }
10827 
10828 static struct pernet_operations __net_initdata netdev_net_ops = {
10829 	.init = netdev_init,
10830 	.exit = netdev_exit,
10831 };
10832 
10833 static void __net_exit default_device_exit(struct net *net)
10834 {
10835 	struct net_device *dev, *aux;
10836 	/*
10837 	 * Push all migratable network devices back to the
10838 	 * initial network namespace
10839 	 */
10840 	rtnl_lock();
10841 	for_each_netdev_safe(net, dev, aux) {
10842 		int err;
10843 		char fb_name[IFNAMSIZ];
10844 
10845 		/* Ignore unmoveable devices (i.e. loopback) */
10846 		if (dev->features & NETIF_F_NETNS_LOCAL)
10847 			continue;
10848 
10849 		/* Leave virtual devices for the generic cleanup */
10850 		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
10851 			continue;
10852 
10853 		/* Push remaining network devices to init_net */
10854 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
10855 		if (netdev_name_in_use(&init_net, fb_name))
10856 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
10857 		err = dev_change_net_namespace(dev, &init_net, fb_name);
10858 		if (err) {
10859 			pr_emerg("%s: failed to move %s to init_net: %d\n",
10860 				 __func__, dev->name, err);
10861 			BUG();
10862 		}
10863 	}
10864 	rtnl_unlock();
10865 }
10866 
10867 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
10868 {
10869 	/* Return with the rtnl_lock held when there are no network
10870 	 * devices unregistering in any network namespace in net_list.
10871 	 */
10872 	struct net *net;
10873 	bool unregistering;
10874 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
10875 
10876 	add_wait_queue(&netdev_unregistering_wq, &wait);
10877 	for (;;) {
10878 		unregistering = false;
10879 		rtnl_lock();
10880 		list_for_each_entry(net, net_list, exit_list) {
10881 			if (net->dev_unreg_count > 0) {
10882 				unregistering = true;
10883 				break;
10884 			}
10885 		}
10886 		if (!unregistering)
10887 			break;
10888 		__rtnl_unlock();
10889 
10890 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
10891 	}
10892 	remove_wait_queue(&netdev_unregistering_wq, &wait);
10893 }
10894 
10895 static void __net_exit default_device_exit_batch(struct list_head *net_list)
10896 {
10897 	/* At exit all network devices most be removed from a network
10898 	 * namespace.  Do this in the reverse order of registration.
10899 	 * Do this across as many network namespaces as possible to
10900 	 * improve batching efficiency.
10901 	 */
10902 	struct net_device *dev;
10903 	struct net *net;
10904 	LIST_HEAD(dev_kill_list);
10905 
10906 	/* To prevent network device cleanup code from dereferencing
10907 	 * loopback devices or network devices that have been freed
10908 	 * wait here for all pending unregistrations to complete,
10909 	 * before unregistring the loopback device and allowing the
10910 	 * network namespace be freed.
10911 	 *
10912 	 * The netdev todo list containing all network devices
10913 	 * unregistrations that happen in default_device_exit_batch
10914 	 * will run in the rtnl_unlock() at the end of
10915 	 * default_device_exit_batch.
10916 	 */
10917 	rtnl_lock_unregistering(net_list);
10918 	list_for_each_entry(net, net_list, exit_list) {
10919 		for_each_netdev_reverse(net, dev) {
10920 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
10921 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
10922 			else
10923 				unregister_netdevice_queue(dev, &dev_kill_list);
10924 		}
10925 	}
10926 	unregister_netdevice_many(&dev_kill_list);
10927 	rtnl_unlock();
10928 }
10929 
10930 static struct pernet_operations __net_initdata default_device_ops = {
10931 	.exit = default_device_exit,
10932 	.exit_batch = default_device_exit_batch,
10933 };
10934 
10935 /*
10936  *	Initialize the DEV module. At boot time this walks the device list and
10937  *	unhooks any devices that fail to initialise (normally hardware not
10938  *	present) and leaves us with a valid list of present and active devices.
10939  *
10940  */
10941 
10942 /*
10943  *       This is called single threaded during boot, so no need
10944  *       to take the rtnl semaphore.
10945  */
10946 static int __init net_dev_init(void)
10947 {
10948 	int i, rc = -ENOMEM;
10949 
10950 	BUG_ON(!dev_boot_phase);
10951 
10952 	if (dev_proc_init())
10953 		goto out;
10954 
10955 	if (netdev_kobject_init())
10956 		goto out;
10957 
10958 	INIT_LIST_HEAD(&ptype_all);
10959 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
10960 		INIT_LIST_HEAD(&ptype_base[i]);
10961 
10962 	if (register_pernet_subsys(&netdev_net_ops))
10963 		goto out;
10964 
10965 	/*
10966 	 *	Initialise the packet receive queues.
10967 	 */
10968 
10969 	for_each_possible_cpu(i) {
10970 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
10971 		struct softnet_data *sd = &per_cpu(softnet_data, i);
10972 
10973 		INIT_WORK(flush, flush_backlog);
10974 
10975 		skb_queue_head_init(&sd->input_pkt_queue);
10976 		skb_queue_head_init(&sd->process_queue);
10977 #ifdef CONFIG_XFRM_OFFLOAD
10978 		skb_queue_head_init(&sd->xfrm_backlog);
10979 #endif
10980 		INIT_LIST_HEAD(&sd->poll_list);
10981 		sd->output_queue_tailp = &sd->output_queue;
10982 #ifdef CONFIG_RPS
10983 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
10984 		sd->cpu = i;
10985 #endif
10986 
10987 		init_gro_hash(&sd->backlog);
10988 		sd->backlog.poll = process_backlog;
10989 		sd->backlog.weight = weight_p;
10990 	}
10991 
10992 	dev_boot_phase = 0;
10993 
10994 	/* The loopback device is special if any other network devices
10995 	 * is present in a network namespace the loopback device must
10996 	 * be present. Since we now dynamically allocate and free the
10997 	 * loopback device ensure this invariant is maintained by
10998 	 * keeping the loopback device as the first device on the
10999 	 * list of network devices.  Ensuring the loopback devices
11000 	 * is the first device that appears and the last network device
11001 	 * that disappears.
11002 	 */
11003 	if (register_pernet_device(&loopback_net_ops))
11004 		goto out;
11005 
11006 	if (register_pernet_device(&default_device_ops))
11007 		goto out;
11008 
11009 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11010 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11011 
11012 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11013 				       NULL, dev_cpu_dead);
11014 	WARN_ON(rc < 0);
11015 	rc = 0;
11016 out:
11017 	return rc;
11018 }
11019 
11020 subsys_initcall(net_dev_init);
11021