xref: /linux/net/core/dev.c (revision e5c5d22e8dcf7c2d430336cbf8e180bd38e8daf1)
/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * For example usages, see register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
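/*
 * Illustrative sketch (not part of this file): the two pure-reader
 * patterns described above. A reader may take dev_base_lock for
 * reading, or rely on RCU with the _rcu iterator. The helpers below
 * are hypothetical and only print device names.
 */
#if 0
static void example_walk_locked(struct net *net)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(net, dev)
		pr_info("saw %s\n", dev->name);
	read_unlock(&dev_base_lock);
}

static void example_walk_rcu(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("saw %s\n", dev->name);
	rcu_read_unlock();
}
#endif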

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	This holds today; do not change it.
 *	Explanation: if a packet-mangling protocol handler were first on
 *	the list, it could not detect that the packet is cloned and should
 *	be copied-on-write, so it would modify the clone in place and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
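/*
 * Illustrative sketch (not part of this file): a minimal ETH_P_ALL tap
 * registered with dev_add_pack() and torn down with dev_remove_pack().
 * my_tap_rcv() and the module hooks are hypothetical; a real handler
 * must free or consume the skb it is given.
 */
#if 0
static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	pr_info("tap: %u bytes on %s\n", skb->len, dev->name);
	kfree_skb(skb);
	return 0;
}

static struct packet_type my_tap __read_mostly = {
	.type = htons(ETH_P_ALL),	/* all protocols */
	.func = my_tap_rcv,
};

static int __init my_tap_init(void)
{
	dev_add_pack(&my_tap);
	return 0;
}

static void __exit my_tap_exit(void)
{
	dev_remove_pack(&my_tap);	/* sleeps until no CPU can see it */
}
#endif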


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
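/*
 * Illustrative sketch (not part of this file): registering GRO
 * callbacks for a protocol. The field layout is assumed from struct
 * packet_offload in this kernel version; the callbacks below are
 * hypothetical placeholders that do no aggregation.
 */
#if 0
static struct sk_buff **my_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb)
{
	return NULL;	/* placeholder: never merge */
}

static int my_gro_complete(struct sk_buff *skb)
{
	return 0;	/* placeholder */
}

static struct packet_offload my_proto_offload __read_mostly = {
	.type = htons(ETH_P_IP),
	.callbacks = {
		.gro_receive	= my_gro_receive,
		.gro_complete	= my_gro_complete,
	},
};

static int __init my_offload_init(void)
{
	dev_add_offload(&my_proto_offload);
	return 0;
}
#endif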

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	Any settings found are applied to the device, to be used
 *	later during device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of the device.
 *	Returns 1 if the device is already registered (to indicate it
 *	should not be probed), the configured base address if one is
 *	found, and 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for a netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
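/*
 * Illustrative usage (an assumption inferred from the parsing above,
 * not documented in this file): get_options() consumes up to four
 * leading integers (irq, base_addr, mem_start, mem_end) and the rest
 * of the string is taken as the device name, e.g. on the kernel
 * command line:
 *
 *	netdev=5,0x240,0,0,eth0
 *
 * which records irq 5 and I/O base 0x240 for the device "eth0".
 */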

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU read lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
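/*
 * Illustrative sketch (not part of this file): the two lookup styles.
 * Under rcu_read_lock() the device may only be used inside the read
 * section; dev_get_by_name() takes a reference that must later be
 * dropped with dev_put(). The helper and "eth0" are hypothetical.
 */
#if 0
static void example_lookup(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();

	dev = dev_get_by_name(net, "eth0");
	if (dev) {
		/* safe to use outside the RCU section until dev_put() */
		dev_put(dev);
	}
}
#endif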

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU read lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or %NULL if it is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
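/*
 * Illustrative sketch (not part of this file): looking up a device by
 * hardware address under RCU. The helper and the address below are
 * arbitrary example values.
 */
#if 0
static bool example_has_mac(struct net *net)
{
	static const char ha[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct net_device *dev;
	bool found;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, ha);
	found = dev != NULL;
	rcu_read_unlock();
	return found;
}
#endif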

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or %NULL if none is found. Must be called
 *	inside rcu_read_lock(); the result's refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
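/*
 * Illustrative sketch (not part of this file): a driver asking for the
 * next free "foo%d" name before registration. The helper and "foo%d"
 * prefix are hypothetical.
 */
#if 0
static int example_name(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "foo%d");

	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */
	/* dev->name is now e.g. "foo0" */
	return 0;
}
#endif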

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * 	netdev_notify_peers - notify network peers about existence of @dev
 * 	@dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	ret = netpoll_rx_disable(dev);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
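/*
 * Illustrative sketch (not part of this file): bringing an interface
 * up from process context. dev_open() must be called under RTNL; the
 * helper is hypothetical.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();
	return err;	/* 0 on success or if the device was already up */
}
#endif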

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize with a scheduled poll. We cannot touch the poll
		 * list; it may even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * ndo_stop() will invoke napi_disable() on all of the
		 * device's napi_struct instances.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device-specific close. This cannot fail
		 *	and is only done if the device is UP.
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	retval = netpoll_rx_disable(dev);
	if (retval)
		return retval;

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	int ret = 0;
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		ret = netpoll_rx_disable(dev);
		if (ret)
			return ret;

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return ret;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device,
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
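/*
 * Illustrative sketch (not part of this file): a notifier that logs
 * devices coming up and going down. In this kernel version the chain
 * passes the net_device pointer directly (see
 * call_netdevice_notifiers() below). Because registration replays
 * events, already-up devices are reported at registration time too.
 */
#if 0
static int example_event(struct notifier_block *nb,
			 unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_nb);
}
#endif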

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented beforehand
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
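/*
 * Illustrative sketch (not part of this file): how a pair-style driver
 * (veth-like) might hand a transmitted skb to its peer's receive path.
 * struct example_priv and its peer field are hypothetical.
 */
#if 0
struct example_priv {
	struct net_device *peer;
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* returns NET_RX_SUCCESS or NET_RX_DROP; the skb is consumed
	 * either way
	 */
	dev_forward_skb(priv->peer, skb);
	return NETDEV_TX_OK;
}
#endif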
1646 
1647 static inline int deliver_skb(struct sk_buff *skb,
1648 			      struct packet_type *pt_prev,
1649 			      struct net_device *orig_dev)
1650 {
1651 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1652 		return -ENOMEM;
1653 	atomic_inc(&skb->users);
1654 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1655 }
1656 
1657 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1658 {
1659 	if (!ptype->af_packet_priv || !skb->sk)
1660 		return false;
1661 
1662 	if (ptype->id_match)
1663 		return ptype->id_match(ptype, skb->sk);
1664 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1665 		return true;
1666 
1667 	return false;
1668 }
1669 
1670 /*
1671  *	Support routine. Sends outgoing frames to any network
1672  *	taps currently in use.
1673  */
1674 
1675 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1676 {
1677 	struct packet_type *ptype;
1678 	struct sk_buff *skb2 = NULL;
1679 	struct packet_type *pt_prev = NULL;
1680 
1681 	rcu_read_lock();
1682 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1683 		/* Never send packets back to the socket
1684 		 * they originated from - MvS (miquels@drinkel.ow.org)
1685 		 */
1686 		if ((ptype->dev == dev || !ptype->dev) &&
1687 		    (!skb_loop_sk(ptype, skb))) {
1688 			if (pt_prev) {
1689 				deliver_skb(skb2, pt_prev, skb->dev);
1690 				pt_prev = ptype;
1691 				continue;
1692 			}
1693 
1694 			skb2 = skb_clone(skb, GFP_ATOMIC);
1695 			if (!skb2)
1696 				break;
1697 
1698 			net_timestamp_set(skb2);
1699 
1700 			/* skb->nh should be correctly
1701 			   set by sender, so that the second statement is
1702 			   just protection against buggy protocols.
1703 			 */
1704 			skb_reset_mac_header(skb2);
1705 
1706 			if (skb_network_header(skb2) < skb2->data ||
1707 			    skb2->network_header > skb2->tail) {
1708 				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1709 						     ntohs(skb2->protocol),
1710 						     dev->name);
1711 				skb_reset_network_header(skb2);
1712 			}
1713 
1714 			skb2->transport_header = skb2->network_header;
1715 			skb2->pkt_type = PACKET_OUTGOING;
1716 			pt_prev = ptype;
1717 		}
1718 	}
1719 	if (pt_prev)
1720 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1721 	rcu_read_unlock();
1722 }
1723 
1724 /**
1725  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1726  * @dev: Network device
1727  * @txq: number of queues available
1728  *
1729  * If real_num_tx_queues is changed the tc mappings may no longer be
1730  * valid. To resolve this verify the tc mapping remains valid and if
1731  * not NULL the mapping. With no priorities mapping to this
1732  * offset/count pair it will no longer be used. In the worst case TC0
1733  * is invalid nothing can be done so disable priority mappings. If is
1734  * expected that drivers will fix this mapping if they can before
1735  * calling netif_set_real_num_tx_queues.
1736  */
1737 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1738 {
1739 	int i;
1740 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1741 
1742 	/* If TC0 is invalidated disable TC mapping */
1743 	if (tc->offset + tc->count > txq) {
1744 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1745 		dev->num_tc = 0;
1746 		return;
1747 	}
1748 
1749 	/* Invalidated prio to tc mappings set to TC0 */
1750 	for (i = 1; i < TC_BITMASK + 1; i++) {
1751 		int q = netdev_get_prio_tc_map(dev, i);
1752 
1753 		tc = &dev->tc_to_txq[q];
1754 		if (tc->offset + tc->count > txq) {
1755 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1756 				i, q);
1757 			netdev_set_prio_tc_map(dev, i, 0);
1758 		}
1759 	}
1760 }
1761 
1762 #ifdef CONFIG_XPS
1763 static DEFINE_MUTEX(xps_map_mutex);
1764 #define xmap_dereference(P)		\
1765 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1766 
1767 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1768 					int cpu, u16 index)
1769 {
1770 	struct xps_map *map = NULL;
1771 	int pos;
1772 
1773 	if (dev_maps)
1774 		map = xmap_dereference(dev_maps->cpu_map[cpu]);
1775 
1776 	for (pos = 0; map && pos < map->len; pos++) {
1777 		if (map->queues[pos] == index) {
1778 			if (map->len > 1) {
1779 				map->queues[pos] = map->queues[--map->len];
1780 			} else {
1781 				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1782 				kfree_rcu(map, rcu);
1783 				map = NULL;
1784 			}
1785 			break;
1786 		}
1787 	}
1788 
1789 	return map;
1790 }
1791 
1792 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1793 {
1794 	struct xps_dev_maps *dev_maps;
1795 	int cpu, i;
1796 	bool active = false;
1797 
1798 	mutex_lock(&xps_map_mutex);
1799 	dev_maps = xmap_dereference(dev->xps_maps);
1800 
1801 	if (!dev_maps)
1802 		goto out_no_maps;
1803 
1804 	for_each_possible_cpu(cpu) {
1805 		for (i = index; i < dev->num_tx_queues; i++) {
1806 			if (!remove_xps_queue(dev_maps, cpu, i))
1807 				break;
1808 		}
1809 		if (i == dev->num_tx_queues)
1810 			active = true;
1811 	}
1812 
1813 	if (!active) {
1814 		RCU_INIT_POINTER(dev->xps_maps, NULL);
1815 		kfree_rcu(dev_maps, rcu);
1816 	}
1817 
1818 	for (i = index; i < dev->num_tx_queues; i++)
1819 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1820 					     NUMA_NO_NODE);
1821 
1822 out_no_maps:
1823 	mutex_unlock(&xps_map_mutex);
1824 }
1825 
1826 static struct xps_map *expand_xps_map(struct xps_map *map,
1827 				      int cpu, u16 index)
1828 {
1829 	struct xps_map *new_map;
1830 	int alloc_len = XPS_MIN_MAP_ALLOC;
1831 	int i, pos;
1832 
1833 	for (pos = 0; map && pos < map->len; pos++) {
1834 		if (map->queues[pos] != index)
1835 			continue;
1836 		return map;
1837 	}
1838 
1839 	/* Need to add queue to this CPU's existing map */
1840 	if (map) {
1841 		if (pos < map->alloc_len)
1842 			return map;
1843 
1844 		alloc_len = map->alloc_len * 2;
1845 	}
1846 
1847 	/* Need to allocate a new map to store the queue in this CPU's map */
1848 	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1849 			       cpu_to_node(cpu));
1850 	if (!new_map)
1851 		return NULL;
1852 
1853 	for (i = 0; i < pos; i++)
1854 		new_map->queues[i] = map->queues[i];
1855 	new_map->alloc_len = alloc_len;
1856 	new_map->len = pos;
1857 
1858 	return new_map;
1859 }
1860 
1861 int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1862 {
1863 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1864 	struct xps_map *map, *new_map;
1865 	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1866 	int cpu, numa_node_id = -2;
1867 	bool active = false;
1868 
1869 	mutex_lock(&xps_map_mutex);
1870 
1871 	dev_maps = xmap_dereference(dev->xps_maps);
1872 
1873 	/* allocate memory for queue storage */
1874 	for_each_online_cpu(cpu) {
1875 		if (!cpumask_test_cpu(cpu, mask))
1876 			continue;
1877 
1878 		if (!new_dev_maps)
1879 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1880 		if (!new_dev_maps) {
1881 			mutex_unlock(&xps_map_mutex);
1882 			return -ENOMEM;
1883 		}
1884 
1885 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1886 				 NULL;
1887 
1888 		map = expand_xps_map(map, cpu, index);
1889 		if (!map)
1890 			goto error;
1891 
1892 		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1893 	}
1894 
1895 	if (!new_dev_maps)
1896 		goto out_no_new_maps;
1897 
1898 	for_each_possible_cpu(cpu) {
1899 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1900 			/* add queue to CPU maps */
1901 			int pos = 0;
1902 
1903 			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1904 			while ((pos < map->len) && (map->queues[pos] != index))
1905 				pos++;
1906 
1907 			if (pos == map->len)
1908 				map->queues[map->len++] = index;
1909 #ifdef CONFIG_NUMA
1910 			if (numa_node_id == -2)
1911 				numa_node_id = cpu_to_node(cpu);
1912 			else if (numa_node_id != cpu_to_node(cpu))
1913 				numa_node_id = -1;
1914 #endif
1915 		} else if (dev_maps) {
1916 			/* fill in the new device map from the old device map */
1917 			map = xmap_dereference(dev_maps->cpu_map[cpu]);
1918 			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1919 		}
1920 
1921 	}
1922 
1923 	rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1924 
1925 	/* Cleanup old maps */
1926 	if (dev_maps) {
1927 		for_each_possible_cpu(cpu) {
1928 			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1929 			map = xmap_dereference(dev_maps->cpu_map[cpu]);
1930 			if (map && map != new_map)
1931 				kfree_rcu(map, rcu);
1932 		}
1933 
1934 		kfree_rcu(dev_maps, rcu);
1935 	}
1936 
1937 	dev_maps = new_dev_maps;
1938 	active = true;
1939 
1940 out_no_new_maps:
1941 	/* update Tx queue numa node */
1942 	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1943 				     (numa_node_id >= 0) ? numa_node_id :
1944 				     NUMA_NO_NODE);
1945 
1946 	if (!dev_maps)
1947 		goto out_no_maps;
1948 
1949 	/* removes queue from unused CPUs */
1950 	for_each_possible_cpu(cpu) {
1951 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
1952 			continue;
1953 
1954 		if (remove_xps_queue(dev_maps, cpu, index))
1955 			active = true;
1956 	}
1957 
1958 	/* free map if not active */
1959 	if (!active) {
1960 		RCU_INIT_POINTER(dev->xps_maps, NULL);
1961 		kfree_rcu(dev_maps, rcu);
1962 	}
1963 
1964 out_no_maps:
1965 	mutex_unlock(&xps_map_mutex);
1966 
1967 	return 0;
1968 error:
1969 	/* remove any maps that we added */
1970 	for_each_possible_cpu(cpu) {
1971 		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1972 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1973 				 NULL;
1974 		if (new_map && new_map != map)
1975 			kfree(new_map);
1976 	}
1977 
1978 	mutex_unlock(&xps_map_mutex);
1979 
1980 	kfree(new_dev_maps);
1981 	return -ENOMEM;
1982 }
1983 EXPORT_SYMBOL(netif_set_xps_queue);
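
/* Illustrative sketch (not part of this file): a multiqueue driver can use
 * netif_set_xps_queue() to pin each TX queue to the CPU that services its
 * interrupt.  "cpu" and "queue_index" below are hypothetical values.
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(cpu, mask);
 *		netif_set_xps_queue(dev, mask, queue_index);
 *		free_cpumask_var(mask);
 *	}
 */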
1984 
1985 #endif
1986 /*
1987  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1988  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1989  */
1990 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1991 {
1992 	int rc;
1993 
1994 	if (txq < 1 || txq > dev->num_tx_queues)
1995 		return -EINVAL;
1996 
1997 	if (dev->reg_state == NETREG_REGISTERED ||
1998 	    dev->reg_state == NETREG_UNREGISTERING) {
1999 		ASSERT_RTNL();
2000 
2001 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2002 						  txq);
2003 		if (rc)
2004 			return rc;
2005 
2006 		if (dev->num_tc)
2007 			netif_setup_tc(dev, txq);
2008 
2009 		if (txq < dev->real_num_tx_queues) {
2010 			qdisc_reset_all_tx_gt(dev, txq);
2011 #ifdef CONFIG_XPS
2012 			netif_reset_xps_queues_gt(dev, txq);
2013 #endif
2014 		}
2015 	}
2016 
2017 	dev->real_num_tx_queues = txq;
2018 	return 0;
2019 }
2020 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
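
/* Illustrative sketch (assumed driver code, not from this file): once a
 * device is registered, callers must hold the RTNL lock when growing or
 * shrinking the active TX queue set.  "nr_active" is a hypothetical count.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, nr_active);
 *	rtnl_unlock();
 *	if (err)
 *		return err;
 */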
2021 
2022 #ifdef CONFIG_RPS
2023 /**
2024  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2025  *	@dev: Network device
2026  *	@rxq: Actual number of RX queues
2027  *
2028  *	This must be called either with the rtnl_lock held or before
2029  *	registration of the net device.  Returns 0 on success, or a
2030  *	negative error code.  If called before registration, it always
2031  *	succeeds.
2032  */
2033 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2034 {
2035 	int rc;
2036 
2037 	if (rxq < 1 || rxq > dev->num_rx_queues)
2038 		return -EINVAL;
2039 
2040 	if (dev->reg_state == NETREG_REGISTERED) {
2041 		ASSERT_RTNL();
2042 
2043 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2044 						  rxq);
2045 		if (rc)
2046 			return rc;
2047 	}
2048 
2049 	dev->real_num_rx_queues = rxq;
2050 	return 0;
2051 }
2052 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2053 #endif
2054 
2055 /**
2056  * netif_get_num_default_rss_queues - default number of RSS queues
2057  *
2058  * This routine should set an upper limit on the number of RSS queues
2059  * used by default by multiqueue devices.
2060  */
2061 int netif_get_num_default_rss_queues(void)
2062 {
2063 	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2064 }
2065 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
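
/* Illustrative sketch: a driver would typically clamp its own hardware
 * limit against this default rather than spawn one queue per CPU on large
 * machines.  "max_hw_queues" is a hypothetical per-device limit.
 *
 *	num_queues = min_t(int, netif_get_num_default_rss_queues(),
 *			   max_hw_queues);
 */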
2066 
2067 static inline void __netif_reschedule(struct Qdisc *q)
2068 {
2069 	struct softnet_data *sd;
2070 	unsigned long flags;
2071 
2072 	local_irq_save(flags);
2073 	sd = &__get_cpu_var(softnet_data);
2074 	q->next_sched = NULL;
2075 	*sd->output_queue_tailp = q;
2076 	sd->output_queue_tailp = &q->next_sched;
2077 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2078 	local_irq_restore(flags);
2079 }
2080 
2081 void __netif_schedule(struct Qdisc *q)
2082 {
2083 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2084 		__netif_reschedule(q);
2085 }
2086 EXPORT_SYMBOL(__netif_schedule);
2087 
2088 void dev_kfree_skb_irq(struct sk_buff *skb)
2089 {
2090 	if (atomic_dec_and_test(&skb->users)) {
2091 		struct softnet_data *sd;
2092 		unsigned long flags;
2093 
2094 		local_irq_save(flags);
2095 		sd = &__get_cpu_var(softnet_data);
2096 		skb->next = sd->completion_queue;
2097 		sd->completion_queue = skb;
2098 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
2099 		local_irq_restore(flags);
2100 	}
2101 }
2102 EXPORT_SYMBOL(dev_kfree_skb_irq);
2103 
2104 void dev_kfree_skb_any(struct sk_buff *skb)
2105 {
2106 	if (in_irq() || irqs_disabled())
2107 		dev_kfree_skb_irq(skb);
2108 	else
2109 		dev_kfree_skb(skb);
2110 }
2111 EXPORT_SYMBOL(dev_kfree_skb_any);
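
/* Illustrative sketch: a TX completion handler that may run either in
 * hardirq context or from process context (e.g. a device reset path) frees
 * transmitted skbs with dev_kfree_skb_any().  "tx_buf" is hypothetical.
 *
 *	dev_kfree_skb_any(tx_buf->skb);
 *	tx_buf->skb = NULL;
 */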
2112 
2113 
2114 /**
2115  * netif_device_detach - mark device as removed
2116  * @dev: network device
2117  *
2118  * Mark the device as removed from the system and therefore no longer available.
2119  */
2120 void netif_device_detach(struct net_device *dev)
2121 {
2122 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2123 	    netif_running(dev)) {
2124 		netif_tx_stop_all_queues(dev);
2125 	}
2126 }
2127 EXPORT_SYMBOL(netif_device_detach);
2128 
2129 /**
2130  * netif_device_attach - mark device as attached
2131  * @dev: network device
2132  *
2133  * Mark the device as attached to the system and restart it if needed.
2134  */
2135 void netif_device_attach(struct net_device *dev)
2136 {
2137 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2138 	    netif_running(dev)) {
2139 		netif_tx_wake_all_queues(dev);
2140 		__netdev_watchdog_up(dev);
2141 	}
2142 }
2143 EXPORT_SYMBOL(netif_device_attach);
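
/* Illustrative sketch (assumed PCI driver code): the detach/attach pair is
 * typically used around suspend/resume so the stack stops handing packets
 * to hardware that is powered down.  All "my_" names, including the
 * my_hw_down()/my_hw_up() helpers, are hypothetical.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(netdev);
 *		my_hw_down(pdev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		my_hw_up(pdev);
 *		netif_device_attach(netdev);
 *		return 0;
 *	}
 */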
2144 
2145 static void skb_warn_bad_offload(const struct sk_buff *skb)
2146 {
2147 	static const netdev_features_t null_features = 0;
2148 	struct net_device *dev = skb->dev;
2149 	const char *driver = "";
2150 
2151 	if (dev && dev->dev.parent)
2152 		driver = dev_driver_string(dev->dev.parent);
2153 
2154 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2155 	     "gso_type=%d ip_summed=%d\n",
2156 	     driver, dev ? &dev->features : &null_features,
2157 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
2158 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2159 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
2160 }
2161 
2162 /*
2163  * Invalidate hardware checksum when packet is to be mangled, and
2164  * complete checksum manually on outgoing path.
2165  */
2166 int skb_checksum_help(struct sk_buff *skb)
2167 {
2168 	__wsum csum;
2169 	int ret = 0, offset;
2170 
2171 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2172 		goto out_set_summed;
2173 
2174 	if (unlikely(skb_shinfo(skb)->gso_size)) {
2175 		skb_warn_bad_offload(skb);
2176 		return -EINVAL;
2177 	}
2178 
2179 	/* Before computing a checksum, we should make sure no frag could
2180 	 * be modified by an external entity: the checksum could be wrong.
2181 	 */
2182 	if (skb_has_shared_frag(skb)) {
2183 		ret = __skb_linearize(skb);
2184 		if (ret)
2185 			goto out;
2186 	}
2187 
2188 	offset = skb_checksum_start_offset(skb);
2189 	BUG_ON(offset >= skb_headlen(skb));
2190 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
2191 
2192 	offset += skb->csum_offset;
2193 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2194 
2195 	if (skb_cloned(skb) &&
2196 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2197 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2198 		if (ret)
2199 			goto out;
2200 	}
2201 
2202 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
2203 out_set_summed:
2204 	skb->ip_summed = CHECKSUM_NONE;
2205 out:
2206 	return ret;
2207 }
2208 EXPORT_SYMBOL(skb_checksum_help);
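
/* Illustrative sketch: a caller (as dev_hard_start_xmit() below does) falls
 * back to skb_checksum_help() when the device cannot checksum this
 * particular packet in hardware.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !(dev->features & NETIF_F_ALL_CSUM) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */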
2209 
2210 __be16 skb_network_protocol(struct sk_buff *skb)
2211 {
2212 	__be16 type = skb->protocol;
2213 	int vlan_depth = ETH_HLEN;
2214 
2215 	while (type == htons(ETH_P_8021Q)) {
2216 		struct vlan_hdr *vh;
2217 
2218 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2219 			return 0;
2220 
2221 		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2222 		type = vh->h_vlan_encapsulated_proto;
2223 		vlan_depth += VLAN_HLEN;
2224 	}
2225 
2226 	return type;
2227 }
2228 
2229 /**
2230  *	skb_mac_gso_segment - mac layer segmentation handler.
2231  *	@skb: buffer to segment
2232  *	@features: features for the output path (see dev->features)
2233  */
2234 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2235 				    netdev_features_t features)
2236 {
2237 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2238 	struct packet_offload *ptype;
2239 	__be16 type = skb_network_protocol(skb);
2240 
2241 	if (unlikely(!type))
2242 		return ERR_PTR(-EINVAL);
2243 
2244 	__skb_pull(skb, skb->mac_len);
2245 
2246 	rcu_read_lock();
2247 	list_for_each_entry_rcu(ptype, &offload_base, list) {
2248 		if (ptype->type == type && ptype->callbacks.gso_segment) {
2249 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2250 				int err;
2251 
2252 				err = ptype->callbacks.gso_send_check(skb);
2253 				segs = ERR_PTR(err);
2254 				if (err || skb_gso_ok(skb, features))
2255 					break;
2256 				__skb_push(skb, (skb->data -
2257 						 skb_network_header(skb)));
2258 			}
2259 			segs = ptype->callbacks.gso_segment(skb, features);
2260 			break;
2261 		}
2262 	}
2263 	rcu_read_unlock();
2264 
2265 	__skb_push(skb, skb->data - skb_mac_header(skb));
2266 
2267 	return segs;
2268 }
2269 EXPORT_SYMBOL(skb_mac_gso_segment);
2270 
2271 
2272 /* openvswitch calls this on the rx path, so we need a different check.
2273  */
2274 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2275 {
2276 	if (tx_path)
2277 		return skb->ip_summed != CHECKSUM_PARTIAL;
2278 	else
2279 		return skb->ip_summed == CHECKSUM_NONE;
2280 }
2281 
2282 /**
2283  *	__skb_gso_segment - Perform segmentation on skb.
2284  *	@skb: buffer to segment
2285  *	@features: features for the output path (see dev->features)
2286  *	@tx_path: whether it is called in TX path
2287  *
2288  *	This function segments the given skb and returns a list of segments.
2289  *
2290  *	It may return NULL if the skb requires no segmentation.  This is
2291  *	only possible when GSO is used for verifying header integrity.
2292  */
2293 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2294 				  netdev_features_t features, bool tx_path)
2295 {
2296 	if (unlikely(skb_needs_check(skb, tx_path))) {
2297 		int err;
2298 
2299 		skb_warn_bad_offload(skb);
2300 
2301 		if (skb_header_cloned(skb) &&
2302 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2303 			return ERR_PTR(err);
2304 	}
2305 
2306 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2307 	skb_reset_mac_header(skb);
2308 	skb_reset_mac_len(skb);
2309 
2310 	return skb_mac_gso_segment(skb, features);
2311 }
2312 EXPORT_SYMBOL(__skb_gso_segment);
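
/* Illustrative sketch: callers that segment in software walk the returned
 * list one skb at a time.  Note that a NULL return means the skb needed no
 * segmentation.  "my_xmit_one" is a hypothetical per-segment transmit.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		my_xmit_one(segs);
 *		segs = next;
 *	}
 */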
2313 
2314 /* Take action when hardware reception checksum errors are detected. */
2315 #ifdef CONFIG_BUG
2316 void netdev_rx_csum_fault(struct net_device *dev)
2317 {
2318 	if (net_ratelimit()) {
2319 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2320 		dump_stack();
2321 	}
2322 }
2323 EXPORT_SYMBOL(netdev_rx_csum_fault);
2324 #endif
2325 
2326 /* Actually, we should eliminate this check as soon as we know that:
2327  * 1. An IOMMU is present and allows mapping all the memory.
2328  * 2. No high memory really exists on this machine.
2329  */
2330 
2331 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2332 {
2333 #ifdef CONFIG_HIGHMEM
2334 	int i;
2335 	if (!(dev->features & NETIF_F_HIGHDMA)) {
2336 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2337 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2338 			if (PageHighMem(skb_frag_page(frag)))
2339 				return 1;
2340 		}
2341 	}
2342 
2343 	if (PCI_DMA_BUS_IS_PHYS) {
2344 		struct device *pdev = dev->dev.parent;
2345 
2346 		if (!pdev)
2347 			return 0;
2348 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2349 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2350 			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2351 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2352 				return 1;
2353 		}
2354 	}
2355 #endif
2356 	return 0;
2357 }
2358 
2359 struct dev_gso_cb {
2360 	void (*destructor)(struct sk_buff *skb);
2361 };
2362 
2363 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2364 
2365 static void dev_gso_skb_destructor(struct sk_buff *skb)
2366 {
2367 	struct dev_gso_cb *cb;
2368 
2369 	do {
2370 		struct sk_buff *nskb = skb->next;
2371 
2372 		skb->next = nskb->next;
2373 		nskb->next = NULL;
2374 		kfree_skb(nskb);
2375 	} while (skb->next);
2376 
2377 	cb = DEV_GSO_CB(skb);
2378 	if (cb->destructor)
2379 		cb->destructor(skb);
2380 }
2381 
2382 /**
2383  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
2384  *	@skb: buffer to segment
2385  *	@features: device features as applicable to this skb
2386  *
2387  *	This function segments the given skb and stores the list of segments
2388  *	in skb->next.
2389  */
2390 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2391 {
2392 	struct sk_buff *segs;
2393 
2394 	segs = skb_gso_segment(skb, features);
2395 
2396 	/* Verifying header integrity only. */
2397 	if (!segs)
2398 		return 0;
2399 
2400 	if (IS_ERR(segs))
2401 		return PTR_ERR(segs);
2402 
2403 	skb->next = segs;
2404 	DEV_GSO_CB(skb)->destructor = skb->destructor;
2405 	skb->destructor = dev_gso_skb_destructor;
2406 
2407 	return 0;
2408 }
2409 
2410 static netdev_features_t harmonize_features(struct sk_buff *skb,
2411 	__be16 protocol, netdev_features_t features)
2412 {
2413 	if (skb->ip_summed != CHECKSUM_NONE &&
2414 	    !can_checksum_protocol(features, protocol)) {
2415 		features &= ~NETIF_F_ALL_CSUM;
2416 	} else if (illegal_highdma(skb->dev, skb)) {
2417 		features &= ~NETIF_F_SG;
2418 	}
2419 
2420 	return features;
2421 }
2422 
2423 netdev_features_t netif_skb_features(struct sk_buff *skb)
2424 {
2425 	__be16 protocol = skb->protocol;
2426 	netdev_features_t features = skb->dev->features;
2427 
2428 	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2429 		features &= ~NETIF_F_GSO_MASK;
2430 
2431 	if (protocol == htons(ETH_P_8021Q)) {
2432 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2433 		protocol = veh->h_vlan_encapsulated_proto;
2434 	} else if (!vlan_tx_tag_present(skb)) {
2435 		return harmonize_features(skb, protocol, features);
2436 	}
2437 
2438 	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2439 
2440 	if (protocol != htons(ETH_P_8021Q)) {
2441 		return harmonize_features(skb, protocol, features);
2442 	} else {
2443 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2444 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2445 		return harmonize_features(skb, protocol, features);
2446 	}
2447 }
2448 EXPORT_SYMBOL(netif_skb_features);
2449 
2450 /*
2451  * Returns true if either:
2452  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
2453  *	2. skb is fragmented and the device does not support SG.
2454  */
2455 static inline int skb_needs_linearize(struct sk_buff *skb,
2456 				      int features)
2457 {
2458 	return skb_is_nonlinear(skb) &&
2459 			((skb_has_frag_list(skb) &&
2460 				!(features & NETIF_F_FRAGLIST)) ||
2461 			(skb_shinfo(skb)->nr_frags &&
2462 				!(features & NETIF_F_SG)));
2463 }
2464 
2465 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2466 			struct netdev_queue *txq)
2467 {
2468 	const struct net_device_ops *ops = dev->netdev_ops;
2469 	int rc = NETDEV_TX_OK;
2470 	unsigned int skb_len;
2471 
2472 	if (likely(!skb->next)) {
2473 		netdev_features_t features;
2474 
2475 		/*
2476 		 * If device doesn't need skb->dst, release it right now while
2477 		 * it's hot in this CPU's cache
2478 		 */
2479 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2480 			skb_dst_drop(skb);
2481 
2482 		features = netif_skb_features(skb);
2483 
2484 		if (vlan_tx_tag_present(skb) &&
2485 		    !(features & NETIF_F_HW_VLAN_TX)) {
2486 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2487 			if (unlikely(!skb))
2488 				goto out;
2489 
2490 			skb->vlan_tci = 0;
2491 		}
2492 
2493 		/* If this is an encapsulation offload request, verify we are testing
2494 		 * hardware encapsulation features instead of standard
2495 		 * features for the netdev
2496 		 */
2497 		if (skb->encapsulation)
2498 			features &= dev->hw_enc_features;
2499 
2500 		if (netif_needs_gso(skb, features)) {
2501 			if (unlikely(dev_gso_segment(skb, features)))
2502 				goto out_kfree_skb;
2503 			if (skb->next)
2504 				goto gso;
2505 		} else {
2506 			if (skb_needs_linearize(skb, features) &&
2507 			    __skb_linearize(skb))
2508 				goto out_kfree_skb;
2509 
2510 			/* If packet is not checksummed and device does not
2511 			 * support checksumming for this protocol, complete
2512 			 * checksumming here.
2513 			 */
2514 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
2515 				if (skb->encapsulation)
2516 					skb_set_inner_transport_header(skb,
2517 						skb_checksum_start_offset(skb));
2518 				else
2519 					skb_set_transport_header(skb,
2520 						skb_checksum_start_offset(skb));
2521 				if (!(features & NETIF_F_ALL_CSUM) &&
2522 				     skb_checksum_help(skb))
2523 					goto out_kfree_skb;
2524 			}
2525 		}
2526 
2527 		if (!list_empty(&ptype_all))
2528 			dev_queue_xmit_nit(skb, dev);
2529 
2530 		skb_len = skb->len;
2531 		rc = ops->ndo_start_xmit(skb, dev);
2532 		trace_net_dev_xmit(skb, rc, dev, skb_len);
2533 		if (rc == NETDEV_TX_OK)
2534 			txq_trans_update(txq);
2535 		return rc;
2536 	}
2537 
2538 gso:
2539 	do {
2540 		struct sk_buff *nskb = skb->next;
2541 
2542 		skb->next = nskb->next;
2543 		nskb->next = NULL;
2544 
2545 		/*
2546 		 * If device doesn't need nskb->dst, release it right now while
2547 		 * it's hot in this CPU's cache
2548 		 */
2549 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2550 			skb_dst_drop(nskb);
2551 
2552 		if (!list_empty(&ptype_all))
2553 			dev_queue_xmit_nit(nskb, dev);
2554 
2555 		skb_len = nskb->len;
2556 		rc = ops->ndo_start_xmit(nskb, dev);
2557 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
2558 		if (unlikely(rc != NETDEV_TX_OK)) {
2559 			if (rc & ~NETDEV_TX_MASK)
2560 				goto out_kfree_gso_skb;
2561 			nskb->next = skb->next;
2562 			skb->next = nskb;
2563 			return rc;
2564 		}
2565 		txq_trans_update(txq);
2566 		if (unlikely(netif_xmit_stopped(txq) && skb->next))
2567 			return NETDEV_TX_BUSY;
2568 	} while (skb->next);
2569 
2570 out_kfree_gso_skb:
2571 	if (likely(skb->next == NULL))
2572 		skb->destructor = DEV_GSO_CB(skb)->destructor;
2573 out_kfree_skb:
2574 	kfree_skb(skb);
2575 out:
2576 	return rc;
2577 }
2578 
2579 static void qdisc_pkt_len_init(struct sk_buff *skb)
2580 {
2581 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2582 
2583 	qdisc_skb_cb(skb)->pkt_len = skb->len;
2584 
2585 	/* To get a more precise estimation of bytes sent on the wire,
2586 	 * we add the header size of all segments to pkt_len
2587 	 */
2588 	if (shinfo->gso_size)  {
2589 		unsigned int hdr_len;
2590 		u16 gso_segs = shinfo->gso_segs;
2591 
2592 		/* mac layer + network layer */
2593 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2594 
2595 		/* + transport layer */
2596 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2597 			hdr_len += tcp_hdrlen(skb);
2598 		else
2599 			hdr_len += sizeof(struct udphdr);
2600 
2601 		if (shinfo->gso_type & SKB_GSO_DODGY)
2602 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2603 						shinfo->gso_size);
2604 
2605 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2606 	}
2607 }
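
/* Worked example (illustrative numbers): a TSO skb carrying 10 segments of
 * gso_size = 1448 with 66 bytes of headers (14 Ethernet + 20 IPv4 + 32 TCP)
 * has skb->len = 66 + 10 * 1448 = 14546.  qdisc_pkt_len_init() then sets
 * pkt_len = 14546 + (10 - 1) * 66 = 15140, matching the 10 * 1514 bytes
 * that actually hit the wire (excluding preamble/FCS/IFG).
 */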
2608 
2609 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2610 				 struct net_device *dev,
2611 				 struct netdev_queue *txq)
2612 {
2613 	spinlock_t *root_lock = qdisc_lock(q);
2614 	bool contended;
2615 	int rc;
2616 
2617 	qdisc_pkt_len_init(skb);
2618 	qdisc_calculate_pkt_len(skb, q);
2619 	/*
2620 	 * Heuristic to force contended enqueues to serialize on a
2621 	 * separate lock before trying to get qdisc main lock.
2622 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2623 	 * and dequeue packets faster.
2624 	 */
2625 	contended = qdisc_is_running(q);
2626 	if (unlikely(contended))
2627 		spin_lock(&q->busylock);
2628 
2629 	spin_lock(root_lock);
2630 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2631 		kfree_skb(skb);
2632 		rc = NET_XMIT_DROP;
2633 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2634 		   qdisc_run_begin(q)) {
2635 		/*
2636 		 * This is a work-conserving queue; there are no old skbs
2637 		 * waiting to be sent out; and the qdisc is not running -
2638 		 * xmit the skb directly.
2639 		 */
2640 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2641 			skb_dst_force(skb);
2642 
2643 		qdisc_bstats_update(q, skb);
2644 
2645 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2646 			if (unlikely(contended)) {
2647 				spin_unlock(&q->busylock);
2648 				contended = false;
2649 			}
2650 			__qdisc_run(q);
2651 		} else
2652 			qdisc_run_end(q);
2653 
2654 		rc = NET_XMIT_SUCCESS;
2655 	} else {
2656 		skb_dst_force(skb);
2657 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2658 		if (qdisc_run_begin(q)) {
2659 			if (unlikely(contended)) {
2660 				spin_unlock(&q->busylock);
2661 				contended = false;
2662 			}
2663 			__qdisc_run(q);
2664 		}
2665 	}
2666 	spin_unlock(root_lock);
2667 	if (unlikely(contended))
2668 		spin_unlock(&q->busylock);
2669 	return rc;
2670 }
2671 
2672 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2673 static void skb_update_prio(struct sk_buff *skb)
2674 {
2675 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2676 
2677 	if (!skb->priority && skb->sk && map) {
2678 		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2679 
2680 		if (prioidx < map->priomap_len)
2681 			skb->priority = map->priomap[prioidx];
2682 	}
2683 }
2684 #else
2685 #define skb_update_prio(skb)
2686 #endif
2687 
2688 static DEFINE_PER_CPU(int, xmit_recursion);
2689 #define RECURSION_LIMIT 10
2690 
2691 /**
2692  *	dev_loopback_xmit - loop back @skb
2693  *	@skb: buffer to transmit
2694  */
2695 int dev_loopback_xmit(struct sk_buff *skb)
2696 {
2697 	skb_reset_mac_header(skb);
2698 	__skb_pull(skb, skb_network_offset(skb));
2699 	skb->pkt_type = PACKET_LOOPBACK;
2700 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2701 	WARN_ON(!skb_dst(skb));
2702 	skb_dst_force(skb);
2703 	netif_rx_ni(skb);
2704 	return 0;
2705 }
2706 EXPORT_SYMBOL(dev_loopback_xmit);
2707 
2708 /**
2709  *	dev_queue_xmit - transmit a buffer
2710  *	@skb: buffer to transmit
2711  *
2712  *	Queue a buffer for transmission to a network device. The caller must
2713  *	have set the device and priority and built the buffer before calling
2714  *	this function. The function can be called from an interrupt.
2715  *
2716  *	A negative errno code is returned on a failure. A success does not
2717  *	guarantee the frame will be transmitted as it may be dropped due
2718  *	to congestion or traffic shaping.
2719  *
2720  * -----------------------------------------------------------------------------------
2721  *      I notice this method can also return errors from the queue disciplines,
2722  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2723  *      be positive.
2724  *
2725  *      Regardless of the return value, the skb is consumed, so it is currently
2726  *      difficult to retry a send to this method.  (You can bump the ref count
2727  *      before sending to hold a reference for retry if you are careful.)
2728  *
2729  *      When calling this method, interrupts MUST be enabled.  This is because
2730  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2731  *          --BLG
2732  */
2733 int dev_queue_xmit(struct sk_buff *skb)
2734 {
2735 	struct net_device *dev = skb->dev;
2736 	struct netdev_queue *txq;
2737 	struct Qdisc *q;
2738 	int rc = -ENOMEM;
2739 
2740 	skb_reset_mac_header(skb);
2741 
2742 	/* Disable soft irqs for various locks below. Also
2743 	 * stops preemption for RCU.
2744 	 */
2745 	rcu_read_lock_bh();
2746 
2747 	skb_update_prio(skb);
2748 
2749 	txq = netdev_pick_tx(dev, skb);
2750 	q = rcu_dereference_bh(txq->qdisc);
2751 
2752 #ifdef CONFIG_NET_CLS_ACT
2753 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2754 #endif
2755 	trace_net_dev_queue(skb);
2756 	if (q->enqueue) {
2757 		rc = __dev_xmit_skb(skb, q, dev, txq);
2758 		goto out;
2759 	}
2760 
2761 	/* The device has no queue. Common case for software devices:
2762 	   loopback, all sorts of tunnels...
2763 
2764 	   Really, it is unlikely that netif_tx_lock protection is necessary
2765 	   here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
2766 	   counters.)
2767 	   However, it is possible that they rely on the protection
2768 	   we provide here.
2769 
2770 	   Check this and shoot the lock. It is not prone to deadlocks.
2771 	   Or shoot the noqueue qdisc; it is even simpler 8)
2772 	 */
2773 	if (dev->flags & IFF_UP) {
2774 		int cpu = smp_processor_id(); /* ok because BHs are off */
2775 
2776 		if (txq->xmit_lock_owner != cpu) {
2777 
2778 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2779 				goto recursion_alert;
2780 
2781 			HARD_TX_LOCK(dev, txq, cpu);
2782 
2783 			if (!netif_xmit_stopped(txq)) {
2784 				__this_cpu_inc(xmit_recursion);
2785 				rc = dev_hard_start_xmit(skb, dev, txq);
2786 				__this_cpu_dec(xmit_recursion);
2787 				if (dev_xmit_complete(rc)) {
2788 					HARD_TX_UNLOCK(dev, txq);
2789 					goto out;
2790 				}
2791 			}
2792 			HARD_TX_UNLOCK(dev, txq);
2793 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2794 					     dev->name);
2795 		} else {
2796 			/* Recursion is detected! It is possible,
2797 			 * unfortunately
2798 			 */
2799 recursion_alert:
2800 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2801 					     dev->name);
2802 		}
2803 	}
2804 
2805 	rc = -ENETDOWN;
2806 	rcu_read_unlock_bh();
2807 
2808 	kfree_skb(skb);
2809 	return rc;
2810 out:
2811 	rcu_read_unlock_bh();
2812 	return rc;
2813 }
2814 EXPORT_SYMBOL(dev_queue_xmit);
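
/* Illustrative sketch: a sender that builds its own frame fills in skb->dev
 * (and optionally skb->priority) before handing the buffer off.  The skb is
 * consumed whatever the outcome, so it must not be touched afterwards.
 * "stats" is a hypothetical private counter.
 *
 *	skb->dev = dev;
 *	err = dev_queue_xmit(skb);
 *	if (net_xmit_eval(err))
 *		stats.tx_dropped++;
 */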
2815 
2816 
2817 /*=======================================================================
2818 			Receiver routines
2819   =======================================================================*/
2820 
2821 int netdev_max_backlog __read_mostly = 1000;
2822 EXPORT_SYMBOL(netdev_max_backlog);
2823 
2824 int netdev_tstamp_prequeue __read_mostly = 1;
2825 int netdev_budget __read_mostly = 300;
2826 int weight_p __read_mostly = 64;            /* old backlog weight */
2827 
2828 /* Called with irqs disabled */
2829 static inline void ____napi_schedule(struct softnet_data *sd,
2830 				     struct napi_struct *napi)
2831 {
2832 	list_add_tail(&napi->poll_list, &sd->poll_list);
2833 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2834 }
2835 
2836 #ifdef CONFIG_RPS
2837 
2838 /* One global table that all flow-based protocols share. */
2839 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2840 EXPORT_SYMBOL(rps_sock_flow_table);
2841 
2842 struct static_key rps_needed __read_mostly;
2843 
2844 static struct rps_dev_flow *
2845 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2846 	    struct rps_dev_flow *rflow, u16 next_cpu)
2847 {
2848 	if (next_cpu != RPS_NO_CPU) {
2849 #ifdef CONFIG_RFS_ACCEL
2850 		struct netdev_rx_queue *rxqueue;
2851 		struct rps_dev_flow_table *flow_table;
2852 		struct rps_dev_flow *old_rflow;
2853 		u32 flow_id;
2854 		u16 rxq_index;
2855 		int rc;
2856 
2857 		/* Should we steer this flow to a different hardware queue? */
2858 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2859 		    !(dev->features & NETIF_F_NTUPLE))
2860 			goto out;
2861 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2862 		if (rxq_index == skb_get_rx_queue(skb))
2863 			goto out;
2864 
2865 		rxqueue = dev->_rx + rxq_index;
2866 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
2867 		if (!flow_table)
2868 			goto out;
2869 		flow_id = skb->rxhash & flow_table->mask;
2870 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2871 							rxq_index, flow_id);
2872 		if (rc < 0)
2873 			goto out;
2874 		old_rflow = rflow;
2875 		rflow = &flow_table->flows[flow_id];
2876 		rflow->filter = rc;
2877 		if (old_rflow->filter == rflow->filter)
2878 			old_rflow->filter = RPS_NO_FILTER;
2879 	out:
2880 #endif
2881 		rflow->last_qtail =
2882 			per_cpu(softnet_data, next_cpu).input_queue_head;
2883 	}
2884 
2885 	rflow->cpu = next_cpu;
2886 	return rflow;
2887 }
2888 
2889 /*
2890  * get_rps_cpu is called from netif_receive_skb and returns the target
2891  * CPU from the RPS map of the receiving queue for a given skb.
2892  * rcu_read_lock must be held on entry.
2893  */
2894 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2895 		       struct rps_dev_flow **rflowp)
2896 {
2897 	struct netdev_rx_queue *rxqueue;
2898 	struct rps_map *map;
2899 	struct rps_dev_flow_table *flow_table;
2900 	struct rps_sock_flow_table *sock_flow_table;
2901 	int cpu = -1;
2902 	u16 tcpu;
2903 
2904 	if (skb_rx_queue_recorded(skb)) {
2905 		u16 index = skb_get_rx_queue(skb);
2906 		if (unlikely(index >= dev->real_num_rx_queues)) {
2907 			WARN_ONCE(dev->real_num_rx_queues > 1,
2908 				  "%s received packet on queue %u, but number "
2909 				  "of RX queues is %u\n",
2910 				  dev->name, index, dev->real_num_rx_queues);
2911 			goto done;
2912 		}
2913 		rxqueue = dev->_rx + index;
2914 	} else
2915 		rxqueue = dev->_rx;
2916 
2917 	map = rcu_dereference(rxqueue->rps_map);
2918 	if (map) {
2919 		if (map->len == 1 &&
2920 		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2921 			tcpu = map->cpus[0];
2922 			if (cpu_online(tcpu))
2923 				cpu = tcpu;
2924 			goto done;
2925 		}
2926 	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2927 		goto done;
2928 	}
2929 
2930 	skb_reset_network_header(skb);
2931 	if (!skb_get_rxhash(skb))
2932 		goto done;
2933 
2934 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2935 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
2936 	if (flow_table && sock_flow_table) {
2937 		u16 next_cpu;
2938 		struct rps_dev_flow *rflow;
2939 
2940 		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2941 		tcpu = rflow->cpu;
2942 
2943 		next_cpu = sock_flow_table->ents[skb->rxhash &
2944 		    sock_flow_table->mask];
2945 
2946 		/*
2947 		 * If the desired CPU (where last recvmsg was done) is
2948 		 * different from current CPU (one in the rx-queue flow
2949 		 * table entry), switch if one of the following holds:
2950 		 *   - Current CPU is unset (equal to RPS_NO_CPU).
2951 		 *   - Current CPU is offline.
2952 		 *   - The current CPU's queue tail has advanced beyond the
2953 		 *     last packet that was enqueued using this table entry.
2954 		 *     This guarantees that all previous packets for the flow
2955 		 *     have been dequeued, thus preserving in order delivery.
2956 		 */
2957 		if (unlikely(tcpu != next_cpu) &&
2958 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2959 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2960 		      rflow->last_qtail)) >= 0)) {
2961 			tcpu = next_cpu;
2962 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2963 		}
2964 
2965 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2966 			*rflowp = rflow;
2967 			cpu = tcpu;
2968 			goto done;
2969 		}
2970 	}
2971 
2972 	if (map) {
2973 		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2974 
2975 		if (cpu_online(tcpu)) {
2976 			cpu = tcpu;
2977 			goto done;
2978 		}
2979 	}
2980 
2981 done:
2982 	return cpu;
2983 }
2984 
2985 #ifdef CONFIG_RFS_ACCEL
2986 
2987 /**
2988  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2989  * @dev: Device on which the filter was set
2990  * @rxq_index: RX queue index
2991  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2992  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2993  *
2994  * Drivers that implement ndo_rx_flow_steer() should periodically call
2995  * this function for each installed filter and remove the filters for
2996  * which it returns %true.
2997  */
2998 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2999 			 u32 flow_id, u16 filter_id)
3000 {
3001 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3002 	struct rps_dev_flow_table *flow_table;
3003 	struct rps_dev_flow *rflow;
3004 	bool expire = true;
3005 	int cpu;
3006 
3007 	rcu_read_lock();
3008 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
3009 	if (flow_table && flow_id <= flow_table->mask) {
3010 		rflow = &flow_table->flows[flow_id];
3011 		cpu = ACCESS_ONCE(rflow->cpu);
3012 		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3013 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3014 			   rflow->last_qtail) <
3015 		     (int)(10 * flow_table->mask)))
3016 			expire = false;
3017 	}
3018 	rcu_read_unlock();
3019 	return expire;
3020 }
3021 EXPORT_SYMBOL(rps_may_expire_flow);
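
/* Illustrative sketch (assumed driver code): an RFS-accelerated driver
 * periodically scans its installed filters and removes those the stack no
 * longer cares about.  "struct my_filter" and "my_remove_filter" are
 * hypothetical.
 *
 *	for (i = 0; i < nfilters; i++) {
 *		struct my_filter *f = &filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(dev, f->rxq_index, f->flow_id, i))
 *			my_remove_filter(dev, f);
 *	}
 */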
3022 
3023 #endif /* CONFIG_RFS_ACCEL */
3024 
3025 /* Called from hardirq (IPI) context */
3026 static void rps_trigger_softirq(void *data)
3027 {
3028 	struct softnet_data *sd = data;
3029 
3030 	____napi_schedule(sd, &sd->backlog);
3031 	sd->received_rps++;
3032 }
3033 
3034 #endif /* CONFIG_RPS */
3035 
3036 /*
3037  * Check if this softnet_data structure belongs to another CPU.
3038  * If yes, queue it to our IPI list and return 1.
3039  * If no, return 0.
3040  */
3041 static int rps_ipi_queued(struct softnet_data *sd)
3042 {
3043 #ifdef CONFIG_RPS
3044 	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3045 
3046 	if (sd != mysd) {
3047 		sd->rps_ipi_next = mysd->rps_ipi_list;
3048 		mysd->rps_ipi_list = sd;
3049 
3050 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3051 		return 1;
3052 	}
3053 #endif /* CONFIG_RPS */
3054 	return 0;
3055 }
3056 
3057 /*
3058  * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3059  * queue (may be a remote CPU queue).
3060  */
3061 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3062 			      unsigned int *qtail)
3063 {
3064 	struct softnet_data *sd;
3065 	unsigned long flags;
3066 
3067 	sd = &per_cpu(softnet_data, cpu);
3068 
3069 	local_irq_save(flags);
3070 
3071 	rps_lock(sd);
3072 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3073 		if (skb_queue_len(&sd->input_pkt_queue)) {
3074 enqueue:
3075 			__skb_queue_tail(&sd->input_pkt_queue, skb);
3076 			input_queue_tail_incr_save(sd, qtail);
3077 			rps_unlock(sd);
3078 			local_irq_restore(flags);
3079 			return NET_RX_SUCCESS;
3080 		}
3081 
3082 		/* Schedule NAPI for backlog device
3083 		 * We can use a non-atomic operation since we own the queue lock
3084 		 */
3085 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3086 			if (!rps_ipi_queued(sd))
3087 				____napi_schedule(sd, &sd->backlog);
3088 		}
3089 		goto enqueue;
3090 	}
3091 
3092 	sd->dropped++;
3093 	rps_unlock(sd);
3094 
3095 	local_irq_restore(flags);
3096 
3097 	atomic_long_inc(&skb->dev->rx_dropped);
3098 	kfree_skb(skb);
3099 	return NET_RX_DROP;
3100 }
3101 
3102 /**
3103  *	netif_rx	-	post buffer to the network code
3104  *	@skb: buffer to post
3105  *
3106  *	This function receives a packet from a device driver and queues it for
3107  *	the upper (protocol) levels to process.  It always succeeds. The buffer
3108  *	may be dropped during processing for congestion control or by the
3109  *	protocol layers.
3110  *
3111  *	return values:
3112  *	NET_RX_SUCCESS	(no congestion)
3113  *	NET_RX_DROP     (packet was dropped)
3114  *
3115  */
3116 
3117 int netif_rx(struct sk_buff *skb)
3118 {
3119 	int ret;
3120 
3121 	/* if netpoll wants it, pretend we never saw it */
3122 	if (netpoll_rx(skb))
3123 		return NET_RX_DROP;
3124 
3125 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3126 
3127 	trace_netif_rx(skb);
3128 #ifdef CONFIG_RPS
3129 	if (static_key_false(&rps_needed)) {
3130 		struct rps_dev_flow voidflow, *rflow = &voidflow;
3131 		int cpu;
3132 
3133 		preempt_disable();
3134 		rcu_read_lock();
3135 
3136 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
3137 		if (cpu < 0)
3138 			cpu = smp_processor_id();
3139 
3140 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3141 
3142 		rcu_read_unlock();
3143 		preempt_enable();
3144 	} else
3145 #endif
3146 	{
3147 		unsigned int qtail;
3148 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3149 		put_cpu();
3150 	}
3151 	return ret;
3152 }
3153 EXPORT_SYMBOL(netif_rx);
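
/* Illustrative sketch: a non-NAPI driver's receive interrupt copies the
 * frame into a fresh skb and posts it with netif_rx().  "pkt_len" and the
 * my_copy_rx_data() step are hypothetical.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb)
 *		return;
 *	my_copy_rx_data(skb_put(skb, pkt_len), pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */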
3154 
3155 int netif_rx_ni(struct sk_buff *skb)
3156 {
3157 	int err;
3158 
3159 	preempt_disable();
3160 	err = netif_rx(skb);
3161 	if (local_softirq_pending())
3162 		do_softirq();
3163 	preempt_enable();
3164 
3165 	return err;
3166 }
3167 EXPORT_SYMBOL(netif_rx_ni);
3168 
3169 static void net_tx_action(struct softirq_action *h)
3170 {
3171 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3172 
3173 	if (sd->completion_queue) {
3174 		struct sk_buff *clist;
3175 
3176 		local_irq_disable();
3177 		clist = sd->completion_queue;
3178 		sd->completion_queue = NULL;
3179 		local_irq_enable();
3180 
3181 		while (clist) {
3182 			struct sk_buff *skb = clist;
3183 			clist = clist->next;
3184 
3185 			WARN_ON(atomic_read(&skb->users));
3186 			trace_kfree_skb(skb, net_tx_action);
3187 			__kfree_skb(skb);
3188 		}
3189 	}
3190 
3191 	if (sd->output_queue) {
3192 		struct Qdisc *head;
3193 
3194 		local_irq_disable();
3195 		head = sd->output_queue;
3196 		sd->output_queue = NULL;
3197 		sd->output_queue_tailp = &sd->output_queue;
3198 		local_irq_enable();
3199 
3200 		while (head) {
3201 			struct Qdisc *q = head;
3202 			spinlock_t *root_lock;
3203 
3204 			head = head->next_sched;
3205 
3206 			root_lock = qdisc_lock(q);
3207 			if (spin_trylock(root_lock)) {
3208 				smp_mb__before_clear_bit();
3209 				clear_bit(__QDISC_STATE_SCHED,
3210 					  &q->state);
3211 				qdisc_run(q);
3212 				spin_unlock(root_lock);
3213 			} else {
3214 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3215 					      &q->state)) {
3216 					__netif_reschedule(q);
3217 				} else {
3218 					smp_mb__before_clear_bit();
3219 					clear_bit(__QDISC_STATE_SCHED,
3220 						  &q->state);
3221 				}
3222 			}
3223 		}
3224 	}
3225 }
3226 
3227 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3228     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3229 /* This hook is defined here for ATM LANE */
3230 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3231 			     unsigned char *addr) __read_mostly;
3232 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3233 #endif
3234 
3235 #ifdef CONFIG_NET_CLS_ACT
3236 /* TODO: Maybe we should just force sch_ingress to be compiled in
3237  * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
3238  * instructions (a compare and 2 extra stores) right now if we don't
3239  * have it on but do have CONFIG_NET_CLS_ACT.
3240  * NOTE: This doesn't stop any functionality; if you don't have
3241  * the ingress scheduler, you just can't add policies on ingress.
3242  *
3243  */
3244 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3245 {
3246 	struct net_device *dev = skb->dev;
3247 	u32 ttl = G_TC_RTTL(skb->tc_verd);
3248 	int result = TC_ACT_OK;
3249 	struct Qdisc *q;
3250 
3251 	if (unlikely(MAX_RED_LOOP < ttl++)) {
3252 		net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3253 				     skb->skb_iif, dev->ifindex);
3254 		return TC_ACT_SHOT;
3255 	}
3256 
3257 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3258 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3259 
3260 	q = rxq->qdisc;
3261 	if (q != &noop_qdisc) {
3262 		spin_lock(qdisc_lock(q));
3263 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3264 			result = qdisc_enqueue_root(skb, q);
3265 		spin_unlock(qdisc_lock(q));
3266 	}
3267 
3268 	return result;
3269 }
3270 
3271 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3272 					 struct packet_type **pt_prev,
3273 					 int *ret, struct net_device *orig_dev)
3274 {
3275 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3276 
3277 	if (!rxq || rxq->qdisc == &noop_qdisc)
3278 		goto out;
3279 
3280 	if (*pt_prev) {
3281 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
3282 		*pt_prev = NULL;
3283 	}
3284 
3285 	switch (ing_filter(skb, rxq)) {
3286 	case TC_ACT_SHOT:
3287 	case TC_ACT_STOLEN:
3288 		kfree_skb(skb);
3289 		return NULL;
3290 	}
3291 
3292 out:
3293 	skb->tc_verd = 0;
3294 	return skb;
3295 }
3296 #endif
3297 
3298 /**
3299  *	netdev_rx_handler_register - register receive handler
3300  *	@dev: device to register a handler for
3301  *	@rx_handler: receive handler to register
3302  *	@rx_handler_data: data pointer that is used by rx handler
3303  *
3304  *	Register a receive handler for a device. This handler will then be
3305  *	called from __netif_receive_skb. A negative errno code is returned
3306  *	on a failure.
3307  *
3308  *	The caller must hold the rtnl_mutex.
3309  *
3310  *	For a general description of rx_handler, see enum rx_handler_result.
3311  */
3312 int netdev_rx_handler_register(struct net_device *dev,
3313 			       rx_handler_func_t *rx_handler,
3314 			       void *rx_handler_data)
3315 {
3316 	ASSERT_RTNL();
3317 
3318 	if (dev->rx_handler)
3319 		return -EBUSY;
3320 
3321 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3322 	rcu_assign_pointer(dev->rx_handler, rx_handler);
3323 
3324 	return 0;
3325 }
3326 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
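
/* Illustrative sketch (assumed upper-device code, in the spirit of bridge
 * or bonding): register a handler under RTNL and claim frames from the
 * lower device.  "my_handle_frame", "my_upper_dev" and "my_port" are
 * hypothetical.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		skb->dev = my_upper_dev(skb->dev);
 *		netif_rx(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(lower_dev, my_handle_frame, my_port);
 *	rtnl_unlock();
 */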
3327 
3328 /**
3329  *	netdev_rx_handler_unregister - unregister receive handler
3330  *	@dev: device to unregister a handler from
3331  *
3332  *	Unregister a receive handler from a device.
3333  *
3334  *	The caller must hold the rtnl_mutex.
3335  */
3336 void netdev_rx_handler_unregister(struct net_device *dev)
3337 {
3338 
3339 	ASSERT_RTNL();
3340 	RCU_INIT_POINTER(dev->rx_handler, NULL);
3341 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3342 }
3343 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3344 
3345 /*
3346  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3347  * the special handling of PFMEMALLOC skbs.
3348  */
3349 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3350 {
3351 	switch (skb->protocol) {
3352 	case __constant_htons(ETH_P_ARP):
3353 	case __constant_htons(ETH_P_IP):
3354 	case __constant_htons(ETH_P_IPV6):
3355 	case __constant_htons(ETH_P_8021Q):
3356 		return true;
3357 	default:
3358 		return false;
3359 	}
3360 }
3361 
3362 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3363 {
3364 	struct packet_type *ptype, *pt_prev;
3365 	rx_handler_func_t *rx_handler;
3366 	struct net_device *orig_dev;
3367 	struct net_device *null_or_dev;
3368 	bool deliver_exact = false;
3369 	int ret = NET_RX_DROP;
3370 	__be16 type;
3371 
3372 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
3373 
3374 	trace_netif_receive_skb(skb);
3375 
3376 	/* if we've gotten here through NAPI, check netpoll */
3377 	if (netpoll_receive_skb(skb))
3378 		goto out;
3379 
3380 	orig_dev = skb->dev;
3381 
3382 	skb_reset_network_header(skb);
3383 	if (!skb_transport_header_was_set(skb))
3384 		skb_reset_transport_header(skb);
3385 	skb_reset_mac_len(skb);
3386 
3387 	pt_prev = NULL;
3388 
3389 	rcu_read_lock();
3390 
3391 another_round:
3392 	skb->skb_iif = skb->dev->ifindex;
3393 
3394 	__this_cpu_inc(softnet_data.processed);
3395 
3396 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3397 		skb = vlan_untag(skb);
3398 		if (unlikely(!skb))
3399 			goto unlock;
3400 	}
3401 
3402 #ifdef CONFIG_NET_CLS_ACT
3403 	if (skb->tc_verd & TC_NCLS) {
3404 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3405 		goto ncls;
3406 	}
3407 #endif
3408 
3409 	if (pfmemalloc)
3410 		goto skip_taps;
3411 
3412 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
3413 		if (!ptype->dev || ptype->dev == skb->dev) {
3414 			if (pt_prev)
3415 				ret = deliver_skb(skb, pt_prev, orig_dev);
3416 			pt_prev = ptype;
3417 		}
3418 	}
3419 
3420 skip_taps:
3421 #ifdef CONFIG_NET_CLS_ACT
3422 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3423 	if (!skb)
3424 		goto unlock;
3425 ncls:
3426 #endif
3427 
3428 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3429 		goto drop;
3430 
3431 	if (vlan_tx_tag_present(skb)) {
3432 		if (pt_prev) {
3433 			ret = deliver_skb(skb, pt_prev, orig_dev);
3434 			pt_prev = NULL;
3435 		}
3436 		if (vlan_do_receive(&skb))
3437 			goto another_round;
3438 		else if (unlikely(!skb))
3439 			goto unlock;
3440 	}
3441 
3442 	rx_handler = rcu_dereference(skb->dev->rx_handler);
3443 	if (rx_handler) {
3444 		if (pt_prev) {
3445 			ret = deliver_skb(skb, pt_prev, orig_dev);
3446 			pt_prev = NULL;
3447 		}
3448 		switch (rx_handler(&skb)) {
3449 		case RX_HANDLER_CONSUMED:
3450 			ret = NET_RX_SUCCESS;
3451 			goto unlock;
3452 		case RX_HANDLER_ANOTHER:
3453 			goto another_round;
3454 		case RX_HANDLER_EXACT:
3455 			deliver_exact = true;
3456 		case RX_HANDLER_PASS:
3457 			break;
3458 		default:
3459 			BUG();
3460 		}
3461 	}
3462 
3463 	if (vlan_tx_nonzero_tag_present(skb))
3464 		skb->pkt_type = PACKET_OTHERHOST;
3465 
3466 	/* deliver only exact match when indicated */
3467 	null_or_dev = deliver_exact ? skb->dev : NULL;
3468 
3469 	type = skb->protocol;
3470 	list_for_each_entry_rcu(ptype,
3471 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3472 		if (ptype->type == type &&
3473 		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3474 		     ptype->dev == orig_dev)) {
3475 			if (pt_prev)
3476 				ret = deliver_skb(skb, pt_prev, orig_dev);
3477 			pt_prev = ptype;
3478 		}
3479 	}
3480 
3481 	if (pt_prev) {
3482 		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3483 			goto drop;
3484 		else
3485 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3486 	} else {
3487 drop:
3488 		atomic_long_inc(&skb->dev->rx_dropped);
3489 		kfree_skb(skb);
3490 		/* Jamal, now you will not be able to escape explaining
3491 		 * to me how you were going to use this. :-)
3492 		 */
3493 		ret = NET_RX_DROP;
3494 	}
3495 
3496 unlock:
3497 	rcu_read_unlock();
3498 out:
3499 	return ret;
3500 }
3501 
3502 static int __netif_receive_skb(struct sk_buff *skb)
3503 {
3504 	int ret;
3505 
3506 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3507 		unsigned long pflags = current->flags;
3508 
3509 		/*
3510 		 * PFMEMALLOC skbs are special, they should
3511 		 * - be delivered to SOCK_MEMALLOC sockets only
3512 		 * - stay away from userspace
3513 		 * - have bounded memory usage
3514 		 *
3515 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
3516 		 * context down to all allocation sites.
3517 		 */
3518 		current->flags |= PF_MEMALLOC;
3519 		ret = __netif_receive_skb_core(skb, true);
3520 		tsk_restore_flags(current, pflags, PF_MEMALLOC);
3521 	} else
3522 		ret = __netif_receive_skb_core(skb, false);
3523 
3524 	return ret;
3525 }
3526 
3527 /**
3528  *	netif_receive_skb - process receive buffer from network
3529  *	@skb: buffer to process
3530  *
3531  *	netif_receive_skb() is the main receive data processing function.
3532  *	It always succeeds. The buffer may be dropped during processing
3533  *	for congestion control or by the protocol layers.
3534  *
3535  *	This function may only be called from softirq context and interrupts
3536  *	should be enabled.
3537  *
3538  *	Return values (usually ignored):
3539  *	NET_RX_SUCCESS: no congestion
3540  *	NET_RX_DROP: packet was dropped
3541  */
3542 int netif_receive_skb(struct sk_buff *skb)
3543 {
3544 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3545 
3546 	if (skb_defer_rx_timestamp(skb))
3547 		return NET_RX_SUCCESS;
3548 
3549 #ifdef CONFIG_RPS
3550 	if (static_key_false(&rps_needed)) {
3551 		struct rps_dev_flow voidflow, *rflow = &voidflow;
3552 		int cpu, ret;
3553 
3554 		rcu_read_lock();
3555 
3556 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
3557 
3558 		if (cpu >= 0) {
3559 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3560 			rcu_read_unlock();
3561 			return ret;
3562 		}
3563 		rcu_read_unlock();
3564 	}
3565 #endif
3566 	return __netif_receive_skb(skb);
3567 }
3568 EXPORT_SYMBOL(netif_receive_skb);
3569 
3570 /* Network device is going away; flush any packets still pending.
3571  * Called with irqs disabled.
3572  */
3573 static void flush_backlog(void *arg)
3574 {
3575 	struct net_device *dev = arg;
3576 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3577 	struct sk_buff *skb, *tmp;
3578 
3579 	rps_lock(sd);
3580 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3581 		if (skb->dev == dev) {
3582 			__skb_unlink(skb, &sd->input_pkt_queue);
3583 			kfree_skb(skb);
3584 			input_queue_head_incr(sd);
3585 		}
3586 	}
3587 	rps_unlock(sd);
3588 
3589 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3590 		if (skb->dev == dev) {
3591 			__skb_unlink(skb, &sd->process_queue);
3592 			kfree_skb(skb);
3593 			input_queue_head_incr(sd);
3594 		}
3595 	}
3596 }
3597 
3598 static int napi_gro_complete(struct sk_buff *skb)
3599 {
3600 	struct packet_offload *ptype;
3601 	__be16 type = skb->protocol;
3602 	struct list_head *head = &offload_base;
3603 	int err = -ENOENT;
3604 
3605 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3606 
3607 	if (NAPI_GRO_CB(skb)->count == 1) {
3608 		skb_shinfo(skb)->gso_size = 0;
3609 		goto out;
3610 	}
3611 
3612 	rcu_read_lock();
3613 	list_for_each_entry_rcu(ptype, head, list) {
3614 		if (ptype->type != type || !ptype->callbacks.gro_complete)
3615 			continue;
3616 
3617 		err = ptype->callbacks.gro_complete(skb);
3618 		break;
3619 	}
3620 	rcu_read_unlock();
3621 
3622 	if (err) {
3623 		WARN_ON(&ptype->list == head);
3624 		kfree_skb(skb);
3625 		return NET_RX_SUCCESS;
3626 	}
3627 
3628 out:
3629 	return netif_receive_skb(skb);
3630 }
3631 
3632 /* napi->gro_list contains packets ordered by age, with the
3633  * youngest packets at the head of the list.
3634  * Complete skbs in reverse order to reduce latencies.
3635  */
3636 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3637 {
3638 	struct sk_buff *skb, *prev = NULL;
3639 
3640 	/* scan list and build reverse chain */
3641 	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3642 		skb->prev = prev;
3643 		prev = skb;
3644 	}
3645 
3646 	for (skb = prev; skb; skb = prev) {
3647 		skb->next = NULL;
3648 
3649 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3650 			return;
3651 
3652 		prev = skb->prev;
3653 		napi_gro_complete(skb);
3654 		napi->gro_count--;
3655 	}
3656 
3657 	napi->gro_list = NULL;
3658 }
3659 EXPORT_SYMBOL(napi_gro_flush);
3660 
3661 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3662 {
3663 	struct sk_buff *p;
3664 	unsigned int maclen = skb->dev->hard_header_len;
3665 
3666 	for (p = napi->gro_list; p; p = p->next) {
3667 		unsigned long diffs;
3668 
3669 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3670 		diffs |= p->vlan_tci ^ skb->vlan_tci;
3671 		if (maclen == ETH_HLEN)
3672 			diffs |= compare_ether_header(skb_mac_header(p),
3673 						      skb_gro_mac_header(skb));
3674 		else if (!diffs)
3675 			diffs = memcmp(skb_mac_header(p),
3676 				       skb_gro_mac_header(skb),
3677 				       maclen);
3678 		NAPI_GRO_CB(p)->same_flow = !diffs;
3679 		NAPI_GRO_CB(p)->flush = 0;
3680 	}
3681 }
3682 
3683 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3684 {
3685 	struct sk_buff **pp = NULL;
3686 	struct packet_offload *ptype;
3687 	__be16 type = skb->protocol;
3688 	struct list_head *head = &offload_base;
3689 	int same_flow;
3690 	enum gro_result ret;
3691 
3692 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3693 		goto normal;
3694 
3695 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
3696 		goto normal;
3697 
3698 	gro_list_prepare(napi, skb);
3699 
3700 	rcu_read_lock();
3701 	list_for_each_entry_rcu(ptype, head, list) {
3702 		if (ptype->type != type || !ptype->callbacks.gro_receive)
3703 			continue;
3704 
3705 		skb_set_network_header(skb, skb_gro_offset(skb));
3706 		skb_reset_mac_len(skb);
3707 		NAPI_GRO_CB(skb)->same_flow = 0;
3708 		NAPI_GRO_CB(skb)->flush = 0;
3709 		NAPI_GRO_CB(skb)->free = 0;
3710 
3711 		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3712 		break;
3713 	}
3714 	rcu_read_unlock();
3715 
3716 	if (&ptype->list == head)
3717 		goto normal;
3718 
3719 	same_flow = NAPI_GRO_CB(skb)->same_flow;
3720 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3721 
3722 	if (pp) {
3723 		struct sk_buff *nskb = *pp;
3724 
3725 		*pp = nskb->next;
3726 		nskb->next = NULL;
3727 		napi_gro_complete(nskb);
3728 		napi->gro_count--;
3729 	}
3730 
3731 	if (same_flow)
3732 		goto ok;
3733 
3734 	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3735 		goto normal;
3736 
3737 	napi->gro_count++;
3738 	NAPI_GRO_CB(skb)->count = 1;
3739 	NAPI_GRO_CB(skb)->age = jiffies;
3740 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3741 	skb->next = napi->gro_list;
3742 	napi->gro_list = skb;
3743 	ret = GRO_HELD;
3744 
3745 pull:
3746 	if (skb_headlen(skb) < skb_gro_offset(skb)) {
3747 		int grow = skb_gro_offset(skb) - skb_headlen(skb);
3748 
3749 		BUG_ON(skb->end - skb->tail < grow);
3750 
3751 		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3752 
3753 		skb->tail += grow;
3754 		skb->data_len -= grow;
3755 
3756 		skb_shinfo(skb)->frags[0].page_offset += grow;
3757 		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3758 
3759 		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3760 			skb_frag_unref(skb, 0);
3761 			memmove(skb_shinfo(skb)->frags,
3762 				skb_shinfo(skb)->frags + 1,
3763 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3764 		}
3765 	}
3766 
3767 ok:
3768 	return ret;
3769 
3770 normal:
3771 	ret = GRO_NORMAL;
3772 	goto pull;
3773 }
3774 
3775 
3776 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3777 {
3778 	switch (ret) {
3779 	case GRO_NORMAL:
3780 		if (netif_receive_skb(skb))
3781 			ret = GRO_DROP;
3782 		break;
3783 
3784 	case GRO_DROP:
3785 		kfree_skb(skb);
3786 		break;
3787 
3788 	case GRO_MERGED_FREE:
3789 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3790 			kmem_cache_free(skbuff_head_cache, skb);
3791 		else
3792 			__kfree_skb(skb);
3793 		break;
3794 
3795 	case GRO_HELD:
3796 	case GRO_MERGED:
3797 		break;
3798 	}
3799 
3800 	return ret;
3801 }
3802 
3803 static void skb_gro_reset_offset(struct sk_buff *skb)
3804 {
3805 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
3806 	const skb_frag_t *frag0 = &pinfo->frags[0];
3807 
3808 	NAPI_GRO_CB(skb)->data_offset = 0;
3809 	NAPI_GRO_CB(skb)->frag0 = NULL;
3810 	NAPI_GRO_CB(skb)->frag0_len = 0;
3811 
3812 	if (skb->mac_header == skb->tail &&
3813 	    pinfo->nr_frags &&
3814 	    !PageHighMem(skb_frag_page(frag0))) {
3815 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3816 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3817 	}
3818 }
3819 
3820 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3821 {
3822 	skb_gro_reset_offset(skb);
3823 
3824 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
3825 }
3826 EXPORT_SYMBOL(napi_gro_receive);
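/* Example (illustrative sketch, not from an in-tree driver): a NAPI poll
 * routine typically hands completed receive buffers to GRO through
 * napi_gro_receive().  The mydrv_* names and the ring walk are
 * hypothetical; only the calls into this file are real API.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_ring *ring = container_of(napi,
 *						       struct mydrv_ring, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = mydrv_next_rx_skb(ring);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */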
3827 
3828 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3829 {
3830 	__skb_pull(skb, skb_headlen(skb));
3831 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
3832 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3833 	skb->vlan_tci = 0;
3834 	skb->dev = napi->dev;
3835 	skb->skb_iif = 0;
3836 
3837 	napi->skb = skb;
3838 }
3839 
3840 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3841 {
3842 	struct sk_buff *skb = napi->skb;
3843 
3844 	if (!skb) {
3845 		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3846 		if (skb)
3847 			napi->skb = skb;
3848 	}
3849 	return skb;
3850 }
3851 EXPORT_SYMBOL(napi_get_frags);
3852 
3853 static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3854 			       gro_result_t ret)
3855 {
3856 	switch (ret) {
3857 	case GRO_NORMAL:
3858 	case GRO_HELD:
3859 		skb->protocol = eth_type_trans(skb, skb->dev);
3860 
3861 		if (ret == GRO_HELD)
3862 			skb_gro_pull(skb, -ETH_HLEN);
3863 		else if (netif_receive_skb(skb))
3864 			ret = GRO_DROP;
3865 		break;
3866 
3867 	case GRO_DROP:
3868 	case GRO_MERGED_FREE:
3869 		napi_reuse_skb(napi, skb);
3870 		break;
3871 
3872 	case GRO_MERGED:
3873 		break;
3874 	}
3875 
3876 	return ret;
3877 }
3878 
3879 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3880 {
3881 	struct sk_buff *skb = napi->skb;
3882 	struct ethhdr *eth;
3883 	unsigned int hlen;
3884 	unsigned int off;
3885 
3886 	napi->skb = NULL;
3887 
3888 	skb_reset_mac_header(skb);
3889 	skb_gro_reset_offset(skb);
3890 
3891 	off = skb_gro_offset(skb);
3892 	hlen = off + sizeof(*eth);
3893 	eth = skb_gro_header_fast(skb, off);
3894 	if (skb_gro_header_hard(skb, hlen)) {
3895 		eth = skb_gro_header_slow(skb, hlen, off);
3896 		if (unlikely(!eth)) {
3897 			napi_reuse_skb(napi, skb);
3898 			skb = NULL;
3899 			goto out;
3900 		}
3901 	}
3902 
3903 	skb_gro_pull(skb, sizeof(*eth));
3904 
3905 	/*
3906 	 * This works because the only protocols we care about don't require
3907 	 * special handling.  We'll fix it up properly at the end.
3908 	 */
3909 	skb->protocol = eth->h_proto;
3910 
3911 out:
3912 	return skb;
3913 }
3914 
3915 gro_result_t napi_gro_frags(struct napi_struct *napi)
3916 {
3917 	struct sk_buff *skb = napi_frags_skb(napi);
3918 
3919 	if (!skb)
3920 		return GRO_DROP;
3921 
3922 	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
3923 }
3924 EXPORT_SYMBOL(napi_gro_frags);
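/* Example (illustrative sketch): drivers that receive directly into page
 * fragments pair napi_get_frags() with napi_gro_frags().  The skb is owned
 * by the napi instance between the two calls; the driver only attaches its
 * pages.  page/offset/len are hypothetical values from the RX descriptor.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb))
 *		return;				// drop: allocation failed
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);			// consumes or recycles the skb
 */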
3925 
3926 /*
3927  * net_rps_action sends any pending IPIs for RPS.
3928  * Note: called with local irq disabled, but exits with local irq enabled.
3929  */
3930 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3931 {
3932 #ifdef CONFIG_RPS
3933 	struct softnet_data *remsd = sd->rps_ipi_list;
3934 
3935 	if (remsd) {
3936 		sd->rps_ipi_list = NULL;
3937 
3938 		local_irq_enable();
3939 
3940 		/* Send pending IPIs to kick RPS processing on remote CPUs. */
3941 		while (remsd) {
3942 			struct softnet_data *next = remsd->rps_ipi_next;
3943 
3944 			if (cpu_online(remsd->cpu))
3945 				__smp_call_function_single(remsd->cpu,
3946 							   &remsd->csd, 0);
3947 			remsd = next;
3948 		}
3949 	} else
3950 #endif
3951 		local_irq_enable();
3952 }
3953 
3954 static int process_backlog(struct napi_struct *napi, int quota)
3955 {
3956 	int work = 0;
3957 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3958 
3959 #ifdef CONFIG_RPS
3960 	/* Check if we have pending IPIs; it's better to send them now
3961 	 * rather than waiting for net_rx_action() to end.
3962 	 */
3963 	if (sd->rps_ipi_list) {
3964 		local_irq_disable();
3965 		net_rps_action_and_irq_enable(sd);
3966 	}
3967 #endif
3968 	napi->weight = weight_p;
3969 	local_irq_disable();
3970 	while (work < quota) {
3971 		struct sk_buff *skb;
3972 		unsigned int qlen;
3973 
3974 		while ((skb = __skb_dequeue(&sd->process_queue))) {
3975 			local_irq_enable();
3976 			__netif_receive_skb(skb);
3977 			local_irq_disable();
3978 			input_queue_head_incr(sd);
3979 			if (++work >= quota) {
3980 				local_irq_enable();
3981 				return work;
3982 			}
3983 		}
3984 
3985 		rps_lock(sd);
3986 		qlen = skb_queue_len(&sd->input_pkt_queue);
3987 		if (qlen)
3988 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
3989 						   &sd->process_queue);
3990 
3991 		if (qlen < quota - work) {
3992 			/*
3993 			 * Inline a custom version of __napi_complete().
3994 			 * Only the current cpu owns and manipulates this napi,
3995 			 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3996 			 * so we can use a plain write instead of clear_bit()
3997 			 * and we don't need an smp_mb() memory barrier.
3998 			 */
3999 			list_del(&napi->poll_list);
4000 			napi->state = 0;
4001 
4002 			quota = work + qlen;
4003 		}
4004 		rps_unlock(sd);
4005 	}
4006 	local_irq_enable();
4007 
4008 	return work;
4009 }
4010 
4011 /**
4012  * __napi_schedule - schedule for receive
4013  * @n: entry to schedule
4014  *
4015  * The entry's receive function will be scheduled to run
4016  */
4017 void __napi_schedule(struct napi_struct *n)
4018 {
4019 	unsigned long flags;
4020 
4021 	local_irq_save(flags);
4022 	____napi_schedule(&__get_cpu_var(softnet_data), n);
4023 	local_irq_restore(flags);
4024 }
4025 EXPORT_SYMBOL(__napi_schedule);
4026 
4027 void __napi_complete(struct napi_struct *n)
4028 {
4029 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4030 	BUG_ON(n->gro_list);
4031 
4032 	list_del(&n->poll_list);
4033 	smp_mb__before_clear_bit();
4034 	clear_bit(NAPI_STATE_SCHED, &n->state);
4035 }
4036 EXPORT_SYMBOL(__napi_complete);
4037 
4038 void napi_complete(struct napi_struct *n)
4039 {
4040 	unsigned long flags;
4041 
4042 	/*
4043 	 * Don't let napi dequeue from the cpu poll list
4044 	 * just in case it's running on a different cpu.
4045 	 */
4046 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4047 		return;
4048 
4049 	napi_gro_flush(n, false);
4050 	local_irq_save(flags);
4051 	__napi_complete(n);
4052 	local_irq_restore(flags);
4053 }
4054 EXPORT_SYMBOL(napi_complete);
4055 
4056 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4057 		    int (*poll)(struct napi_struct *, int), int weight)
4058 {
4059 	INIT_LIST_HEAD(&napi->poll_list);
4060 	napi->gro_count = 0;
4061 	napi->gro_list = NULL;
4062 	napi->skb = NULL;
4063 	napi->poll = poll;
4064 	if (weight > NAPI_POLL_WEIGHT)
4065 		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4066 			    weight, dev->name);
4067 	napi->weight = weight;
4068 	list_add(&napi->dev_list, &dev->napi_list);
4069 	napi->dev = dev;
4070 #ifdef CONFIG_NETPOLL
4071 	spin_lock_init(&napi->poll_lock);
4072 	napi->poll_owner = -1;
4073 #endif
4074 	set_bit(NAPI_STATE_SCHED, &napi->state);
4075 }
4076 EXPORT_SYMBOL(netif_napi_add);
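/* Example (illustrative sketch) of the usual NAPI life cycle around
 * netif_napi_add(): register at probe time, schedule from the interrupt
 * handler, complete from the poll routine once the ring runs dry.  The
 * mydrv_* helpers and the priv layout are hypothetical.
 *
 *	// at probe time:
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
 *
 *	// in the interrupt handler:
 *	if (napi_schedule_prep(&priv->napi)) {
 *		mydrv_disable_rx_irq(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 *
 *	// at the end of mydrv_poll(), when work < budget:
 *	napi_complete(&priv->napi);
 *	mydrv_enable_rx_irq(priv);
 */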
4077 
4078 void netif_napi_del(struct napi_struct *napi)
4079 {
4080 	struct sk_buff *skb, *next;
4081 
4082 	list_del_init(&napi->dev_list);
4083 	napi_free_frags(napi);
4084 
4085 	for (skb = napi->gro_list; skb; skb = next) {
4086 		next = skb->next;
4087 		skb->next = NULL;
4088 		kfree_skb(skb);
4089 	}
4090 
4091 	napi->gro_list = NULL;
4092 	napi->gro_count = 0;
4093 }
4094 EXPORT_SYMBOL(netif_napi_del);
4095 
4096 static void net_rx_action(struct softirq_action *h)
4097 {
4098 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
4099 	unsigned long time_limit = jiffies + 2;
4100 	int budget = netdev_budget;
4101 	void *have;
4102 
4103 	local_irq_disable();
4104 
4105 	while (!list_empty(&sd->poll_list)) {
4106 		struct napi_struct *n;
4107 		int work, weight;
4108 
4109 		/* If the softirq window is exhausted then punt.
4110 		 * Allow this to run for 2 jiffies, which allows
4111 		 * an average latency of 1.5/HZ.
4112 		 */
4113 		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4114 			goto softnet_break;
4115 
4116 		local_irq_enable();
4117 
4118 		/* Even though interrupts have been re-enabled, this
4119 		 * access is safe because interrupts can only add new
4120 		 * entries to the tail of this list, and only ->poll()
4121 		 * calls can remove this head entry from the list.
4122 		 */
4123 		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4124 
4125 		have = netpoll_poll_lock(n);
4126 
4127 		weight = n->weight;
4128 
4129 		/* This NAPI_STATE_SCHED test is for avoiding a race
4130 		 * with netpoll's poll_napi().  Only the entity which
4131 		 * obtains the lock and sees NAPI_STATE_SCHED set will
4132 		 * actually make the ->poll() call.  Therefore we avoid
4133 		 * accidentally calling ->poll() when NAPI is not scheduled.
4134 		 */
4135 		work = 0;
4136 		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4137 			work = n->poll(n, weight);
4138 			trace_napi_poll(n);
4139 		}
4140 
4141 		WARN_ON_ONCE(work > weight);
4142 
4143 		budget -= work;
4144 
4145 		local_irq_disable();
4146 
4147 		/* Drivers must not modify the NAPI state if they
4148 		 * consume the entire weight.  In such cases this code
4149 		 * still "owns" the NAPI instance and therefore can
4150 		 * move the instance around on the list at-will.
4151 		 */
4152 		if (unlikely(work == weight)) {
4153 			if (unlikely(napi_disable_pending(n))) {
4154 				local_irq_enable();
4155 				napi_complete(n);
4156 				local_irq_disable();
4157 			} else {
4158 				if (n->gro_list) {
4159 					/* Flush packets that are too old.
4160 					 * If HZ < 1000, flush all packets.
4161 					 */
4162 					local_irq_enable();
4163 					napi_gro_flush(n, HZ >= 1000);
4164 					local_irq_disable();
4165 				}
4166 				list_move_tail(&n->poll_list, &sd->poll_list);
4167 			}
4168 		}
4169 
4170 		netpoll_poll_unlock(have);
4171 	}
4172 out:
4173 	net_rps_action_and_irq_enable(sd);
4174 
4175 #ifdef CONFIG_NET_DMA
4176 	/*
4177 	 * There may not be any more sk_buffs coming right now, so push
4178 	 * any pending DMA copies to hardware
4179 	 */
4180 	dma_issue_pending_all();
4181 #endif
4182 
4183 	return;
4184 
4185 softnet_break:
4186 	sd->time_squeeze++;
4187 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4188 	goto out;
4189 }
4190 
4191 struct netdev_upper {
4192 	struct net_device *dev;
4193 	bool master;
4194 	struct list_head list;
4195 	struct rcu_head rcu;
4196 	struct list_head search_list;
4197 };
4198 
4199 static void __append_search_uppers(struct list_head *search_list,
4200 				   struct net_device *dev)
4201 {
4202 	struct netdev_upper *upper;
4203 
4204 	list_for_each_entry(upper, &dev->upper_dev_list, list) {
4205 		/* check if this upper is not already in search list */
4206 		/* Check that this upper is not already on the search list. */
4207 			list_add_tail(&upper->search_list, search_list);
4208 	}
4209 }
4210 
4211 static bool __netdev_search_upper_dev(struct net_device *dev,
4212 				      struct net_device *upper_dev)
4213 {
4214 	LIST_HEAD(search_list);
4215 	struct netdev_upper *upper;
4216 	struct netdev_upper *tmp;
4217 	bool ret = false;
4218 
4219 	__append_search_uppers(&search_list, dev);
4220 	list_for_each_entry(upper, &search_list, search_list) {
4221 		if (upper->dev == upper_dev) {
4222 			ret = true;
4223 			break;
4224 		}
4225 		__append_search_uppers(&search_list, upper->dev);
4226 	}
4227 	list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4228 		INIT_LIST_HEAD(&upper->search_list);
4229 	return ret;
4230 }
4231 
4232 static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4233 						struct net_device *upper_dev)
4234 {
4235 	struct netdev_upper *upper;
4236 
4237 	list_for_each_entry(upper, &dev->upper_dev_list, list) {
4238 		if (upper->dev == upper_dev)
4239 			return upper;
4240 	}
4241 	return NULL;
4242 }
4243 
4244 /**
4245  * netdev_has_upper_dev - Check if device is linked to an upper device
4246  * @dev: device
4247  * @upper_dev: upper device to check
4248  *
4249  * Find out if a device is linked to the specified upper device and return true
4250  * in case it is. Note that this checks only immediate upper device,
4251  * not through a complete stack of devices. The caller must hold the RTNL lock.
4252  */
4253 bool netdev_has_upper_dev(struct net_device *dev,
4254 			  struct net_device *upper_dev)
4255 {
4256 	ASSERT_RTNL();
4257 
4258 	return __netdev_find_upper(dev, upper_dev);
4259 }
4260 EXPORT_SYMBOL(netdev_has_upper_dev);
4261 
4262 /**
4263  * netdev_has_any_upper_dev - Check if device is linked to some device
4264  * @dev: device
4265  *
4266  * Find out if a device is linked to an upper device and return true in case
4267  * it is. The caller must hold the RTNL lock.
4268  */
4269 bool netdev_has_any_upper_dev(struct net_device *dev)
4270 {
4271 	ASSERT_RTNL();
4272 
4273 	return !list_empty(&dev->upper_dev_list);
4274 }
4275 EXPORT_SYMBOL(netdev_has_any_upper_dev);
4276 
4277 /**
4278  * netdev_master_upper_dev_get - Get master upper device
4279  * @dev: device
4280  *
4281  * Find a master upper device and return pointer to it or NULL in case
4282  * it's not there. The caller must hold the RTNL lock.
4283  */
4284 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4285 {
4286 	struct netdev_upper *upper;
4287 
4288 	ASSERT_RTNL();
4289 
4290 	if (list_empty(&dev->upper_dev_list))
4291 		return NULL;
4292 
4293 	upper = list_first_entry(&dev->upper_dev_list,
4294 				 struct netdev_upper, list);
4295 	if (likely(upper->master))
4296 		return upper->dev;
4297 	return NULL;
4298 }
4299 EXPORT_SYMBOL(netdev_master_upper_dev_get);
4300 
4301 /**
4302  * netdev_master_upper_dev_get_rcu - Get master upper device
4303  * @dev: device
4304  *
4305  * Find a master upper device and return pointer to it or NULL in case
4306  * it's not there. The caller must hold the RCU read lock.
4307  */
4308 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4309 {
4310 	struct netdev_upper *upper;
4311 
4312 	upper = list_first_or_null_rcu(&dev->upper_dev_list,
4313 				       struct netdev_upper, list);
4314 	if (upper && likely(upper->master))
4315 		return upper->dev;
4316 	return NULL;
4317 }
4318 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4319 
4320 static int __netdev_upper_dev_link(struct net_device *dev,
4321 				   struct net_device *upper_dev, bool master)
4322 {
4323 	struct netdev_upper *upper;
4324 
4325 	ASSERT_RTNL();
4326 
4327 	if (dev == upper_dev)
4328 		return -EBUSY;
4329 
4330 	/* To prevent loops, check that dev is not an upper device of upper_dev. */
4331 	if (__netdev_search_upper_dev(upper_dev, dev))
4332 		return -EBUSY;
4333 
4334 	if (__netdev_find_upper(dev, upper_dev))
4335 		return -EEXIST;
4336 
4337 	if (master && netdev_master_upper_dev_get(dev))
4338 		return -EBUSY;
4339 
4340 	upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4341 	if (!upper)
4342 		return -ENOMEM;
4343 
4344 	upper->dev = upper_dev;
4345 	upper->master = master;
4346 	INIT_LIST_HEAD(&upper->search_list);
4347 
4348 	/* Ensure that the master upper link is always the first item in the list. */
4349 	if (master)
4350 		list_add_rcu(&upper->list, &dev->upper_dev_list);
4351 	else
4352 		list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4353 	dev_hold(upper_dev);
4354 
4355 	return 0;
4356 }
4357 
4358 /**
4359  * netdev_upper_dev_link - Add a link to the upper device
4360  * @dev: device
4361  * @upper_dev: new upper device
4362  *
4363  *	Adds a link to a device which is upper to this one. The caller must hold
4364  * the RTNL lock. On a failure a negative errno code is returned.
4365  * On success the reference counts are adjusted and the function
4366  * returns zero.
4367  */
4368 int netdev_upper_dev_link(struct net_device *dev,
4369 			  struct net_device *upper_dev)
4370 {
4371 	return __netdev_upper_dev_link(dev, upper_dev, false);
4372 }
4373 EXPORT_SYMBOL(netdev_upper_dev_link);
4374 
4375 /**
4376  * netdev_master_upper_dev_link - Add a master link to the upper device
4377  * @dev: device
4378  * @upper_dev: new upper device
4379  *
4380  *	Adds a link to a device which is upper to this one. In this case, only
4381  * one master upper device can be linked, although other non-master devices
4382  * might be linked as well. The caller must hold the RTNL lock.
4383  * On a failure a negative errno code is returned. On success the reference
4384  * counts are adjusted and the function returns zero.
4385  */
4386 int netdev_master_upper_dev_link(struct net_device *dev,
4387 				 struct net_device *upper_dev)
4388 {
4389 	return __netdev_upper_dev_link(dev, upper_dev, true);
4390 }
4391 EXPORT_SYMBOL(netdev_master_upper_dev_link);
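/* Example (illustrative sketch): a bonding/team-style driver enslaving a
 * port links itself as the master upper device while holding RTNL, and
 * unlinks symmetrically on release.  port_dev/master_dev are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(port_dev, master_dev);
 *	if (err)
 *		return err;		// -EBUSY, -EEXIST or -ENOMEM
 *	...
 *	netdev_upper_dev_unlink(port_dev, master_dev);
 */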
4392 
4393 /**
4394  * netdev_upper_dev_unlink - Removes a link to upper device
4395  * @dev: device
4396  * @upper_dev: new upper device
4397  * @upper_dev: upper device to unlink
4398  * Removes a link to device which is upper to this one. The caller must hold
4399  * Removes a link to a device which is upper to this one. The caller must hold
4400  */
4401 void netdev_upper_dev_unlink(struct net_device *dev,
4402 			     struct net_device *upper_dev)
4403 {
4404 	struct netdev_upper *upper;
4405 
4406 	ASSERT_RTNL();
4407 
4408 	upper = __netdev_find_upper(dev, upper_dev);
4409 	if (!upper)
4410 		return;
4411 	list_del_rcu(&upper->list);
4412 	dev_put(upper_dev);
4413 	kfree_rcu(upper, rcu);
4414 }
4415 EXPORT_SYMBOL(netdev_upper_dev_unlink);
4416 
4417 static void dev_change_rx_flags(struct net_device *dev, int flags)
4418 {
4419 	const struct net_device_ops *ops = dev->netdev_ops;
4420 
4421 	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4422 		ops->ndo_change_rx_flags(dev, flags);
4423 }
4424 
4425 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4426 {
4427 	unsigned int old_flags = dev->flags;
4428 	kuid_t uid;
4429 	kgid_t gid;
4430 
4431 	ASSERT_RTNL();
4432 
4433 	dev->flags |= IFF_PROMISC;
4434 	dev->promiscuity += inc;
4435 	if (dev->promiscuity == 0) {
4436 		/*
4437 		 * Avoid overflow.
4438 		 * If inc causes an overflow, leave promisc untouched and return an error.
4439 		 */
4440 		if (inc < 0)
4441 			dev->flags &= ~IFF_PROMISC;
4442 		else {
4443 			dev->promiscuity -= inc;
4444 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4445 				dev->name);
4446 			return -EOVERFLOW;
4447 		}
4448 	}
4449 	if (dev->flags != old_flags) {
4450 		pr_info("device %s %s promiscuous mode\n",
4451 			dev->name,
4452 			dev->flags & IFF_PROMISC ? "entered" : "left");
4453 		if (audit_enabled) {
4454 			current_uid_gid(&uid, &gid);
4455 			audit_log(current->audit_context, GFP_ATOMIC,
4456 				AUDIT_ANOM_PROMISCUOUS,
4457 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4458 				dev->name, (dev->flags & IFF_PROMISC),
4459 				(old_flags & IFF_PROMISC),
4460 				from_kuid(&init_user_ns, audit_get_loginuid(current)),
4461 				from_kuid(&init_user_ns, uid),
4462 				from_kgid(&init_user_ns, gid),
4463 				audit_get_sessionid(current));
4464 		}
4465 
4466 		dev_change_rx_flags(dev, IFF_PROMISC);
4467 	}
4468 	return 0;
4469 }
4470 
4471 /**
4472  *	dev_set_promiscuity	- update promiscuity count on a device
4473  *	@dev: device
4474  *	@inc: modifier
4475  *
4476  *	Add or remove promiscuity from a device. While the count in the device
4477  *	remains above zero the interface remains promiscuous. Once it hits zero
4478  *	the device reverts back to normal filtering operation. A negative inc
4479  *	value is used to drop promiscuity on the device.
4480  *	Return 0 if successful or a negative errno code on error.
4481  */
4482 int dev_set_promiscuity(struct net_device *dev, int inc)
4483 {
4484 	unsigned int old_flags = dev->flags;
4485 	int err;
4486 
4487 	err = __dev_set_promiscuity(dev, inc);
4488 	if (err < 0)
4489 		return err;
4490 	if (dev->flags != old_flags)
4491 		dev_set_rx_mode(dev);
4492 	return err;
4493 }
4494 EXPORT_SYMBOL(dev_set_promiscuity);
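/* Example (illustrative sketch): a packet-capture style user bumps the
 * promiscuity count while capturing and drops it symmetrically when done,
 * holding RTNL around each call.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// enter promiscuous mode
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// drop our reference again
 *	rtnl_unlock();
 */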
4495 
4496 /**
4497  *	dev_set_allmulti	- update allmulti count on a device
4498  *	@dev: device
4499  *	@inc: modifier
4500  *
4501  *	Add or remove reception of all multicast frames to a device. While the
4502  *	count in the device remains above zero the interface remains listening
4503  *	to all multicast frames. Once it hits zero the device reverts back to normal
4504  *	filtering operation. A negative @inc value is used to drop the counter
4505  *	when releasing a resource needing all multicasts.
4506  *	Return 0 if successful or a negative errno code on error.
4507  */
4508 
4509 int dev_set_allmulti(struct net_device *dev, int inc)
4510 {
4511 	unsigned int old_flags = dev->flags;
4512 
4513 	ASSERT_RTNL();
4514 
4515 	dev->flags |= IFF_ALLMULTI;
4516 	dev->allmulti += inc;
4517 	if (dev->allmulti == 0) {
4518 		/*
4519 		 * Avoid overflow.
4520 		 * If inc causes an overflow, leave allmulti untouched and return an error.
4521 		 */
4522 		if (inc < 0)
4523 			dev->flags &= ~IFF_ALLMULTI;
4524 		else {
4525 			dev->allmulti -= inc;
4526 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4527 				dev->name);
4528 			return -EOVERFLOW;
4529 		}
4530 	}
4531 	if (dev->flags ^ old_flags) {
4532 		dev_change_rx_flags(dev, IFF_ALLMULTI);
4533 		dev_set_rx_mode(dev);
4534 	}
4535 	return 0;
4536 }
4537 EXPORT_SYMBOL(dev_set_allmulti);
4538 
4539 /*
4540  *	Upload unicast and multicast address lists to device and
4541  *	configure RX filtering. When the device doesn't support unicast
4542  *	filtering it is put in promiscuous mode while unicast addresses
4543  *	are present.
4544  */
4545 void __dev_set_rx_mode(struct net_device *dev)
4546 {
4547 	const struct net_device_ops *ops = dev->netdev_ops;
4548 
4549 	/* dev_open will call this function so the list will stay sane. */
4550 	if (!(dev->flags&IFF_UP))
4551 		return;
4552 
4553 	if (!netif_device_present(dev))
4554 		return;
4555 
4556 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4557 		/* Unicast address changes may only happen under the rtnl,
4558 		 * therefore calling __dev_set_promiscuity here is safe.
4559 		 */
4560 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4561 			__dev_set_promiscuity(dev, 1);
4562 			dev->uc_promisc = true;
4563 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4564 			__dev_set_promiscuity(dev, -1);
4565 			dev->uc_promisc = false;
4566 		}
4567 	}
4568 
4569 	if (ops->ndo_set_rx_mode)
4570 		ops->ndo_set_rx_mode(dev);
4571 }
4572 
4573 void dev_set_rx_mode(struct net_device *dev)
4574 {
4575 	netif_addr_lock_bh(dev);
4576 	__dev_set_rx_mode(dev);
4577 	netif_addr_unlock_bh(dev);
4578 }
4579 
4580 /**
4581  *	dev_get_flags - get flags reported to userspace
4582  *	@dev: device
4583  *
4584  *	Get the combination of flag bits exported through APIs to userspace.
4585  */
4586 unsigned int dev_get_flags(const struct net_device *dev)
4587 {
4588 	unsigned int flags;
4589 
4590 	flags = (dev->flags & ~(IFF_PROMISC |
4591 				IFF_ALLMULTI |
4592 				IFF_RUNNING |
4593 				IFF_LOWER_UP |
4594 				IFF_DORMANT)) |
4595 		(dev->gflags & (IFF_PROMISC |
4596 				IFF_ALLMULTI));
4597 
4598 	if (netif_running(dev)) {
4599 		if (netif_oper_up(dev))
4600 			flags |= IFF_RUNNING;
4601 		if (netif_carrier_ok(dev))
4602 			flags |= IFF_LOWER_UP;
4603 		if (netif_dormant(dev))
4604 			flags |= IFF_DORMANT;
4605 	}
4606 
4607 	return flags;
4608 }
4609 EXPORT_SYMBOL(dev_get_flags);
4610 
4611 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4612 {
4613 	unsigned int old_flags = dev->flags;
4614 	int ret;
4615 
4616 	ASSERT_RTNL();
4617 
4618 	/*
4619 	 *	Set the flags on our device.
4620 	 */
4621 
4622 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4623 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4624 			       IFF_AUTOMEDIA)) |
4625 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4626 				    IFF_ALLMULTI));
4627 
4628 	/*
4629 	 *	Load in the correct multicast list now the flags have changed.
4630 	 *	Load in the correct multicast list now that the flags have changed.
4631 
4632 	if ((old_flags ^ flags) & IFF_MULTICAST)
4633 		dev_change_rx_flags(dev, IFF_MULTICAST);
4634 
4635 	dev_set_rx_mode(dev);
4636 
4637 	/*
4638 	 *	Have we downed the interface? We handle IFF_UP ourselves
4639 	 *	according to user attempts to set it, rather than blindly
4640 	 *	setting it.
4641 	 */
4642 
4643 	ret = 0;
4644 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4645 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4646 
4647 		if (!ret)
4648 			dev_set_rx_mode(dev);
4649 	}
4650 
4651 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
4652 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
4653 
4654 		dev->gflags ^= IFF_PROMISC;
4655 		dev_set_promiscuity(dev, inc);
4656 	}
4657 
4658 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4659 	   is important. Some (broken) drivers set IFF_PROMISC when
4660 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4661 	 */
4662 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4663 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4664 
4665 		dev->gflags ^= IFF_ALLMULTI;
4666 		dev_set_allmulti(dev, inc);
4667 	}
4668 
4669 	return ret;
4670 }
4671 
4672 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4673 {
4674 	unsigned int changes = dev->flags ^ old_flags;
4675 
4676 	if (changes & IFF_UP) {
4677 		if (dev->flags & IFF_UP)
4678 			call_netdevice_notifiers(NETDEV_UP, dev);
4679 		else
4680 			call_netdevice_notifiers(NETDEV_DOWN, dev);
4681 	}
4682 
4683 	if (dev->flags & IFF_UP &&
4684 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4685 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
4686 }
4687 
4688 /**
4689  *	dev_change_flags - change device settings
4690  *	@dev: device
4691  *	@flags: device state flags
4692  *
4693  *	Change settings on device based state flags. The flags are
4694  *	in the userspace exported format.
4695  */
4696 int dev_change_flags(struct net_device *dev, unsigned int flags)
4697 {
4698 	int ret;
4699 	unsigned int changes, old_flags = dev->flags;
4700 
4701 	ret = __dev_change_flags(dev, flags);
4702 	if (ret < 0)
4703 		return ret;
4704 
4705 	changes = old_flags ^ dev->flags;
4706 	if (changes)
4707 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4708 
4709 	__dev_notify_flags(dev, old_flags);
4710 	return ret;
4711 }
4712 EXPORT_SYMBOL(dev_change_flags);
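/* Example (illustrative sketch): bringing an interface administratively up
 * is a read-modify-write of the userspace-format flags under RTNL, much as
 * the SIOCSIFFLAGS ioctl path does it.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */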
4713 
4714 /**
4715  *	dev_set_mtu - Change maximum transfer unit
4716  *	@dev: device
4717  *	@new_mtu: new transfer unit
4718  *
4719  *	Change the maximum transfer size of the network device.
4720  */
4721 int dev_set_mtu(struct net_device *dev, int new_mtu)
4722 {
4723 	const struct net_device_ops *ops = dev->netdev_ops;
4724 	int err;
4725 
4726 	if (new_mtu == dev->mtu)
4727 		return 0;
4728 
4729 	/*	MTU must not be negative.	 */
4730 	if (new_mtu < 0)
4731 		return -EINVAL;
4732 
4733 	if (!netif_device_present(dev))
4734 		return -ENODEV;
4735 
4736 	err = 0;
4737 	if (ops->ndo_change_mtu)
4738 		err = ops->ndo_change_mtu(dev, new_mtu);
4739 	else
4740 		dev->mtu = new_mtu;
4741 
4742 	if (!err)
4743 		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4744 	return err;
4745 }
4746 EXPORT_SYMBOL(dev_set_mtu);
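/* Example (illustrative sketch): callers change the MTU under RTNL and must
 * be prepared for the driver's ndo_change_mtu() to reject the value.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);		// request jumbo frames
 *	rtnl_unlock();
 *	if (err)
 *		netdev_warn(dev, "MTU change rejected: %d\n", err);
 */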
4747 
4748 /**
4749  *	dev_set_group - Change group this device belongs to
4750  *	@dev: device
4751  *	@new_group: group this device should belong to
4752  */
4753 void dev_set_group(struct net_device *dev, int new_group)
4754 {
4755 	dev->group = new_group;
4756 }
4757 EXPORT_SYMBOL(dev_set_group);
4758 
4759 /**
4760  *	dev_set_mac_address - Change Media Access Control Address
4761  *	@dev: device
4762  *	@sa: new address
4763  *
4764  *	Change the hardware (MAC) address of the device
4765  */
4766 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4767 {
4768 	const struct net_device_ops *ops = dev->netdev_ops;
4769 	int err;
4770 
4771 	if (!ops->ndo_set_mac_address)
4772 		return -EOPNOTSUPP;
4773 	if (sa->sa_family != dev->type)
4774 		return -EINVAL;
4775 	if (!netif_device_present(dev))
4776 		return -ENODEV;
4777 	err = ops->ndo_set_mac_address(dev, sa);
4778 	if (err)
4779 		return err;
4780 	dev->addr_assign_type = NET_ADDR_SET;
4781 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4782 	add_device_randomness(dev->dev_addr, dev->addr_len);
4783 	return 0;
4784 }
4785 EXPORT_SYMBOL(dev_set_mac_address);
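/* Example (illustrative sketch): the new address travels in a struct
 * sockaddr whose sa_family must match dev->type (ARPHRD_ETHER for
 * Ethernet).  new_mac is a hypothetical u8[ETH_ALEN] buffer.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */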
4786 
4787 /**
4788  *	dev_change_carrier - Change device carrier
4789  *	@dev: device
4790  *	@new_carrier: new value
4791  *
4792  *	Change device carrier
4793  */
4794 int dev_change_carrier(struct net_device *dev, bool new_carrier)
4795 {
4796 	const struct net_device_ops *ops = dev->netdev_ops;
4797 
4798 	if (!ops->ndo_change_carrier)
4799 		return -EOPNOTSUPP;
4800 	if (!netif_device_present(dev))
4801 		return -ENODEV;
4802 	return ops->ndo_change_carrier(dev, new_carrier);
4803 }
4804 EXPORT_SYMBOL(dev_change_carrier);
4805 
4806 /**
4807  *	dev_new_index	-	allocate an ifindex
4808  *	@net: the applicable net namespace
4809  *
4810  *	Returns a suitable unique value for a new device interface
4811  *	number.  The caller must hold the rtnl semaphore or the
4812  *	dev_base_lock to be sure it remains unique.
4813  */
4814 static int dev_new_index(struct net *net)
4815 {
4816 	int ifindex = net->ifindex;
4817 	for (;;) {
4818 		if (++ifindex <= 0)
4819 			ifindex = 1;
4820 		if (!__dev_get_by_index(net, ifindex))
4821 			return net->ifindex = ifindex;
4822 	}
4823 }
4824 
4825 /* Delayed registration/unregistration */
4826 static LIST_HEAD(net_todo_list);
4827 
4828 static void net_set_todo(struct net_device *dev)
4829 {
4830 	list_add_tail(&dev->todo_list, &net_todo_list);
4831 }
4832 
4833 static void rollback_registered_many(struct list_head *head)
4834 {
4835 	struct net_device *dev, *tmp;
4836 
4837 	BUG_ON(dev_boot_phase);
4838 	ASSERT_RTNL();
4839 
4840 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4841 		/* Some devices get unregistered without ever having been
4842 		 * registered, as part of initialization unwind. Remove those
4843 		 * devices and proceed with the remaining.
4844 		 */
4845 		if (dev->reg_state == NETREG_UNINITIALIZED) {
4846 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
4847 				 dev->name, dev);
4848 
4849 			WARN_ON(1);
4850 			list_del(&dev->unreg_list);
4851 			continue;
4852 		}
4853 		dev->dismantle = true;
4854 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
4855 	}
4856 
4857 	/* If device is running, close it first. */
4858 	dev_close_many(head);
4859 
4860 	list_for_each_entry(dev, head, unreg_list) {
4861 		/* And unlink it from device chain. */
4862 		unlist_netdevice(dev);
4863 
4864 		dev->reg_state = NETREG_UNREGISTERING;
4865 	}
4866 
4867 	synchronize_net();
4868 
4869 	list_for_each_entry(dev, head, unreg_list) {
4870 		/* Shutdown queueing discipline. */
4871 		dev_shutdown(dev);
4872 
4873 
4874 		/* Notify protocols that we are about to destroy
4875 		   this device. They should clean up all their state.
4876 		*/
4877 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4878 
4879 		if (!dev->rtnl_link_ops ||
4880 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4881 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4882 
4883 		/*
4884 		 *	Flush the unicast and multicast chains
4885 		 */
4886 		dev_uc_flush(dev);
4887 		dev_mc_flush(dev);
4888 
4889 		if (dev->netdev_ops->ndo_uninit)
4890 			dev->netdev_ops->ndo_uninit(dev);
4891 
4892 		/* Notifier chain MUST detach us all upper devices. */
4893 		WARN_ON(netdev_has_any_upper_dev(dev));
4894 
4895 		/* Remove entries from kobject tree */
4896 		netdev_unregister_kobject(dev);
4897 #ifdef CONFIG_XPS
4898 		/* Remove XPS queueing entries */
4899 		netif_reset_xps_queues_gt(dev, 0);
4900 #endif
4901 	}
4902 
4903 	synchronize_net();
4904 
4905 	list_for_each_entry(dev, head, unreg_list)
4906 		dev_put(dev);
4907 }
4908 
4909 static void rollback_registered(struct net_device *dev)
4910 {
4911 	LIST_HEAD(single);
4912 
4913 	list_add(&dev->unreg_list, &single);
4914 	rollback_registered_many(&single);
4915 	list_del(&single);
4916 }
4917 
4918 static netdev_features_t netdev_fix_features(struct net_device *dev,
4919 	netdev_features_t features)
4920 {
4921 	/* Fix illegal checksum combinations */
4922 	if ((features & NETIF_F_HW_CSUM) &&
4923 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4924 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
4925 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4926 	}
4927 
4928 	/* TSO requires that SG is present as well. */
4929 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
4930 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
4931 		features &= ~NETIF_F_ALL_TSO;
4932 	}
4933 
4934 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
4935 					!(features & NETIF_F_IP_CSUM)) {
4936 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
4937 		features &= ~NETIF_F_TSO;
4938 		features &= ~NETIF_F_TSO_ECN;
4939 	}
4940 
4941 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
4942 					 !(features & NETIF_F_IPV6_CSUM)) {
4943 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
4944 		features &= ~NETIF_F_TSO6;
4945 	}
4946 
4947 	/* TSO ECN requires that TSO is present as well. */
4948 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
4949 		features &= ~NETIF_F_TSO_ECN;
4950 
4951 	/* Software GSO depends on SG. */
4952 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
4953 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
4954 		features &= ~NETIF_F_GSO;
4955 	}
4956 
4957 	/* UFO needs SG and checksumming */
4958 	if (features & NETIF_F_UFO) {
4959 		/* maybe split UFO into V4 and V6? */
4960 		if (!((features & NETIF_F_GEN_CSUM) ||
4961 		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
4962 			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4963 			netdev_dbg(dev,
4964 				"Dropping NETIF_F_UFO since no checksum offload features.\n");
4965 			features &= ~NETIF_F_UFO;
4966 		}
4967 
4968 		if (!(features & NETIF_F_SG)) {
4969 			netdev_dbg(dev,
4970 				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
4971 			features &= ~NETIF_F_UFO;
4972 		}
4973 	}
4974 
4975 	return features;
4976 }
4977 
4978 int __netdev_update_features(struct net_device *dev)
4979 {
4980 	netdev_features_t features;
4981 	int err = 0;
4982 
4983 	ASSERT_RTNL();
4984 
4985 	features = netdev_get_wanted_features(dev);
4986 
4987 	if (dev->netdev_ops->ndo_fix_features)
4988 		features = dev->netdev_ops->ndo_fix_features(dev, features);
4989 
4990 	/* driver might be less strict about feature dependencies */
4991 	features = netdev_fix_features(dev, features);
4992 
4993 	if (dev->features == features)
4994 		return 0;
4995 
4996 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
4997 		&dev->features, &features);
4998 
4999 	if (dev->netdev_ops->ndo_set_features)
5000 		err = dev->netdev_ops->ndo_set_features(dev, features);
5001 
5002 	if (unlikely(err < 0)) {
5003 		netdev_err(dev,
5004 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
5005 			err, &features, &dev->features);
5006 		return -1;
5007 	}
5008 
5009 	if (!err)
5010 		dev->features = features;
5011 
5012 	return 1;
5013 }
5014 
5015 /**
5016  *	netdev_update_features - recalculate device features
5017  *	@dev: the device to check
5018  *
5019  *	Recalculate dev->features set and send notifications if it
5020  *	has changed. Should be called after driver or hardware dependent
5021  *	conditions might have changed that influence the features.
5022  */
5023 void netdev_update_features(struct net_device *dev)
5024 {
5025 	if (__netdev_update_features(dev))
5026 		netdev_features_change(dev);
5027 }
5028 EXPORT_SYMBOL(netdev_update_features);
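/* Example (illustrative sketch): a driver whose hardware loses an offload
 * at runtime (here a hypothetical large-MTU checksum limitation) masks the
 * bit and lets the core recompute dev->features and notify, under RTNL.
 *
 *	if (new_mtu > MYDRV_CSUM_MTU_LIMIT)	// hypothetical constraint
 *		dev->hw_features &= ~NETIF_F_IP_CSUM;
 *	else
 *		dev->hw_features |= NETIF_F_IP_CSUM;
 *	netdev_update_features(dev);
 */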
5029 
5030 /**
5031  *	netdev_change_features - recalculate device features
5032  *	@dev: the device to check
5033  *
5034  *	Recalculate dev->features set and send notifications even
5035  *	if they have not changed. Should be called instead of
5036  *	netdev_update_features() if also dev->vlan_features might
5037  *	have changed to allow the changes to be propagated to stacked
5038  *	VLAN devices.
5039  */
5040 void netdev_change_features(struct net_device *dev)
5041 {
5042 	__netdev_update_features(dev);
5043 	netdev_features_change(dev);
5044 }
5045 EXPORT_SYMBOL(netdev_change_features);
5046 
5047 /**
5048  *	netif_stacked_transfer_operstate -	transfer operstate
5049  *	@rootdev: the root or lower level device to transfer state from
5050  *	@dev: the device to transfer operstate to
5051  *
5052  *	Transfer operational state from root to device. This is normally
5053  *	called when a stacking relationship exists between the root
5054  *	device and the device (a leaf device).
5055  */
5056 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5057 					struct net_device *dev)
5058 {
5059 	if (rootdev->operstate == IF_OPER_DORMANT)
5060 		netif_dormant_on(dev);
5061 	else
5062 		netif_dormant_off(dev);
5063 
5064 	if (netif_carrier_ok(rootdev)) {
5065 		if (!netif_carrier_ok(dev))
5066 			netif_carrier_on(dev);
5067 	} else {
5068 		if (netif_carrier_ok(dev))
5069 			netif_carrier_off(dev);
5070 	}
5071 }
5072 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5073 
5074 #ifdef CONFIG_RPS
5075 static int netif_alloc_rx_queues(struct net_device *dev)
5076 {
5077 	unsigned int i, count = dev->num_rx_queues;
5078 	struct netdev_rx_queue *rx;
5079 
5080 	BUG_ON(count < 1);
5081 
5082 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5083 	if (!rx)
5084 		return -ENOMEM;
5085 
5086 	dev->_rx = rx;
5087 
5088 	for (i = 0; i < count; i++)
5089 		rx[i].dev = dev;
5090 	return 0;
5091 }
5092 #endif
5093 
5094 static void netdev_init_one_queue(struct net_device *dev,
5095 				  struct netdev_queue *queue, void *_unused)
5096 {
5097 	/* Initialize queue lock */
5098 	spin_lock_init(&queue->_xmit_lock);
5099 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5100 	queue->xmit_lock_owner = -1;
5101 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5102 	queue->dev = dev;
5103 #ifdef CONFIG_BQL
5104 	dql_init(&queue->dql, HZ);
5105 #endif
5106 }
5107 
5108 static int netif_alloc_netdev_queues(struct net_device *dev)
5109 {
5110 	unsigned int count = dev->num_tx_queues;
5111 	struct netdev_queue *tx;
5112 
5113 	BUG_ON(count < 1);
5114 
5115 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5116 	if (!tx)
5117 		return -ENOMEM;
5118 
5119 	dev->_tx = tx;
5120 
5121 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5122 	spin_lock_init(&dev->tx_global_lock);
5123 
5124 	return 0;
5125 }
5126 
5127 /**
5128  *	register_netdevice	- register a network device
5129  *	@dev: device to register
5130  *
5131  *	Take a completed network device structure and add it to the kernel
5132  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5133  *	chain. 0 is returned on success. A negative errno code is returned
5134  *	on a failure to set up the device, or if the name is a duplicate.
5135  *
5136  *	Callers must hold the rtnl semaphore. You may want
5137  *	register_netdev() instead of this.
5138  *
5139  *	BUGS:
5140  *	The locking appears insufficient to guarantee two parallel registers
5141  *	will not get the same name.
5142  */
5143 
5144 int register_netdevice(struct net_device *dev)
5145 {
5146 	int ret;
5147 	struct net *net = dev_net(dev);
5148 
5149 	BUG_ON(dev_boot_phase);
5150 	ASSERT_RTNL();
5151 
5152 	might_sleep();
5153 
5154 	/* When net_devices are persistent, this will be fatal. */
5155 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5156 	BUG_ON(!net);
5157 
5158 	spin_lock_init(&dev->addr_list_lock);
5159 	netdev_set_addr_lockdep_class(dev);
5160 
5161 	dev->iflink = -1;
5162 
5163 	ret = dev_get_valid_name(net, dev, dev->name);
5164 	if (ret < 0)
5165 		goto out;
5166 
5167 	/* Init, if this function is available */
5168 	if (dev->netdev_ops->ndo_init) {
5169 		ret = dev->netdev_ops->ndo_init(dev);
5170 		if (ret) {
5171 			if (ret > 0)
5172 				ret = -EIO;
5173 			goto out;
5174 		}
5175 	}
5176 
5177 	if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) &&
5178 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5179 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5180 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5181 		ret = -EINVAL;
5182 		goto err_uninit;
5183 	}
5184 
5185 	ret = -EBUSY;
5186 	if (!dev->ifindex)
5187 		dev->ifindex = dev_new_index(net);
5188 	else if (__dev_get_by_index(net, dev->ifindex))
5189 		goto err_uninit;
5190 
5191 	if (dev->iflink == -1)
5192 		dev->iflink = dev->ifindex;
5193 
5194 	/* Transfer changeable features to wanted_features and enable
5195 	 * software offloads (GSO and GRO).
5196 	 */
5197 	dev->hw_features |= NETIF_F_SOFT_FEATURES;
5198 	dev->features |= NETIF_F_SOFT_FEATURES;
5199 	dev->wanted_features = dev->features & dev->hw_features;
5200 
5201 	/* Turn on no cache copy if HW is doing checksum */
5202 	if (!(dev->flags & IFF_LOOPBACK)) {
5203 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
5204 		if (dev->features & NETIF_F_ALL_CSUM) {
5205 			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5206 			dev->features |= NETIF_F_NOCACHE_COPY;
5207 		}
5208 	}
5209 
5210 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5211 	 */
5212 	dev->vlan_features |= NETIF_F_HIGHDMA;
5213 
5214 	/* Make NETIF_F_SG inheritable to tunnel devices.
5215 	 */
5216 	dev->hw_enc_features |= NETIF_F_SG;
5217 
5218 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5219 	ret = notifier_to_errno(ret);
5220 	if (ret)
5221 		goto err_uninit;
5222 
5223 	ret = netdev_register_kobject(dev);
5224 	if (ret)
5225 		goto err_uninit;
5226 	dev->reg_state = NETREG_REGISTERED;
5227 
5228 	__netdev_update_features(dev);
5229 
5230 	/*
5231 	 *	Default initial state at registration is that the
5232 	 *	device is present.
5233 	 */
5234 
5235 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5236 
5237 	linkwatch_init_dev(dev);
5238 
5239 	dev_init_scheduler(dev);
5240 	dev_hold(dev);
5241 	list_netdevice(dev);
5242 	add_device_randomness(dev->dev_addr, dev->addr_len);
5243 
5244 	/* If the device has a permanent hardware address, the driver should
5245 	 * set dev_addr, and addr_assign_type should be set to
5246 	 * NET_ADDR_PERM (default value).
5247 	 */
5248 	if (dev->addr_assign_type == NET_ADDR_PERM)
5249 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5250 
5251 	/* Notify protocols, that a new device appeared. */
5252 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5253 	ret = notifier_to_errno(ret);
5254 	if (ret) {
5255 		rollback_registered(dev);
5256 		dev->reg_state = NETREG_UNREGISTERED;
5257 	}
5258 	/*
5259 	 *	Prevent userspace races by waiting until the network
5260 	 *	device is fully set up before sending notifications.
5261 	 */
5262 	if (!dev->rtnl_link_ops ||
5263 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5264 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5265 
5266 out:
5267 	return ret;
5268 
5269 err_uninit:
5270 	if (dev->netdev_ops->ndo_uninit)
5271 		dev->netdev_ops->ndo_uninit(dev);
5272 	goto out;
5273 }
5274 EXPORT_SYMBOL(register_netdevice);
5275 
5276 /**
5277  *	init_dummy_netdev	- init a dummy network device for NAPI
5278  *	@dev: device to init
5279  *
5280  *	This takes a network device structure and initializes the minimum
5281  *	number of fields so it can be used to schedule NAPI polls without
5282  *	registering a full blown interface. This is to be used by drivers
5283  *	that need to tie several hardware interfaces to a single NAPI
5284  *	poll scheduler due to HW limitations.
5285  */
5286 int init_dummy_netdev(struct net_device *dev)
5287 {
5288 	/* Clear everything. Note we don't initialize spinlocks
5289 	 * as they aren't supposed to be taken by any of the
5290 	 * NAPI code, and this dummy netdev is supposed to be
5291 	 * only ever used for NAPI polls.
5292 	 */
5293 	memset(dev, 0, sizeof(struct net_device));
5294 
5295 	/* make sure we BUG if trying to hit standard
5296 	 * register/unregister code path
5297 	 */
5298 	dev->reg_state = NETREG_DUMMY;
5299 
5300 	/* NAPI wants this */
5301 	INIT_LIST_HEAD(&dev->napi_list);
5302 
5303 	/* a dummy interface is started by default */
5304 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5305 	set_bit(__LINK_STATE_START, &dev->state);
5306 
5307 	/* Note: we don't allocate pcpu_refcnt for dummy devices,
5308 	 * because users of this 'device' don't need to change
5309 	 * its refcount.
5310 	 */
5311 
5312 	return 0;
5313 }
5314 EXPORT_SYMBOL_GPL(init_dummy_netdev);
5315 
5316 
5317 /**
5318  *	register_netdev	- register a network device
5319  *	@dev: device to register
5320  *
5321  *	Take a completed network device structure and add it to the kernel
5322  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5323  *	chain. 0 is returned on success. A negative errno code is returned
5324  *	on a failure to set up the device, or if the name is a duplicate.
5325  *
5326  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
5327  *	and expands the device name if you passed a format string to
5328  *	alloc_netdev.
5329  */
5330 int register_netdev(struct net_device *dev)
5331 {
5332 	int err;
5333 
5334 	rtnl_lock();
5335 	err = register_netdevice(dev);
5336 	rtnl_unlock();
5337 	return err;
5338 }
5339 EXPORT_SYMBOL(register_netdev);
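/* Example (illustrative sketch) of the classic registration sequence in a
 * driver probe routine; mydrv_setup() and struct mydrv_priv are
 * hypothetical.
 *
 *	netdev = alloc_netdev(sizeof(struct mydrv_priv), "mydrv%d",
 *			      mydrv_setup);
 *	if (!netdev)
 *		return -ENOMEM;
 *	err = register_netdev(netdev);	// takes and drops RTNL itself
 *	if (err) {
 *		free_netdev(netdev);
 *		return err;
 *	}
 */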
5340 
5341 int netdev_refcnt_read(const struct net_device *dev)
5342 {
5343 	int i, refcnt = 0;
5344 
5345 	for_each_possible_cpu(i)
5346 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5347 	return refcnt;
5348 }
5349 EXPORT_SYMBOL(netdev_refcnt_read);
5350 
5351 /**
5352  * netdev_wait_allrefs - wait until all references are gone.
5353  * @dev: target net_device
5354  *
5355  * This is called when unregistering network devices.
5356  *
5357  * Any protocol or device that holds a reference should register
5358  * for netdevice notification, and cleanup and put back the
5359  * reference if they receive an UNREGISTER event.
5360  * We can get stuck here if buggy protocols don't correctly
5361  * call dev_put.
5362  */
5363 static void netdev_wait_allrefs(struct net_device *dev)
5364 {
5365 	unsigned long rebroadcast_time, warning_time;
5366 	int refcnt;
5367 
5368 	linkwatch_forget_dev(dev);
5369 
5370 	rebroadcast_time = warning_time = jiffies;
5371 	refcnt = netdev_refcnt_read(dev);
5372 
5373 	while (refcnt != 0) {
5374 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5375 			rtnl_lock();
5376 
5377 			/* Rebroadcast unregister notification */
5378 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5379 
5380 			__rtnl_unlock();
5381 			rcu_barrier();
5382 			rtnl_lock();
5383 
5384 			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5385 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5386 				     &dev->state)) {
5387 				/* We must not have linkwatch events
5388 				 * pending on unregister. If this
5389 				 * happens, we simply run the queue
5390 				 * unscheduled, resulting in a noop
5391 				 * for this device.
5392 				 */
5393 				linkwatch_run_queue();
5394 			}
5395 
5396 			__rtnl_unlock();
5397 
5398 			rebroadcast_time = jiffies;
5399 		}
5400 
5401 		msleep(250);
5402 
5403 		refcnt = netdev_refcnt_read(dev);
5404 
5405 		if (time_after(jiffies, warning_time + 10 * HZ)) {
5406 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5407 				 dev->name, refcnt);
5408 			warning_time = jiffies;
5409 		}
5410 	}
5411 }
5412 
5413 /* The sequence is:
5414  *
5415  *	rtnl_lock();
5416  *	...
5417  *	register_netdevice(x1);
5418  *	register_netdevice(x2);
5419  *	...
5420  *	unregister_netdevice(y1);
5421  *	unregister_netdevice(y2);
5422  *      ...
5423  *	rtnl_unlock();
5424  *	free_netdev(y1);
5425  *	free_netdev(y2);
5426  *
5427  * We are invoked by rtnl_unlock().
5428  * This allows us to deal with problems:
5429  * 1) We can delete sysfs objects which invoke hotplug
5430  *    without deadlocking with linkwatch via keventd.
5431  * 2) Since we run with the RTNL semaphore not held, we can sleep
5432  *    safely in order to wait for the netdev refcnt to drop to zero.
5433  *
5434  * We must not return until all unregister events added during
5435  * the interval the lock was held have been completed.
5436  */
5437 void netdev_run_todo(void)
5438 {
5439 	struct list_head list;
5440 
5441 	/* Snapshot list, allow later requests */
5442 	list_replace_init(&net_todo_list, &list);
5443 
5444 	__rtnl_unlock();
5445 
5446 
5447 	/* Wait for rcu callbacks to finish before next phase */
5448 	if (!list_empty(&list))
5449 		rcu_barrier();
5450 
5451 	while (!list_empty(&list)) {
5452 		struct net_device *dev
5453 			= list_first_entry(&list, struct net_device, todo_list);
5454 		list_del(&dev->todo_list);
5455 
5456 		rtnl_lock();
5457 		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5458 		__rtnl_unlock();
5459 
5460 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5461 			pr_err("network todo '%s' but state %d\n",
5462 			       dev->name, dev->reg_state);
5463 			dump_stack();
5464 			continue;
5465 		}
5466 
5467 		dev->reg_state = NETREG_UNREGISTERED;
5468 
5469 		on_each_cpu(flush_backlog, dev, 1);
5470 
5471 		netdev_wait_allrefs(dev);
5472 
5473 		/* paranoia */
5474 		BUG_ON(netdev_refcnt_read(dev));
5475 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
5476 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5477 		WARN_ON(dev->dn_ptr);
5478 
5479 		if (dev->destructor)
5480 			dev->destructor(dev);
5481 
5482 		/* Free network device */
5483 		kobject_put(&dev->dev.kobj);
5484 	}
5485 }
5486 
5487 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
5488  * fields in the same order, with only the type differing.
5489  */
5490 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5491 			     const struct net_device_stats *netdev_stats)
5492 {
5493 #if BITS_PER_LONG == 64
5494 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5495 	memcpy(stats64, netdev_stats, sizeof(*stats64));
5496 #else
5497 	size_t i, n = sizeof(*stats64) / sizeof(u64);
5498 	const unsigned long *src = (const unsigned long *)netdev_stats;
5499 	u64 *dst = (u64 *)stats64;
5500 
5501 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5502 		     sizeof(*stats64) / sizeof(u64));
5503 	for (i = 0; i < n; i++)
5504 		dst[i] = src[i];
5505 #endif
5506 }
5507 EXPORT_SYMBOL(netdev_stats_to_stats64);
5508 
5509 /**
5510  *	dev_get_stats	- get network device statistics
5511  *	@dev: device to get statistics from
5512  *	@storage: place to store stats
5513  *
5514  *	Get network statistics from device. Return @storage.
5515  *	The device driver may provide its own method by setting
5516  *	dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5517  *	otherwise the internal statistics structure is used.
5518  */
5519 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5520 					struct rtnl_link_stats64 *storage)
5521 {
5522 	const struct net_device_ops *ops = dev->netdev_ops;
5523 
5524 	if (ops->ndo_get_stats64) {
5525 		memset(storage, 0, sizeof(*storage));
5526 		ops->ndo_get_stats64(dev, storage);
5527 	} else if (ops->ndo_get_stats) {
5528 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5529 	} else {
5530 		netdev_stats_to_stats64(storage, &dev->stats);
5531 	}
5532 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5533 	return storage;
5534 }
5535 EXPORT_SYMBOL(dev_get_stats);
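/* Example (illustrative sketch): a driver preferring the 64-bit interface
 * fills the caller-provided structure from its ndo_get_stats64().
 * dev_get_stats() zeroes @storage first, so only counters the driver
 * maintains need to be written.  struct mydrv_priv is hypothetical.
 *
 *	static struct rtnl_link_stats64 *
 *	mydrv_get_stats64(struct net_device *dev,
 *			  struct rtnl_link_stats64 *stats)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		stats->rx_packets = priv->rx_packets;
 *		stats->tx_packets = priv->tx_packets;
 *		return stats;
 *	}
 */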
5536 
5537 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5538 {
5539 	struct netdev_queue *queue = dev_ingress_queue(dev);
5540 
5541 #ifdef CONFIG_NET_CLS_ACT
5542 	if (queue)
5543 		return queue;
5544 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5545 	if (!queue)
5546 		return NULL;
5547 	netdev_init_one_queue(dev, queue, NULL);
5548 	queue->qdisc = &noop_qdisc;
5549 	queue->qdisc_sleeping = &noop_qdisc;
5550 	rcu_assign_pointer(dev->ingress_queue, queue);
5551 #endif
5552 	return queue;
5553 }
5554 
5555 static const struct ethtool_ops default_ethtool_ops;
5556 
5557 void netdev_set_default_ethtool_ops(struct net_device *dev,
5558 				    const struct ethtool_ops *ops)
5559 {
5560 	if (dev->ethtool_ops == &default_ethtool_ops)
5561 		dev->ethtool_ops = ops;
5562 }
5563 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
5564 
5565 /**
5566  *	alloc_netdev_mqs - allocate network device
5567  *	@sizeof_priv:	size of private data to allocate space for
5568  *	@name:		device name format string
5569  *	@setup:		callback to initialize device
5570  *	@txqs:		the number of TX subqueues to allocate
5571  *	@rxqs:		the number of RX subqueues to allocate
5572  *
5573  *	Allocates a struct net_device with private data area for driver use
5574  *	and performs basic initialization.  Also allocates subqueue structs
5575  *	for each queue on the device.
5576  */
5577 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5578 		void (*setup)(struct net_device *),
5579 		unsigned int txqs, unsigned int rxqs)
5580 {
5581 	struct net_device *dev;
5582 	size_t alloc_size;
5583 	struct net_device *p;
5584 
5585 	BUG_ON(strlen(name) >= sizeof(dev->name));
5586 
5587 	if (txqs < 1) {
5588 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5589 		return NULL;
5590 	}
5591 
5592 #ifdef CONFIG_RPS
5593 	if (rxqs < 1) {
5594 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5595 		return NULL;
5596 	}
5597 #endif
5598 
5599 	alloc_size = sizeof(struct net_device);
5600 	if (sizeof_priv) {
5601 		/* ensure 32-byte alignment of private area */
5602 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5603 		alloc_size += sizeof_priv;
5604 	}
5605 	/* ensure 32-byte alignment of whole construct */
5606 	alloc_size += NETDEV_ALIGN - 1;
5607 
5608 	p = kzalloc(alloc_size, GFP_KERNEL);
5609 	if (!p)
5610 		return NULL;
5611 
5612 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
5613 	dev->padded = (char *)dev - (char *)p;
5614 
5615 	dev->pcpu_refcnt = alloc_percpu(int);
5616 	if (!dev->pcpu_refcnt)
5617 		goto free_p;
5618 
5619 	if (dev_addr_init(dev))
5620 		goto free_pcpu;
5621 
5622 	dev_mc_init(dev);
5623 	dev_uc_init(dev);
5624 
5625 	dev_net_set(dev, &init_net);
5626 
5627 	dev->gso_max_size = GSO_MAX_SIZE;
5628 	dev->gso_max_segs = GSO_MAX_SEGS;
5629 
5630 	INIT_LIST_HEAD(&dev->napi_list);
5631 	INIT_LIST_HEAD(&dev->unreg_list);
5632 	INIT_LIST_HEAD(&dev->link_watch_list);
5633 	INIT_LIST_HEAD(&dev->upper_dev_list);
5634 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
5635 	setup(dev);
5636 
5637 	dev->num_tx_queues = txqs;
5638 	dev->real_num_tx_queues = txqs;
5639 	if (netif_alloc_netdev_queues(dev))
5640 		goto free_all;
5641 
5642 #ifdef CONFIG_RPS
5643 	dev->num_rx_queues = rxqs;
5644 	dev->real_num_rx_queues = rxqs;
5645 	if (netif_alloc_rx_queues(dev))
5646 		goto free_all;
5647 #endif
5648 
5649 	strcpy(dev->name, name);
5650 	dev->group = INIT_NETDEV_GROUP;
5651 	if (!dev->ethtool_ops)
5652 		dev->ethtool_ops = &default_ethtool_ops;
5653 	return dev;
5654 
5655 free_all:
5656 	free_netdev(dev);
5657 	return NULL;
5658 
5659 free_pcpu:
5660 	free_percpu(dev->pcpu_refcnt);
5661 	kfree(dev->_tx);
5662 #ifdef CONFIG_RPS
5663 	kfree(dev->_rx);
5664 #endif
5665 
5666 free_p:
5667 	kfree(p);
5668 	return NULL;
5669 }
5670 EXPORT_SYMBOL(alloc_netdev_mqs);
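/* Example (illustrative sketch): a multiqueue Ethernet driver usually
 * reaches this function through the alloc_etherdev_mqs() wrapper, which
 * supplies ether_setup() as the @setup callback.  The queue counts are
 * hypothetical.
 *
 *	netdev = alloc_etherdev_mqs(sizeof(struct mydrv_priv),
 *				    MYDRV_NUM_TX_RINGS, MYDRV_NUM_RX_RINGS);
 *	if (!netdev)
 *		return -ENOMEM;
 */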
5671 
5672 /**
5673  *	free_netdev - free network device
5674  *	@dev: device
5675  *
5676  *	This function does the last stage of destroying an allocated device
5677  * 	interface. The reference to the device object is released.
5678  *	If this is the last reference then it will be freed.
5679  */
5680 void free_netdev(struct net_device *dev)
5681 {
5682 	struct napi_struct *p, *n;
5683 
5684 	release_net(dev_net(dev));
5685 
5686 	kfree(dev->_tx);
5687 #ifdef CONFIG_RPS
5688 	kfree(dev->_rx);
5689 #endif
5690 
5691 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
5692 
5693 	/* Flush device addresses */
5694 	dev_addr_flush(dev);
5695 
5696 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5697 		netif_napi_del(p);
5698 
5699 	free_percpu(dev->pcpu_refcnt);
5700 	dev->pcpu_refcnt = NULL;
5701 
5702 	/*  Compatibility with error handling in drivers */
5703 	if (dev->reg_state == NETREG_UNINITIALIZED) {
5704 		kfree((char *)dev - dev->padded);
5705 		return;
5706 	}
5707 
5708 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5709 	dev->reg_state = NETREG_RELEASED;
5710 
5711 	/* will free via device release */
5712 	put_device(&dev->dev);
5713 }
5714 EXPORT_SYMBOL(free_netdev);
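
/* Usage sketch (illustrative): the NETREG_UNINITIALIZED branch above
 * is what lets a driver's error path free a never-registered device
 * with the same call it uses after unregistration.
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */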
5715 
5716 /**
5717  *	synchronize_net -  Synchronize with packet receive processing
5718  *
5719  *	Wait for packets currently being received to be done.
5720  *	Does not block later packets from starting.
5721  */
5722 void synchronize_net(void)
5723 {
5724 	might_sleep();
5725 	if (rtnl_is_locked())
5726 		synchronize_rcu_expedited();
5727 	else
5728 		synchronize_rcu();
5729 }
5730 EXPORT_SYMBOL(synchronize_net);
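
/* Usage sketch (illustrative; some_ptr and old are hypothetical): a
 * caller unpublishes an object reachable from the receive path, waits
 * out in-flight receivers, then frees it.
 *
 *	RCU_INIT_POINTER(dev->some_ptr, NULL);
 *	synchronize_net();
 *	kfree(old);
 *
 * Using the expedited grace period only under RTNL keeps unlocked
 * callers from perturbing every CPU.
 */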
5731 
5732 /**
5733  *	unregister_netdevice_queue - remove device from the kernel
5734  *	@dev: device
5735  *	@head: list
5736  *
5737  *	This function shuts down a device interface and removes it
5738  *	from the kernel tables.
5739  *	If head is not NULL, the device is queued to be unregistered later.
5740  *
5741  *	Callers must hold the rtnl semaphore.  You may want
5742  *	unregister_netdev() instead of this.
5743  */
5744 
5745 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5746 {
5747 	ASSERT_RTNL();
5748 
5749 	if (head) {
5750 		list_move_tail(&dev->unreg_list, head);
5751 	} else {
5752 		rollback_registered(dev);
5753 		/* Finish processing unregister after unlock */
5754 		net_set_todo(dev);
5755 	}
5756 }
5757 EXPORT_SYMBOL(unregister_netdevice_queue);
5758 
5759 /**
5760  *	unregister_netdevice_many - unregister many devices
5761  *	@head: list of devices
5762  */
5763 void unregister_netdevice_many(struct list_head *head)
5764 {
5765 	struct net_device *dev;
5766 
5767 	if (!list_empty(head)) {
5768 		rollback_registered_many(head);
5769 		list_for_each_entry(dev, head, unreg_list)
5770 			net_set_todo(dev);
5771 	}
5772 }
5773 EXPORT_SYMBOL(unregister_netdevice_many);
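
/* Usage sketch (illustrative; dev1 and dev2 stand for devices the
 * caller owns): queueing several devices and tearing them down in one
 * batch amortizes the rollback and RCU grace-period cost.
 *
 *	LIST_HEAD(list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);
 *	rtnl_unlock();
 */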
5774 
5775 /**
5776  *	unregister_netdev - remove device from the kernel
5777  *	@dev: device
5778  *
5779  *	This function shuts down a device interface and removes it
5780  *	from the kernel tables.
5781  *
5782  *	This is just a wrapper for unregister_netdevice that takes
5783  *	the rtnl semaphore.  In general you want to use this and not
5784  *	unregister_netdevice.
5785  */
5786 void unregister_netdev(struct net_device *dev)
5787 {
5788 	rtnl_lock();
5789 	unregister_netdevice(dev);
5790 	rtnl_unlock();
5791 }
5792 EXPORT_SYMBOL(unregister_netdev);
5793 
5794 /**
5795  *	dev_change_net_namespace - move device to a different network namespace
5796  *	@dev: device
5797  *	@net: network namespace
5798  *	@pat: If not NULL, name pattern to try if the current device name
5799  *	      is already taken in the destination network namespace.
5800  *
5801  *	This function shuts down a device interface and moves it
5802  *	to a new network namespace. On success 0 is returned, on
5803  *	to a new network namespace. On success 0 is returned; on
5804  *	failure a negative errno code is returned.
5805  *	Callers must hold the rtnl semaphore.
5806  */
5807 
5808 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5809 {
5810 	int err;
5811 
5812 	ASSERT_RTNL();
5813 
5814 	/* Don't allow namespace local devices to be moved. */
5815 	err = -EINVAL;
5816 	if (dev->features & NETIF_F_NETNS_LOCAL)
5817 		goto out;
5818 
5819 	/* Ensure the device has been registered */
5820 	if (dev->reg_state != NETREG_REGISTERED)
5821 		goto out;
5822 
5823 	/* Get out if there is nothing to do */
5824 	err = 0;
5825 	if (net_eq(dev_net(dev), net))
5826 		goto out;
5827 
5828 	/* Pick the destination device name, and ensure
5829 	 * we can use it in the destination network namespace.
5830 	 */
5831 	err = -EEXIST;
5832 	if (__dev_get_by_name(net, dev->name)) {
5833 		/* We get here if we can't use the current device name */
5834 		if (!pat)
5835 			goto out;
5836 		if (dev_get_valid_name(net, dev, pat) < 0)
5837 			goto out;
5838 	}
5839 
5840 	/*
5841 	 * And now a mini version of register_netdevice and unregister_netdevice.
5842 	 */
5843 
5844 	/* If device is running close it first. */
5845 	dev_close(dev);
5846 
5847 	/* And unlink it from device chain */
5848 	err = -ENODEV;
5849 	unlist_netdevice(dev);
5850 
5851 	synchronize_net();
5852 
5853 	/* Shutdown queueing discipline. */
5854 	dev_shutdown(dev);
5855 
5856 	/* Notify protocols that we are about to destroy
5857 	   this device. They should clean up all of their state.
5858 
5859 	   Note that dev->reg_state stays at NETREG_REGISTERED.
5860 	   This is wanted because this way 8021q and macvlan know
5861 	   the device is just moving and can keep their slaves up.
5862 	*/
5863 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5864 	rcu_barrier();
5865 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5866 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5867 
5868 	/*
5869 	 *	Flush the unicast and multicast chains
5870 	 */
5871 	dev_uc_flush(dev);
5872 	dev_mc_flush(dev);
5873 
5874 	/* Send a netdev-removed uevent to the old namespace */
5875 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
5876 
5877 	/* Actually switch the network namespace */
5878 	dev_net_set(dev, net);
5879 
5880 	/* If there is an ifindex conflict assign a new one */
5881 	if (__dev_get_by_index(net, dev->ifindex)) {
5882 		int iflink = (dev->iflink == dev->ifindex);
5883 		dev->ifindex = dev_new_index(net);
5884 		if (iflink)
5885 			dev->iflink = dev->ifindex;
5886 	}
5887 
5888 	/* Send a netdev-add uevent to the new namespace */
5889 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
5890 
5891 	/* Fixup kobjects */
5892 	err = device_rename(&dev->dev, dev->name);
5893 	WARN_ON(err);
5894 
5895 	/* Add the device back in the hashes */
5896 	list_netdevice(dev);
5897 
5898 	/* Notify protocols, that a new device appeared. */
5899 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
5900 
5901 	/*
5902 	 *	Prevent userspace races by waiting until the network
5903 	 *	device is fully setup before sending notifications.
5904 	 */
5905 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5906 
5907 	synchronize_net();
5908 	err = 0;
5909 out:
5910 	return err;
5911 }
5912 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
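
/* Usage sketch (illustrative): under RTNL, move dev into net and let a
 * pattern resolve a name collision in the destination namespace; this
 * is roughly what rtnetlink does for IFLA_NET_NS_PID/IFLA_NET_NS_FD.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "eth%d");
 *	rtnl_unlock();
 */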
5913 
5914 static int dev_cpu_callback(struct notifier_block *nfb,
5915 			    unsigned long action,
5916 			    void *ocpu)
5917 {
5918 	struct sk_buff **list_skb;
5919 	struct sk_buff *skb;
5920 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
5921 	struct softnet_data *sd, *oldsd;
5922 
5923 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5924 		return NOTIFY_OK;
5925 
5926 	local_irq_disable();
5927 	cpu = smp_processor_id();
5928 	sd = &per_cpu(softnet_data, cpu);
5929 	oldsd = &per_cpu(softnet_data, oldcpu);
5930 
5931 	/* Find end of our completion_queue. */
5932 	list_skb = &sd->completion_queue;
5933 	while (*list_skb)
5934 		list_skb = &(*list_skb)->next;
5935 	/* Append completion queue from offline CPU. */
5936 	*list_skb = oldsd->completion_queue;
5937 	oldsd->completion_queue = NULL;
5938 
5939 	/* Append output queue from offline CPU. */
5940 	if (oldsd->output_queue) {
5941 		*sd->output_queue_tailp = oldsd->output_queue;
5942 		sd->output_queue_tailp = oldsd->output_queue_tailp;
5943 		oldsd->output_queue = NULL;
5944 		oldsd->output_queue_tailp = &oldsd->output_queue;
5945 	}
5946 	/* Append NAPI poll list from offline CPU. */
5947 	if (!list_empty(&oldsd->poll_list)) {
5948 		list_splice_init(&oldsd->poll_list, &sd->poll_list);
5949 		raise_softirq_irqoff(NET_RX_SOFTIRQ);
5950 	}
5951 
5952 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
5953 	local_irq_enable();
5954 
5955 	/* Process offline CPU's input_pkt_queue */
5956 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
5957 		netif_rx(skb);
5958 		input_queue_head_incr(oldsd);
5959 	}
5960 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
5961 		netif_rx(skb);
5962 		input_queue_head_incr(oldsd);
5963 	}
5964 
5965 	return NOTIFY_OK;
5966 }
5967 
5968 
5969 /**
5970  *	netdev_increment_features - add one device's features to a master set
5971  *	@all: current feature set
5972  *	@one: new feature set
5973  *	@mask: mask feature set
5974  *
5975  *	Computes a new feature set after adding a device with feature set
5976  *	@one to the master device with current feature set @all.  Will not
5977  *	enable anything that is off in @mask. Returns the new feature set.
5978  */
5979 netdev_features_t netdev_increment_features(netdev_features_t all,
5980 	netdev_features_t one, netdev_features_t mask)
5981 {
5982 	if (mask & NETIF_F_GEN_CSUM)
5983 		mask |= NETIF_F_ALL_CSUM;
5984 	mask |= NETIF_F_VLAN_CHALLENGED;
5985 
5986 	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
5987 	all &= one | ~NETIF_F_ALL_FOR_ALL;
5988 
5989 	/* If one device supports hw checksumming, set for all. */
5990 	if (all & NETIF_F_GEN_CSUM)
5991 		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
5992 
5993 	return all;
5994 }
5995 EXPORT_SYMBOL(netdev_increment_features);
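
/* Usage sketch (illustrative; the slave list iteration is an
 * assumption): a bonding-style master recomputing its features as its
 * slaves are walked.
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */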
5996 
5997 static struct hlist_head *netdev_create_hash(void)
5998 {
5999 	int i;
6000 	struct hlist_head *hash;
6001 
6002 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6003 	if (hash != NULL)
6004 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
6005 			INIT_HLIST_HEAD(&hash[i]);
6006 
6007 	return hash;
6008 }
6009 
6010 /* Initialize per network namespace state */
6011 static int __net_init netdev_init(struct net *net)
6012 {
6013 	if (net != &init_net)
6014 		INIT_LIST_HEAD(&net->dev_base_head);
6015 
6016 	net->dev_name_head = netdev_create_hash();
6017 	if (net->dev_name_head == NULL)
6018 		goto err_name;
6019 
6020 	net->dev_index_head = netdev_create_hash();
6021 	if (net->dev_index_head == NULL)
6022 		goto err_idx;
6023 
6024 	return 0;
6025 
6026 err_idx:
6027 	kfree(net->dev_name_head);
6028 err_name:
6029 	return -ENOMEM;
6030 }
6031 
6032 /**
6033  *	netdev_drivername - network driver for the device
6034  *	@dev: network device
6035  *
6036  *	Determine network driver for device.
6037  */
6038 const char *netdev_drivername(const struct net_device *dev)
6039 {
6040 	const struct device_driver *driver;
6041 	const struct device *parent;
6042 	const char *empty = "";
6043 
6044 	parent = dev->dev.parent;
6045 	if (!parent)
6046 		return empty;
6047 
6048 	driver = parent->driver;
6049 	if (driver && driver->name)
6050 		return driver->name;
6051 	return empty;
6052 }
6053 
6054 static int __netdev_printk(const char *level, const struct net_device *dev,
6055 			   struct va_format *vaf)
6056 {
6057 	int r;
6058 
6059 	if (dev && dev->dev.parent) {
6060 		r = dev_printk_emit(level[1] - '0',
6061 				    dev->dev.parent,
6062 				    "%s %s %s: %pV",
6063 				    dev_driver_string(dev->dev.parent),
6064 				    dev_name(dev->dev.parent),
6065 				    netdev_name(dev), vaf);
6066 	} else if (dev) {
6067 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6068 	} else {
6069 		r = printk("%s(NULL net_device): %pV", level, vaf);
6070 	}
6071 
6072 	return r;
6073 }
6074 
6075 int netdev_printk(const char *level, const struct net_device *dev,
6076 		  const char *format, ...)
6077 {
6078 	struct va_format vaf;
6079 	va_list args;
6080 	int r;
6081 
6082 	va_start(args, format);
6083 
6084 	vaf.fmt = format;
6085 	vaf.va = &args;
6086 
6087 	r = __netdev_printk(level, dev, &vaf);
6088 
6089 	va_end(args);
6090 
6091 	return r;
6092 }
6093 EXPORT_SYMBOL(netdev_printk);
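
/* Usage sketch (illustrative): drivers normally use the per-level
 * wrappers generated below rather than calling netdev_printk()
 * directly.
 *
 *	netdev_err(dev, "TX timeout on queue %d\n", txq);
 *
 * When a parent device exists, the message is prefixed with the
 * driver string, bus address and interface name.
 */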
6094 
6095 #define define_netdev_printk_level(func, level)			\
6096 int func(const struct net_device *dev, const char *fmt, ...)	\
6097 {								\
6098 	int r;							\
6099 	struct va_format vaf;					\
6100 	va_list args;						\
6101 								\
6102 	va_start(args, fmt);					\
6103 								\
6104 	vaf.fmt = fmt;						\
6105 	vaf.va = &args;						\
6106 								\
6107 	r = __netdev_printk(level, dev, &vaf);			\
6108 								\
6109 	va_end(args);						\
6110 								\
6111 	return r;						\
6112 }								\
6113 EXPORT_SYMBOL(func);
6114 
6115 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6116 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6117 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6118 define_netdev_printk_level(netdev_err, KERN_ERR);
6119 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6120 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6121 define_netdev_printk_level(netdev_info, KERN_INFO);
6122 
6123 static void __net_exit netdev_exit(struct net *net)
6124 {
6125 	kfree(net->dev_name_head);
6126 	kfree(net->dev_index_head);
6127 }
6128 
6129 static struct pernet_operations __net_initdata netdev_net_ops = {
6130 	.init = netdev_init,
6131 	.exit = netdev_exit,
6132 };
6133 
6134 static void __net_exit default_device_exit(struct net *net)
6135 {
6136 	struct net_device *dev, *aux;
6137 	/*
6138 	 * Push all migratable network devices back to the
6139 	 * initial network namespace
6140 	 */
6141 	rtnl_lock();
6142 	for_each_netdev_safe(net, dev, aux) {
6143 		int err;
6144 		char fb_name[IFNAMSIZ];
6145 
6146 		/* Ignore unmovable devices (i.e. loopback) */
6147 		if (dev->features & NETIF_F_NETNS_LOCAL)
6148 			continue;
6149 
6150 		/* Leave virtual devices for the generic cleanup */
6151 		if (dev->rtnl_link_ops)
6152 			continue;
6153 
6154 		/* Push remaining network devices to init_net */
6155 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6156 		err = dev_change_net_namespace(dev, &init_net, fb_name);
6157 		if (err) {
6158 			pr_emerg("%s: failed to move %s to init_net: %d\n",
6159 				 __func__, dev->name, err);
6160 			BUG();
6161 		}
6162 	}
6163 	rtnl_unlock();
6164 }
6165 
6166 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6167 {
6168 	/* At exit all network devices must be removed from a network
6169 	 * namespace.  Do this in the reverse order of registration.
6170 	 * Do this across as many network namespaces as possible to
6171 	 * improve batching efficiency.
6172 	 */
6173 	struct net_device *dev;
6174 	struct net *net;
6175 	LIST_HEAD(dev_kill_list);
6176 
6177 	rtnl_lock();
6178 	list_for_each_entry(net, net_list, exit_list) {
6179 		for_each_netdev_reverse(net, dev) {
6180 			if (dev->rtnl_link_ops)
6181 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6182 			else
6183 				unregister_netdevice_queue(dev, &dev_kill_list);
6184 		}
6185 	}
6186 	unregister_netdevice_many(&dev_kill_list);
6187 	list_del(&dev_kill_list);
6188 	rtnl_unlock();
6189 }
6190 
6191 static struct pernet_operations __net_initdata default_device_ops = {
6192 	.exit = default_device_exit,
6193 	.exit_batch = default_device_exit_batch,
6194 };
6195 
6196 /*
6197  *	Initialize the DEV module. At boot time this walks the device list and
6198  *	unhooks any devices that fail to initialise (normally hardware not
6199  *	present) and leaves us with a valid list of present and active devices.
6200  *
6201  */
6202 
6203 /*
6204  *       This is called single threaded during boot, so no need
6205  *       to take the rtnl semaphore.
6206  */
6207 static int __init net_dev_init(void)
6208 {
6209 	int i, rc = -ENOMEM;
6210 
6211 	BUG_ON(!dev_boot_phase);
6212 
6213 	if (dev_proc_init())
6214 		goto out;
6215 
6216 	if (netdev_kobject_init())
6217 		goto out;
6218 
6219 	INIT_LIST_HEAD(&ptype_all);
6220 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
6221 		INIT_LIST_HEAD(&ptype_base[i]);
6222 
6223 	INIT_LIST_HEAD(&offload_base);
6224 
6225 	if (register_pernet_subsys(&netdev_net_ops))
6226 		goto out;
6227 
6228 	/*
6229 	 *	Initialise the packet receive queues.
6230 	 */
6231 
6232 	for_each_possible_cpu(i) {
6233 		struct softnet_data *sd = &per_cpu(softnet_data, i);
6234 
6235 		memset(sd, 0, sizeof(*sd));
6236 		skb_queue_head_init(&sd->input_pkt_queue);
6237 		skb_queue_head_init(&sd->process_queue);
6238 		sd->completion_queue = NULL;
6239 		INIT_LIST_HEAD(&sd->poll_list);
6240 		sd->output_queue = NULL;
6241 		sd->output_queue_tailp = &sd->output_queue;
6242 #ifdef CONFIG_RPS
6243 		sd->csd.func = rps_trigger_softirq;
6244 		sd->csd.info = sd;
6245 		sd->csd.flags = 0;
6246 		sd->cpu = i;
6247 #endif
6248 
6249 		sd->backlog.poll = process_backlog;
6250 		sd->backlog.weight = weight_p;
6251 		sd->backlog.gro_list = NULL;
6252 		sd->backlog.gro_count = 0;
6253 	}
6254 
6255 	dev_boot_phase = 0;
6256 
6257 	/* The loopback device is special: if any other network device
6258 	 * is present in a network namespace, the loopback device must
6259 	 * be present too. Since we now dynamically allocate and free the
6260 	 * loopback device, ensure this invariant is maintained by
6261 	 * keeping the loopback device as the first device on the
6262 	 * list of network devices, ensuring the loopback device
6263 	 * is the first device that appears and the last network
6264 	 * device that disappears.
6265 	 */
6266 	if (register_pernet_device(&loopback_net_ops))
6267 		goto out;
6268 
6269 	if (register_pernet_device(&default_device_ops))
6270 		goto out;
6271 
6272 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6273 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6274 
6275 	hotcpu_notifier(dev_cpu_callback, 0);
6276 	dst_init();
6277 	rc = 0;
6278 out:
6279 	return rc;
6280 }
6281 
6282 subsys_initcall(net_dev_init);
6283