xref: /linux/net/core/dev.c (revision 3b64b1881143ce9e461c211cc81acc72d0cdc476)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <net/dst.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <net/xfrm.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
124 #include <net/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/events/napi.h>
130 #include <trace/events/net.h>
131 #include <trace/events/skb.h>
132 #include <linux/pci.h>
133 #include <linux/inetdevice.h>
134 #include <linux/cpu_rmap.h>
135 #include <linux/net_tstamp.h>
136 #include <linux/static_key.h>
137 #include <net/flow_keys.h>
138 
139 #include "net-sysfs.h"
140 
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143 
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146 
147 /*
148  *	The list of packet types we will receive (as opposed to discard)
149  *	and the routines to invoke.
150  *
151  *	Why 16. Because with 16 the only overlap we get on a hash of the
152  *	low nibble of the protocol value is RARP/SNAP/X.25.
153  *
154  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
155  *             sure which should go first, but I bet it won't make much
156  *             difference if we are running VLANs.  The good news is that
157  *             this protocol won't be in the list unless compiled in, so
158  *             the average user (w/out VLANs) will not be adversely affected.
159  *             --BLG
160  *
161  *		0800	IP
162  *		8100    802.1Q VLAN
163  *		0001	802.3
164  *		0002	AX.25
165  *		0004	802.2
166  *		8035	RARP
167  *		0005	SNAP
168  *		0805	X.25
169  *		0806	ARP
170  *		8137	IPX
171  *		0009	Localtalk
172  *		86DD	IPv6
173  */
174 
175 #define PTYPE_HASH_SIZE	(16)
176 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
177 
178 static DEFINE_SPINLOCK(ptype_lock);
179 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
180 static struct list_head ptype_all __read_mostly;	/* Taps */
181 
182 /*
183  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
184  * semaphore.
185  *
186  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
187  *
188  * Writers must hold the rtnl semaphore while they loop through the
189  * dev_base_head list, and hold dev_base_lock for writing when they do the
190  * actual updates.  This allows pure readers to access the list even
191  * while a writer is preparing to update it.
192  *
193  * To put it another way, dev_base_lock is held for writing only to
194  * protect against pure readers; the rtnl semaphore provides the
195  * protection against other writers.
196  *
197  * See, for example usages, register_netdevice() and
198  * unregister_netdevice(), which must be called with the rtnl
199  * semaphore held.
200  */
201 DEFINE_RWLOCK(dev_base_lock);
202 EXPORT_SYMBOL(dev_base_lock);
203 
204 static inline void dev_base_seq_inc(struct net *net)
205 {
206 	while (++net->dev_base_seq == 0);
207 }
208 
209 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
210 {
211 	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
212 
213 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
214 }
215 
216 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
217 {
218 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
219 }
220 
221 static inline void rps_lock(struct softnet_data *sd)
222 {
223 #ifdef CONFIG_RPS
224 	spin_lock(&sd->input_pkt_queue.lock);
225 #endif
226 }
227 
228 static inline void rps_unlock(struct softnet_data *sd)
229 {
230 #ifdef CONFIG_RPS
231 	spin_unlock(&sd->input_pkt_queue.lock);
232 #endif
233 }
234 
235 /* Device list insertion */
236 static int list_netdevice(struct net_device *dev)
237 {
238 	struct net *net = dev_net(dev);
239 
240 	ASSERT_RTNL();
241 
242 	write_lock_bh(&dev_base_lock);
243 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
244 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
245 	hlist_add_head_rcu(&dev->index_hlist,
246 			   dev_index_hash(net, dev->ifindex));
247 	write_unlock_bh(&dev_base_lock);
248 
249 	dev_base_seq_inc(net);
250 
251 	return 0;
252 }
253 
254 /* Device list removal
255  * caller must respect a RCU grace period before freeing/reusing dev
256  */
257 static void unlist_netdevice(struct net_device *dev)
258 {
259 	ASSERT_RTNL();
260 
261 	/* Unlink dev from the device chain */
262 	write_lock_bh(&dev_base_lock);
263 	list_del_rcu(&dev->dev_list);
264 	hlist_del_rcu(&dev->name_hlist);
265 	hlist_del_rcu(&dev->index_hlist);
266 	write_unlock_bh(&dev_base_lock);
267 
268 	dev_base_seq_inc(dev_net(dev));
269 }
270 
271 /*
272  *	Our notifier list
273  */
274 
275 static RAW_NOTIFIER_HEAD(netdev_chain);
276 
277 /*
278  *	Device drivers call our routines to queue packets here. We empty the
279  *	queue in the local softnet handler.
280  */
281 
282 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
283 EXPORT_PER_CPU_SYMBOL(softnet_data);
284 
285 #ifdef CONFIG_LOCKDEP
286 /*
287  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
288  * according to dev->type
289  */
290 static const unsigned short netdev_lock_type[] =
291 	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
292 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
293 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
294 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
295 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
296 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
297 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
298 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
299 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
300 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
301 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
302 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
303 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
304 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
305 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
306 
307 static const char *const netdev_lock_name[] =
308 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
309 	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
310 	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
311 	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
312 	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
313 	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
314 	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
315 	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
316 	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
317 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
318 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
319 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
320 	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
321 	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
322 	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
323 
324 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
325 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
326 
327 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
328 {
329 	int i;
330 
331 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
332 		if (netdev_lock_type[i] == dev_type)
333 			return i;
334 	/* the last key is used by default */
335 	return ARRAY_SIZE(netdev_lock_type) - 1;
336 }
337 
338 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
339 						 unsigned short dev_type)
340 {
341 	int i;
342 
343 	i = netdev_lock_pos(dev_type);
344 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
345 				   netdev_lock_name[i]);
346 }
347 
348 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
349 {
350 	int i;
351 
352 	i = netdev_lock_pos(dev->type);
353 	lockdep_set_class_and_name(&dev->addr_list_lock,
354 				   &netdev_addr_lock_key[i],
355 				   netdev_lock_name[i]);
356 }
357 #else
358 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
359 						 unsigned short dev_type)
360 {
361 }
362 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
363 {
364 }
365 #endif
366 
367 /*******************************************************************************
368 
369 		Protocol management and registration routines
370 
371 *******************************************************************************/
372 
373 /*
374  *	Add a protocol ID to the list. Now that the input handler is
375  *	smarter we can dispense with all the messy stuff that used to be
376  *	here.
377  *
378  *	BEWARE!!! Protocol handlers, mangling input packets,
379  *	MUST BE last in hash buckets and checking protocol handlers
380  *	MUST start from promiscuous ptype_all chain in net_bh.
381  *	It is true now, do not change it.
382  *	Explanation follows: if protocol handler, mangling packet, will
383  *	be the first on list, it is not able to sense, that packet
384  *	is cloned and should be copied-on-write, so that it will
385  *	change it and subsequent readers will get broken packet.
386  *							--ANK (980803)
387  */
388 
389 static inline struct list_head *ptype_head(const struct packet_type *pt)
390 {
391 	if (pt->type == htons(ETH_P_ALL))
392 		return &ptype_all;
393 	else
394 		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
395 }
396 
397 /**
398  *	dev_add_pack - add packet handler
399  *	@pt: packet type declaration
400  *
401  *	Add a protocol handler to the networking stack. The passed &packet_type
402  *	is linked into kernel lists and may not be freed until it has been
403  *	removed from the kernel lists.
404  *
405  *	This call does not sleep therefore it can not
406  *	guarantee all CPU's that are in middle of receiving packets
407  *	will see the new packet type (until the next received packet).
408  */
409 
410 void dev_add_pack(struct packet_type *pt)
411 {
412 	struct list_head *head = ptype_head(pt);
413 
414 	spin_lock(&ptype_lock);
415 	list_add_rcu(&pt->list, head);
416 	spin_unlock(&ptype_lock);
417 }
418 EXPORT_SYMBOL(dev_add_pack);
419 
420 /**
421  *	__dev_remove_pack	 - remove packet handler
422  *	@pt: packet type declaration
423  *
424  *	Remove a protocol handler that was previously added to the kernel
425  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
426  *	from the kernel lists and can be freed or reused once this function
427  *	returns.
428  *
429  *      The packet type might still be in use by receivers
430  *	and must not be freed until after all the CPU's have gone
431  *	through a quiescent state.
432  */
433 void __dev_remove_pack(struct packet_type *pt)
434 {
435 	struct list_head *head = ptype_head(pt);
436 	struct packet_type *pt1;
437 
438 	spin_lock(&ptype_lock);
439 
440 	list_for_each_entry(pt1, head, list) {
441 		if (pt == pt1) {
442 			list_del_rcu(&pt->list);
443 			goto out;
444 		}
445 	}
446 
447 	pr_warn("dev_remove_pack: %p not found\n", pt);
448 out:
449 	spin_unlock(&ptype_lock);
450 }
451 EXPORT_SYMBOL(__dev_remove_pack);
452 
453 /**
454  *	dev_remove_pack	 - remove packet handler
455  *	@pt: packet type declaration
456  *
457  *	Remove a protocol handler that was previously added to the kernel
458  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
459  *	from the kernel lists and can be freed or reused once this function
460  *	returns.
461  *
462  *	This call sleeps to guarantee that no CPU is looking at the packet
463  *	type after return.
464  */
465 void dev_remove_pack(struct packet_type *pt)
466 {
467 	__dev_remove_pack(pt);
468 
469 	synchronize_net();
470 }
471 EXPORT_SYMBOL(dev_remove_pack);
472 
473 /******************************************************************************
474 
475 		      Device Boot-time Settings Routines
476 
477 *******************************************************************************/
478 
479 /* Boot time configuration table */
480 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
481 
482 /**
483  *	netdev_boot_setup_add	- add new setup entry
484  *	@name: name of the device
485  *	@map: configured settings for the device
486  *
487  *	Adds new setup entry to the dev_boot_setup list.  The function
488  *	returns 0 on error and 1 on success.  This is a generic routine to
489  *	all netdevices.
490  */
491 static int netdev_boot_setup_add(char *name, struct ifmap *map)
492 {
493 	struct netdev_boot_setup *s;
494 	int i;
495 
496 	s = dev_boot_setup;
497 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
498 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
499 			memset(s[i].name, 0, sizeof(s[i].name));
500 			strlcpy(s[i].name, name, IFNAMSIZ);
501 			memcpy(&s[i].map, map, sizeof(s[i].map));
502 			break;
503 		}
504 	}
505 
506 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
507 }
508 
509 /**
510  *	netdev_boot_setup_check	- check boot time settings
511  *	@dev: the netdevice
512  *
513  * 	Check boot time settings for the device.
514  *	The found settings are set for the device to be used
515  *	later in the device probing.
516  *	Returns 0 if no settings found, 1 if they are.
517  */
518 int netdev_boot_setup_check(struct net_device *dev)
519 {
520 	struct netdev_boot_setup *s = dev_boot_setup;
521 	int i;
522 
523 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
524 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
525 		    !strcmp(dev->name, s[i].name)) {
526 			dev->irq 	= s[i].map.irq;
527 			dev->base_addr 	= s[i].map.base_addr;
528 			dev->mem_start 	= s[i].map.mem_start;
529 			dev->mem_end 	= s[i].map.mem_end;
530 			return 1;
531 		}
532 	}
533 	return 0;
534 }
535 EXPORT_SYMBOL(netdev_boot_setup_check);
536 
537 
538 /**
539  *	netdev_boot_base	- get address from boot time settings
540  *	@prefix: prefix for network device
541  *	@unit: id for network device
542  *
543  * 	Check boot time settings for the base address of device.
544  *	The found settings are set for the device to be used
545  *	later in the device probing.
546  *	Returns 0 if no settings found.
547  */
548 unsigned long netdev_boot_base(const char *prefix, int unit)
549 {
550 	const struct netdev_boot_setup *s = dev_boot_setup;
551 	char name[IFNAMSIZ];
552 	int i;
553 
554 	sprintf(name, "%s%d", prefix, unit);
555 
556 	/*
557 	 * If device already registered then return base of 1
558 	 * to indicate not to probe for this interface
559 	 */
560 	if (__dev_get_by_name(&init_net, name))
561 		return 1;
562 
563 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
564 		if (!strcmp(name, s[i].name))
565 			return s[i].map.base_addr;
566 	return 0;
567 }
568 
569 /*
570  * Saves at boot time configured settings for any netdevice.
571  */
572 int __init netdev_boot_setup(char *str)
573 {
574 	int ints[5];
575 	struct ifmap map;
576 
577 	str = get_options(str, ARRAY_SIZE(ints), ints);
578 	if (!str || !*str)
579 		return 0;
580 
581 	/* Save settings */
582 	memset(&map, 0, sizeof(map));
583 	if (ints[0] > 0)
584 		map.irq = ints[1];
585 	if (ints[0] > 1)
586 		map.base_addr = ints[2];
587 	if (ints[0] > 2)
588 		map.mem_start = ints[3];
589 	if (ints[0] > 3)
590 		map.mem_end = ints[4];
591 
592 	/* Add new entry to the list */
593 	return netdev_boot_setup_add(str, &map);
594 }
595 
596 __setup("netdev=", netdev_boot_setup);
597 
598 /*******************************************************************************
599 
600 			    Device Interface Subroutines
601 
602 *******************************************************************************/
603 
604 /**
605  *	__dev_get_by_name	- find a device by its name
606  *	@net: the applicable net namespace
607  *	@name: name to find
608  *
609  *	Find an interface by name. Must be called under RTNL semaphore
610  *	or @dev_base_lock. If the name is found a pointer to the device
611  *	is returned. If the name is not found then %NULL is returned. The
612  *	reference counters are not incremented so the caller must be
613  *	careful with locks.
614  */
615 
616 struct net_device *__dev_get_by_name(struct net *net, const char *name)
617 {
618 	struct hlist_node *p;
619 	struct net_device *dev;
620 	struct hlist_head *head = dev_name_hash(net, name);
621 
622 	hlist_for_each_entry(dev, p, head, name_hlist)
623 		if (!strncmp(dev->name, name, IFNAMSIZ))
624 			return dev;
625 
626 	return NULL;
627 }
628 EXPORT_SYMBOL(__dev_get_by_name);
629 
630 /**
631  *	dev_get_by_name_rcu	- find a device by its name
632  *	@net: the applicable net namespace
633  *	@name: name to find
634  *
635  *	Find an interface by name.
636  *	If the name is found a pointer to the device is returned.
637  * 	If the name is not found then %NULL is returned.
638  *	The reference counters are not incremented so the caller must be
639  *	careful with locks. The caller must hold RCU lock.
640  */
641 
642 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
643 {
644 	struct hlist_node *p;
645 	struct net_device *dev;
646 	struct hlist_head *head = dev_name_hash(net, name);
647 
648 	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
649 		if (!strncmp(dev->name, name, IFNAMSIZ))
650 			return dev;
651 
652 	return NULL;
653 }
654 EXPORT_SYMBOL(dev_get_by_name_rcu);
655 
656 /**
657  *	dev_get_by_name		- find a device by its name
658  *	@net: the applicable net namespace
659  *	@name: name to find
660  *
661  *	Find an interface by name. This can be called from any
662  *	context and does its own locking. The returned handle has
663  *	the usage count incremented and the caller must use dev_put() to
664  *	release it when it is no longer needed. %NULL is returned if no
665  *	matching device is found.
666  */
667 
668 struct net_device *dev_get_by_name(struct net *net, const char *name)
669 {
670 	struct net_device *dev;
671 
672 	rcu_read_lock();
673 	dev = dev_get_by_name_rcu(net, name);
674 	if (dev)
675 		dev_hold(dev);
676 	rcu_read_unlock();
677 	return dev;
678 }
679 EXPORT_SYMBOL(dev_get_by_name);
680 
681 /**
682  *	__dev_get_by_index - find a device by its ifindex
683  *	@net: the applicable net namespace
684  *	@ifindex: index of device
685  *
686  *	Search for an interface by index. Returns %NULL if the device
687  *	is not found or a pointer to the device. The device has not
688  *	had its reference counter increased so the caller must be careful
689  *	about locking. The caller must hold either the RTNL semaphore
690  *	or @dev_base_lock.
691  */
692 
693 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
694 {
695 	struct hlist_node *p;
696 	struct net_device *dev;
697 	struct hlist_head *head = dev_index_hash(net, ifindex);
698 
699 	hlist_for_each_entry(dev, p, head, index_hlist)
700 		if (dev->ifindex == ifindex)
701 			return dev;
702 
703 	return NULL;
704 }
705 EXPORT_SYMBOL(__dev_get_by_index);
706 
707 /**
708  *	dev_get_by_index_rcu - find a device by its ifindex
709  *	@net: the applicable net namespace
710  *	@ifindex: index of device
711  *
712  *	Search for an interface by index. Returns %NULL if the device
713  *	is not found or a pointer to the device. The device has not
714  *	had its reference counter increased so the caller must be careful
715  *	about locking. The caller must hold RCU lock.
716  */
717 
718 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
719 {
720 	struct hlist_node *p;
721 	struct net_device *dev;
722 	struct hlist_head *head = dev_index_hash(net, ifindex);
723 
724 	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
725 		if (dev->ifindex == ifindex)
726 			return dev;
727 
728 	return NULL;
729 }
730 EXPORT_SYMBOL(dev_get_by_index_rcu);
731 
732 
733 /**
734  *	dev_get_by_index - find a device by its ifindex
735  *	@net: the applicable net namespace
736  *	@ifindex: index of device
737  *
738  *	Search for an interface by index. Returns NULL if the device
739  *	is not found or a pointer to the device. The device returned has
740  *	had a reference added and the pointer is safe until the user calls
741  *	dev_put to indicate they have finished with it.
742  */
743 
744 struct net_device *dev_get_by_index(struct net *net, int ifindex)
745 {
746 	struct net_device *dev;
747 
748 	rcu_read_lock();
749 	dev = dev_get_by_index_rcu(net, ifindex);
750 	if (dev)
751 		dev_hold(dev);
752 	rcu_read_unlock();
753 	return dev;
754 }
755 EXPORT_SYMBOL(dev_get_by_index);
756 
757 /**
758  *	dev_getbyhwaddr_rcu - find a device by its hardware address
759  *	@net: the applicable net namespace
760  *	@type: media type of device
761  *	@ha: hardware address
762  *
763  *	Search for an interface by MAC address. Returns NULL if the device
764  *	is not found or a pointer to the device.
765  *	The caller must hold RCU or RTNL.
766  *	The returned device has not had its ref count increased
767  *	and the caller must therefore be careful about locking
768  *
769  */
770 
771 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
772 				       const char *ha)
773 {
774 	struct net_device *dev;
775 
776 	for_each_netdev_rcu(net, dev)
777 		if (dev->type == type &&
778 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
779 			return dev;
780 
781 	return NULL;
782 }
783 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
784 
785 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
786 {
787 	struct net_device *dev;
788 
789 	ASSERT_RTNL();
790 	for_each_netdev(net, dev)
791 		if (dev->type == type)
792 			return dev;
793 
794 	return NULL;
795 }
796 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
797 
798 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
799 {
800 	struct net_device *dev, *ret = NULL;
801 
802 	rcu_read_lock();
803 	for_each_netdev_rcu(net, dev)
804 		if (dev->type == type) {
805 			dev_hold(dev);
806 			ret = dev;
807 			break;
808 		}
809 	rcu_read_unlock();
810 	return ret;
811 }
812 EXPORT_SYMBOL(dev_getfirstbyhwtype);
813 
814 /**
815  *	dev_get_by_flags_rcu - find any device with given flags
816  *	@net: the applicable net namespace
817  *	@if_flags: IFF_* values
818  *	@mask: bitmask of bits in if_flags to check
819  *
820  *	Search for any interface with the given flags. Returns NULL if a device
821  *	is not found or a pointer to the device. Must be called inside
822  *	rcu_read_lock(), and result refcount is unchanged.
823  */
824 
825 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
826 				    unsigned short mask)
827 {
828 	struct net_device *dev, *ret;
829 
830 	ret = NULL;
831 	for_each_netdev_rcu(net, dev) {
832 		if (((dev->flags ^ if_flags) & mask) == 0) {
833 			ret = dev;
834 			break;
835 		}
836 	}
837 	return ret;
838 }
839 EXPORT_SYMBOL(dev_get_by_flags_rcu);
840 
841 /**
842  *	dev_valid_name - check if name is okay for network device
843  *	@name: name string
844  *
845  *	Network device names need to be valid file names to
846  *	to allow sysfs to work.  We also disallow any kind of
847  *	whitespace.
848  */
849 bool dev_valid_name(const char *name)
850 {
851 	if (*name == '\0')
852 		return false;
853 	if (strlen(name) >= IFNAMSIZ)
854 		return false;
855 	if (!strcmp(name, ".") || !strcmp(name, ".."))
856 		return false;
857 
858 	while (*name) {
859 		if (*name == '/' || isspace(*name))
860 			return false;
861 		name++;
862 	}
863 	return true;
864 }
865 EXPORT_SYMBOL(dev_valid_name);
866 
867 /**
868  *	__dev_alloc_name - allocate a name for a device
869  *	@net: network namespace to allocate the device name in
870  *	@name: name format string
871  *	@buf:  scratch buffer and result name string
872  *
873  *	Passed a format string - eg "lt%d" it will try and find a suitable
874  *	id. It scans list of devices to build up a free map, then chooses
875  *	the first empty slot. The caller must hold the dev_base or rtnl lock
876  *	while allocating the name and adding the device in order to avoid
877  *	duplicates.
878  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
879  *	Returns the number of the unit assigned or a negative errno code.
880  */
881 
882 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
883 {
884 	int i = 0;
885 	const char *p;
886 	const int max_netdevices = 8*PAGE_SIZE;
887 	unsigned long *inuse;
888 	struct net_device *d;
889 
890 	p = strnchr(name, IFNAMSIZ-1, '%');
891 	if (p) {
892 		/*
893 		 * Verify the string as this thing may have come from
894 		 * the user.  There must be either one "%d" and no other "%"
895 		 * characters.
896 		 */
897 		if (p[1] != 'd' || strchr(p + 2, '%'))
898 			return -EINVAL;
899 
900 		/* Use one page as a bit array of possible slots */
901 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
902 		if (!inuse)
903 			return -ENOMEM;
904 
905 		for_each_netdev(net, d) {
906 			if (!sscanf(d->name, name, &i))
907 				continue;
908 			if (i < 0 || i >= max_netdevices)
909 				continue;
910 
911 			/*  avoid cases where sscanf is not exact inverse of printf */
912 			snprintf(buf, IFNAMSIZ, name, i);
913 			if (!strncmp(buf, d->name, IFNAMSIZ))
914 				set_bit(i, inuse);
915 		}
916 
917 		i = find_first_zero_bit(inuse, max_netdevices);
918 		free_page((unsigned long) inuse);
919 	}
920 
921 	if (buf != name)
922 		snprintf(buf, IFNAMSIZ, name, i);
923 	if (!__dev_get_by_name(net, buf))
924 		return i;
925 
926 	/* It is possible to run out of possible slots
927 	 * when the name is long and there isn't enough space left
928 	 * for the digits, or if all bits are used.
929 	 */
930 	return -ENFILE;
931 }
932 
933 /**
934  *	dev_alloc_name - allocate a name for a device
935  *	@dev: device
936  *	@name: name format string
937  *
938  *	Passed a format string - eg "lt%d" it will try and find a suitable
939  *	id. It scans list of devices to build up a free map, then chooses
940  *	the first empty slot. The caller must hold the dev_base or rtnl lock
941  *	while allocating the name and adding the device in order to avoid
942  *	duplicates.
943  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
944  *	Returns the number of the unit assigned or a negative errno code.
945  */
946 
947 int dev_alloc_name(struct net_device *dev, const char *name)
948 {
949 	char buf[IFNAMSIZ];
950 	struct net *net;
951 	int ret;
952 
953 	BUG_ON(!dev_net(dev));
954 	net = dev_net(dev);
955 	ret = __dev_alloc_name(net, name, buf);
956 	if (ret >= 0)
957 		strlcpy(dev->name, buf, IFNAMSIZ);
958 	return ret;
959 }
960 EXPORT_SYMBOL(dev_alloc_name);
961 
962 static int dev_alloc_name_ns(struct net *net,
963 			     struct net_device *dev,
964 			     const char *name)
965 {
966 	char buf[IFNAMSIZ];
967 	int ret;
968 
969 	ret = __dev_alloc_name(net, name, buf);
970 	if (ret >= 0)
971 		strlcpy(dev->name, buf, IFNAMSIZ);
972 	return ret;
973 }
974 
975 static int dev_get_valid_name(struct net *net,
976 			      struct net_device *dev,
977 			      const char *name)
978 {
979 	BUG_ON(!net);
980 
981 	if (!dev_valid_name(name))
982 		return -EINVAL;
983 
984 	if (strchr(name, '%'))
985 		return dev_alloc_name_ns(net, dev, name);
986 	else if (__dev_get_by_name(net, name))
987 		return -EEXIST;
988 	else if (dev->name != name)
989 		strlcpy(dev->name, name, IFNAMSIZ);
990 
991 	return 0;
992 }
993 
994 /**
995  *	dev_change_name - change name of a device
996  *	@dev: device
997  *	@newname: name (or format string) must be at least IFNAMSIZ
998  *
999  *	Change name of a device, can pass format strings "eth%d".
1000  *	for wildcarding.
1001  */
1002 int dev_change_name(struct net_device *dev, const char *newname)
1003 {
1004 	char oldname[IFNAMSIZ];
1005 	int err = 0;
1006 	int ret;
1007 	struct net *net;
1008 
1009 	ASSERT_RTNL();
1010 	BUG_ON(!dev_net(dev));
1011 
1012 	net = dev_net(dev);
1013 	if (dev->flags & IFF_UP)
1014 		return -EBUSY;
1015 
1016 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1017 		return 0;
1018 
1019 	memcpy(oldname, dev->name, IFNAMSIZ);
1020 
1021 	err = dev_get_valid_name(net, dev, newname);
1022 	if (err < 0)
1023 		return err;
1024 
1025 rollback:
1026 	ret = device_rename(&dev->dev, dev->name);
1027 	if (ret) {
1028 		memcpy(dev->name, oldname, IFNAMSIZ);
1029 		return ret;
1030 	}
1031 
1032 	write_lock_bh(&dev_base_lock);
1033 	hlist_del_rcu(&dev->name_hlist);
1034 	write_unlock_bh(&dev_base_lock);
1035 
1036 	synchronize_rcu();
1037 
1038 	write_lock_bh(&dev_base_lock);
1039 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1040 	write_unlock_bh(&dev_base_lock);
1041 
1042 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1043 	ret = notifier_to_errno(ret);
1044 
1045 	if (ret) {
1046 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1047 		if (err >= 0) {
1048 			err = ret;
1049 			memcpy(dev->name, oldname, IFNAMSIZ);
1050 			goto rollback;
1051 		} else {
1052 			pr_err("%s: name change rollback failed: %d\n",
1053 			       dev->name, ret);
1054 		}
1055 	}
1056 
1057 	return err;
1058 }
1059 
1060 /**
1061  *	dev_set_alias - change ifalias of a device
1062  *	@dev: device
1063  *	@alias: name up to IFALIASZ
1064  *	@len: limit of bytes to copy from info
1065  *
1066  *	Set ifalias for a device,
1067  */
1068 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1069 {
1070 	char *new_ifalias;
1071 
1072 	ASSERT_RTNL();
1073 
1074 	if (len >= IFALIASZ)
1075 		return -EINVAL;
1076 
1077 	if (!len) {
1078 		if (dev->ifalias) {
1079 			kfree(dev->ifalias);
1080 			dev->ifalias = NULL;
1081 		}
1082 		return 0;
1083 	}
1084 
1085 	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1086 	if (!new_ifalias)
1087 		return -ENOMEM;
1088 	dev->ifalias = new_ifalias;
1089 
1090 	strlcpy(dev->ifalias, alias, len+1);
1091 	return len;
1092 }
1093 
1094 
1095 /**
1096  *	netdev_features_change - device changes features
1097  *	@dev: device to cause notification
1098  *
1099  *	Called to indicate a device has changed features.
1100  */
1101 void netdev_features_change(struct net_device *dev)
1102 {
1103 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1104 }
1105 EXPORT_SYMBOL(netdev_features_change);
1106 
1107 /**
1108  *	netdev_state_change - device changes state
1109  *	@dev: device to cause notification
1110  *
1111  *	Called to indicate a device has changed state. This function calls
1112  *	the notifier chains for netdev_chain and sends a NEWLINK message
1113  *	to the routing socket.
1114  */
1115 void netdev_state_change(struct net_device *dev)
1116 {
1117 	if (dev->flags & IFF_UP) {
1118 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
1119 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1120 	}
1121 }
1122 EXPORT_SYMBOL(netdev_state_change);
1123 
1124 /**
1125  * 	netdev_notify_peers - notify network peers about existence of @dev
1126  * 	@dev: network device
1127  *
1128  * Generate traffic such that interested network peers are aware of
1129  * @dev, such as by generating a gratuitous ARP. This may be used when
1130  * a device wants to inform the rest of the network about some sort of
1131  * reconfiguration such as a failover event or virtual machine
1132  * migration.
1133  */
1134 void netdev_notify_peers(struct net_device *dev)
1135 {
1136 	rtnl_lock();
1137 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1138 	rtnl_unlock();
1139 }
1140 EXPORT_SYMBOL(netdev_notify_peers);
1141 
1142 /**
1143  *	dev_load 	- load a network module
1144  *	@net: the applicable net namespace
1145  *	@name: name of interface
1146  *
1147  *	If a network interface is not present and the process has suitable
1148  *	privileges this function loads the module. If module loading is not
1149  *	available in this kernel then it becomes a nop.
1150  */
1151 
1152 void dev_load(struct net *net, const char *name)
1153 {
1154 	struct net_device *dev;
1155 	int no_module;
1156 
1157 	rcu_read_lock();
1158 	dev = dev_get_by_name_rcu(net, name);
1159 	rcu_read_unlock();
1160 
1161 	no_module = !dev;
1162 	if (no_module && capable(CAP_NET_ADMIN))
1163 		no_module = request_module("netdev-%s", name);
1164 	if (no_module && capable(CAP_SYS_MODULE)) {
1165 		if (!request_module("%s", name))
1166 			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1167 				name);
1168 	}
1169 }
1170 EXPORT_SYMBOL(dev_load);
1171 
1172 static int __dev_open(struct net_device *dev)
1173 {
1174 	const struct net_device_ops *ops = dev->netdev_ops;
1175 	int ret;
1176 
1177 	ASSERT_RTNL();
1178 
1179 	if (!netif_device_present(dev))
1180 		return -ENODEV;
1181 
1182 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1183 	ret = notifier_to_errno(ret);
1184 	if (ret)
1185 		return ret;
1186 
1187 	set_bit(__LINK_STATE_START, &dev->state);
1188 
1189 	if (ops->ndo_validate_addr)
1190 		ret = ops->ndo_validate_addr(dev);
1191 
1192 	if (!ret && ops->ndo_open)
1193 		ret = ops->ndo_open(dev);
1194 
1195 	if (ret)
1196 		clear_bit(__LINK_STATE_START, &dev->state);
1197 	else {
1198 		dev->flags |= IFF_UP;
1199 		net_dmaengine_get();
1200 		dev_set_rx_mode(dev);
1201 		dev_activate(dev);
1202 		add_device_randomness(dev->dev_addr, dev->addr_len);
1203 	}
1204 
1205 	return ret;
1206 }
1207 
1208 /**
1209  *	dev_open	- prepare an interface for use.
1210  *	@dev:	device to open
1211  *
1212  *	Takes a device from down to up state. The device's private open
1213  *	function is invoked and then the multicast lists are loaded. Finally
1214  *	the device is moved into the up state and a %NETDEV_UP message is
1215  *	sent to the netdev notifier chain.
1216  *
1217  *	Calling this function on an active interface is a nop. On a failure
1218  *	a negative errno code is returned.
1219  */
1220 int dev_open(struct net_device *dev)
1221 {
1222 	int ret;
1223 
1224 	if (dev->flags & IFF_UP)
1225 		return 0;
1226 
1227 	ret = __dev_open(dev);
1228 	if (ret < 0)
1229 		return ret;
1230 
1231 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1232 	call_netdevice_notifiers(NETDEV_UP, dev);
1233 
1234 	return ret;
1235 }
1236 EXPORT_SYMBOL(dev_open);
1237 
1238 static int __dev_close_many(struct list_head *head)
1239 {
1240 	struct net_device *dev;
1241 
1242 	ASSERT_RTNL();
1243 	might_sleep();
1244 
1245 	list_for_each_entry(dev, head, unreg_list) {
1246 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1247 
1248 		clear_bit(__LINK_STATE_START, &dev->state);
1249 
1250 		/* Synchronize to scheduled poll. We cannot touch poll list, it
1251 		 * can be even on different cpu. So just clear netif_running().
1252 		 *
1253 		 * dev->stop() will invoke napi_disable() on all of it's
1254 		 * napi_struct instances on this device.
1255 		 */
1256 		smp_mb__after_clear_bit(); /* Commit netif_running(). */
1257 	}
1258 
1259 	dev_deactivate_many(head);
1260 
1261 	list_for_each_entry(dev, head, unreg_list) {
1262 		const struct net_device_ops *ops = dev->netdev_ops;
1263 
1264 		/*
1265 		 *	Call the device specific close. This cannot fail.
1266 		 *	Only if device is UP
1267 		 *
1268 		 *	We allow it to be called even after a DETACH hot-plug
1269 		 *	event.
1270 		 */
1271 		if (ops->ndo_stop)
1272 			ops->ndo_stop(dev);
1273 
1274 		dev->flags &= ~IFF_UP;
1275 		net_dmaengine_put();
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 static int __dev_close(struct net_device *dev)
1282 {
1283 	int retval;
1284 	LIST_HEAD(single);
1285 
1286 	list_add(&dev->unreg_list, &single);
1287 	retval = __dev_close_many(&single);
1288 	list_del(&single);
1289 	return retval;
1290 }
1291 
1292 static int dev_close_many(struct list_head *head)
1293 {
1294 	struct net_device *dev, *tmp;
1295 	LIST_HEAD(tmp_list);
1296 
1297 	list_for_each_entry_safe(dev, tmp, head, unreg_list)
1298 		if (!(dev->flags & IFF_UP))
1299 			list_move(&dev->unreg_list, &tmp_list);
1300 
1301 	__dev_close_many(head);
1302 
1303 	list_for_each_entry(dev, head, unreg_list) {
1304 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1305 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1306 	}
1307 
1308 	/* rollback_registered_many needs the complete original list */
1309 	list_splice(&tmp_list, head);
1310 	return 0;
1311 }
1312 
1313 /**
1314  *	dev_close - shutdown an interface.
1315  *	@dev: device to shutdown
1316  *
1317  *	This function moves an active device into down state. A
1318  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1319  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1320  *	chain.
1321  */
1322 int dev_close(struct net_device *dev)
1323 {
1324 	if (dev->flags & IFF_UP) {
1325 		LIST_HEAD(single);
1326 
1327 		list_add(&dev->unreg_list, &single);
1328 		dev_close_many(&single);
1329 		list_del(&single);
1330 	}
1331 	return 0;
1332 }
1333 EXPORT_SYMBOL(dev_close);
1334 
1335 
1336 /**
1337  *	dev_disable_lro - disable Large Receive Offload on a device
1338  *	@dev: device
1339  *
1340  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1341  *	called under RTNL.  This is needed if received packets may be
1342  *	forwarded to another interface.
1343  */
1344 void dev_disable_lro(struct net_device *dev)
1345 {
1346 	/*
1347 	 * If we're trying to disable lro on a vlan device
1348 	 * use the underlying physical device instead
1349 	 */
1350 	if (is_vlan_dev(dev))
1351 		dev = vlan_dev_real_dev(dev);
1352 
1353 	dev->wanted_features &= ~NETIF_F_LRO;
1354 	netdev_update_features(dev);
1355 
1356 	if (unlikely(dev->features & NETIF_F_LRO))
1357 		netdev_WARN(dev, "failed to disable LRO!\n");
1358 }
1359 EXPORT_SYMBOL(dev_disable_lro);
1360 
1361 
1362 static int dev_boot_phase = 1;
1363 
1364 /**
1365  *	register_netdevice_notifier - register a network notifier block
1366  *	@nb: notifier
1367  *
1368  *	Register a notifier to be called when network device events occur.
1369  *	The notifier passed is linked into the kernel structures and must
1370  *	not be reused until it has been unregistered. A negative errno code
1371  *	is returned on a failure.
1372  *
1373  * 	When registered all registration and up events are replayed
1374  *	to the new notifier to allow device to have a race free
1375  *	view of the network device list.
1376  */
1377 
1378 int register_netdevice_notifier(struct notifier_block *nb)
1379 {
1380 	struct net_device *dev;
1381 	struct net_device *last;
1382 	struct net *net;
1383 	int err;
1384 
1385 	rtnl_lock();
1386 	err = raw_notifier_chain_register(&netdev_chain, nb);
1387 	if (err)
1388 		goto unlock;
1389 	if (dev_boot_phase)
1390 		goto unlock;
1391 	for_each_net(net) {
1392 		for_each_netdev(net, dev) {
1393 			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1394 			err = notifier_to_errno(err);
1395 			if (err)
1396 				goto rollback;
1397 
1398 			if (!(dev->flags & IFF_UP))
1399 				continue;
1400 
1401 			nb->notifier_call(nb, NETDEV_UP, dev);
1402 		}
1403 	}
1404 
1405 unlock:
1406 	rtnl_unlock();
1407 	return err;
1408 
1409 rollback:
1410 	last = dev;
1411 	for_each_net(net) {
1412 		for_each_netdev(net, dev) {
1413 			if (dev == last)
1414 				goto outroll;
1415 
1416 			if (dev->flags & IFF_UP) {
1417 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1418 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1419 			}
1420 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1421 		}
1422 	}
1423 
1424 outroll:
1425 	raw_notifier_chain_unregister(&netdev_chain, nb);
1426 	goto unlock;
1427 }
1428 EXPORT_SYMBOL(register_netdevice_notifier);
1429 
1430 /**
1431  *	unregister_netdevice_notifier - unregister a network notifier block
1432  *	@nb: notifier
1433  *
1434  *	Unregister a notifier previously registered by
1435  *	register_netdevice_notifier(). The notifier is unlinked into the
1436  *	kernel structures and may then be reused. A negative errno code
1437  *	is returned on a failure.
1438  *
1439  * 	After unregistering unregister and down device events are synthesized
1440  *	for all devices on the device list to the removed notifier to remove
1441  *	the need for special case cleanup code.
1442  */
1443 
1444 int unregister_netdevice_notifier(struct notifier_block *nb)
1445 {
1446 	struct net_device *dev;
1447 	struct net *net;
1448 	int err;
1449 
1450 	rtnl_lock();
1451 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1452 	if (err)
1453 		goto unlock;
1454 
1455 	for_each_net(net) {
1456 		for_each_netdev(net, dev) {
1457 			if (dev->flags & IFF_UP) {
1458 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1459 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1460 			}
1461 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1462 		}
1463 	}
1464 unlock:
1465 	rtnl_unlock();
1466 	return err;
1467 }
1468 EXPORT_SYMBOL(unregister_netdevice_notifier);
1469 
1470 /**
1471  *	call_netdevice_notifiers - call all network notifier blocks
1472  *      @val: value passed unmodified to notifier function
1473  *      @dev: net_device pointer passed unmodified to notifier function
1474  *
1475  *	Call all network notifier blocks.  Parameters and return value
1476  *	are as for raw_notifier_call_chain().
1477  */
1478 
1479 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1480 {
1481 	ASSERT_RTNL();
1482 	return raw_notifier_call_chain(&netdev_chain, val, dev);
1483 }
1484 EXPORT_SYMBOL(call_netdevice_notifiers);
1485 
1486 static struct static_key netstamp_needed __read_mostly;
1487 #ifdef HAVE_JUMP_LABEL
1488 /* We are not allowed to call static_key_slow_dec() from irq context
1489  * If net_disable_timestamp() is called from irq context, defer the
1490  * static_key_slow_dec() calls.
1491  */
1492 static atomic_t netstamp_needed_deferred;
1493 #endif
1494 
1495 void net_enable_timestamp(void)
1496 {
1497 #ifdef HAVE_JUMP_LABEL
1498 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1499 
1500 	if (deferred) {
1501 		while (--deferred)
1502 			static_key_slow_dec(&netstamp_needed);
1503 		return;
1504 	}
1505 #endif
1506 	WARN_ON(in_interrupt());
1507 	static_key_slow_inc(&netstamp_needed);
1508 }
1509 EXPORT_SYMBOL(net_enable_timestamp);
1510 
1511 void net_disable_timestamp(void)
1512 {
1513 #ifdef HAVE_JUMP_LABEL
1514 	if (in_interrupt()) {
1515 		atomic_inc(&netstamp_needed_deferred);
1516 		return;
1517 	}
1518 #endif
1519 	static_key_slow_dec(&netstamp_needed);
1520 }
1521 EXPORT_SYMBOL(net_disable_timestamp);
1522 
1523 static inline void net_timestamp_set(struct sk_buff *skb)
1524 {
1525 	skb->tstamp.tv64 = 0;
1526 	if (static_key_false(&netstamp_needed))
1527 		__net_timestamp(skb);
1528 }
1529 
1530 #define net_timestamp_check(COND, SKB)			\
1531 	if (static_key_false(&netstamp_needed)) {		\
1532 		if ((COND) && !(SKB)->tstamp.tv64)	\
1533 			__net_timestamp(SKB);		\
1534 	}						\
1535 
1536 static int net_hwtstamp_validate(struct ifreq *ifr)
1537 {
1538 	struct hwtstamp_config cfg;
1539 	enum hwtstamp_tx_types tx_type;
1540 	enum hwtstamp_rx_filters rx_filter;
1541 	int tx_type_valid = 0;
1542 	int rx_filter_valid = 0;
1543 
1544 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1545 		return -EFAULT;
1546 
1547 	if (cfg.flags) /* reserved for future extensions */
1548 		return -EINVAL;
1549 
1550 	tx_type = cfg.tx_type;
1551 	rx_filter = cfg.rx_filter;
1552 
1553 	switch (tx_type) {
1554 	case HWTSTAMP_TX_OFF:
1555 	case HWTSTAMP_TX_ON:
1556 	case HWTSTAMP_TX_ONESTEP_SYNC:
1557 		tx_type_valid = 1;
1558 		break;
1559 	}
1560 
1561 	switch (rx_filter) {
1562 	case HWTSTAMP_FILTER_NONE:
1563 	case HWTSTAMP_FILTER_ALL:
1564 	case HWTSTAMP_FILTER_SOME:
1565 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1566 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1567 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1568 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1569 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1570 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1571 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1572 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1573 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1574 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1575 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1576 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1577 		rx_filter_valid = 1;
1578 		break;
1579 	}
1580 
1581 	if (!tx_type_valid || !rx_filter_valid)
1582 		return -ERANGE;
1583 
1584 	return 0;
1585 }
1586 
1587 static inline bool is_skb_forwardable(struct net_device *dev,
1588 				      struct sk_buff *skb)
1589 {
1590 	unsigned int len;
1591 
1592 	if (!(dev->flags & IFF_UP))
1593 		return false;
1594 
1595 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1596 	if (skb->len <= len)
1597 		return true;
1598 
1599 	/* if TSO is enabled, we don't care about the length as the packet
1600 	 * could be forwarded without being segmented before
1601 	 */
1602 	if (skb_is_gso(skb))
1603 		return true;
1604 
1605 	return false;
1606 }
1607 
1608 /**
1609  * dev_forward_skb - loopback an skb to another netif
1610  *
1611  * @dev: destination network device
1612  * @skb: buffer to forward
1613  *
1614  * return values:
1615  *	NET_RX_SUCCESS	(no congestion)
1616  *	NET_RX_DROP     (packet was dropped, but freed)
1617  *
1618  * dev_forward_skb can be used for injecting an skb from the
1619  * start_xmit function of one device into the receive queue
1620  * of another device.
1621  *
1622  * The receiving device may be in another namespace, so
1623  * we have to clear all information in the skb that could
1624  * impact namespace isolation.
1625  */
1626 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1627 {
1628 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1629 		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1630 			atomic_long_inc(&dev->rx_dropped);
1631 			kfree_skb(skb);
1632 			return NET_RX_DROP;
1633 		}
1634 	}
1635 
1636 	skb_orphan(skb);
1637 	nf_reset(skb);
1638 
1639 	if (unlikely(!is_skb_forwardable(dev, skb))) {
1640 		atomic_long_inc(&dev->rx_dropped);
1641 		kfree_skb(skb);
1642 		return NET_RX_DROP;
1643 	}
1644 	skb->skb_iif = 0;
1645 	skb->dev = dev;
1646 	skb_dst_drop(skb);
1647 	skb->tstamp.tv64 = 0;
1648 	skb->pkt_type = PACKET_HOST;
1649 	skb->protocol = eth_type_trans(skb, dev);
1650 	skb->mark = 0;
1651 	secpath_reset(skb);
1652 	nf_reset(skb);
1653 	return netif_rx(skb);
1654 }
1655 EXPORT_SYMBOL_GPL(dev_forward_skb);
1656 
1657 static inline int deliver_skb(struct sk_buff *skb,
1658 			      struct packet_type *pt_prev,
1659 			      struct net_device *orig_dev)
1660 {
1661 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1662 		return -ENOMEM;
1663 	atomic_inc(&skb->users);
1664 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1665 }
1666 
1667 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1668 {
1669 	if (ptype->af_packet_priv == NULL)
1670 		return false;
1671 
1672 	if (ptype->id_match)
1673 		return ptype->id_match(ptype, skb->sk);
1674 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1675 		return true;
1676 
1677 	return false;
1678 }
1679 
1680 /*
1681  *	Support routine. Sends outgoing frames to any network
1682  *	taps currently in use.
1683  */
1684 
1685 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1686 {
1687 	struct packet_type *ptype;
1688 	struct sk_buff *skb2 = NULL;
1689 	struct packet_type *pt_prev = NULL;
1690 
1691 	rcu_read_lock();
1692 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1693 		/* Never send packets back to the socket
1694 		 * they originated from - MvS (miquels@drinkel.ow.org)
1695 		 */
1696 		if ((ptype->dev == dev || !ptype->dev) &&
1697 		    (!skb_loop_sk(ptype, skb))) {
1698 			if (pt_prev) {
1699 				deliver_skb(skb2, pt_prev, skb->dev);
1700 				pt_prev = ptype;
1701 				continue;
1702 			}
1703 
1704 			skb2 = skb_clone(skb, GFP_ATOMIC);
1705 			if (!skb2)
1706 				break;
1707 
1708 			net_timestamp_set(skb2);
1709 
1710 			/* skb->nh should be correctly
1711 			   set by sender, so that the second statement is
1712 			   just protection against buggy protocols.
1713 			 */
1714 			skb_reset_mac_header(skb2);
1715 
1716 			if (skb_network_header(skb2) < skb2->data ||
1717 			    skb2->network_header > skb2->tail) {
1718 				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1719 						     ntohs(skb2->protocol),
1720 						     dev->name);
1721 				skb_reset_network_header(skb2);
1722 			}
1723 
1724 			skb2->transport_header = skb2->network_header;
1725 			skb2->pkt_type = PACKET_OUTGOING;
1726 			pt_prev = ptype;
1727 		}
1728 	}
1729 	if (pt_prev)
1730 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1731 	rcu_read_unlock();
1732 }
1733 
1734 /**
1735  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1736  * @dev: Network device
1737  * @txq: number of queues available
1738  *
1739  * If real_num_tx_queues is changed the tc mappings may no longer be
1740  * valid. To resolve this verify the tc mapping remains valid and if
1741  * not NULL the mapping. With no priorities mapping to this
1742  * offset/count pair it will no longer be used. In the worst case TC0
1743  * is invalid nothing can be done so disable priority mappings. If is
1744  * expected that drivers will fix this mapping if they can before
1745  * calling netif_set_real_num_tx_queues.
1746  */
1747 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1748 {
1749 	int i;
1750 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1751 
1752 	/* If TC0 is invalidated disable TC mapping */
1753 	if (tc->offset + tc->count > txq) {
1754 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1755 		dev->num_tc = 0;
1756 		return;
1757 	}
1758 
1759 	/* Invalidated prio to tc mappings set to TC0 */
1760 	for (i = 1; i < TC_BITMASK + 1; i++) {
1761 		int q = netdev_get_prio_tc_map(dev, i);
1762 
1763 		tc = &dev->tc_to_txq[q];
1764 		if (tc->offset + tc->count > txq) {
1765 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1766 				i, q);
1767 			netdev_set_prio_tc_map(dev, i, 0);
1768 		}
1769 	}
1770 }
1771 
1772 /*
1773  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1774  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
1775  */
1776 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1777 {
1778 	int rc;
1779 
1780 	if (txq < 1 || txq > dev->num_tx_queues)
1781 		return -EINVAL;
1782 
1783 	if (dev->reg_state == NETREG_REGISTERED ||
1784 	    dev->reg_state == NETREG_UNREGISTERING) {
1785 		ASSERT_RTNL();
1786 
1787 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1788 						  txq);
1789 		if (rc)
1790 			return rc;
1791 
1792 		if (dev->num_tc)
1793 			netif_setup_tc(dev, txq);
1794 
1795 		if (txq < dev->real_num_tx_queues)
1796 			qdisc_reset_all_tx_gt(dev, txq);
1797 	}
1798 
1799 	dev->real_num_tx_queues = txq;
1800 	return 0;
1801 }
1802 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1803 
1804 #ifdef CONFIG_RPS
1805 /**
1806  *	netif_set_real_num_rx_queues - set actual number of RX queues used
1807  *	@dev: Network device
1808  *	@rxq: Actual number of RX queues
1809  *
1810  *	This must be called either with the rtnl_lock held or before
1811  *	registration of the net device.  Returns 0 on success, or a
1812  *	negative error code.  If called before registration, it always
1813  *	succeeds.
1814  */
1815 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1816 {
1817 	int rc;
1818 
1819 	if (rxq < 1 || rxq > dev->num_rx_queues)
1820 		return -EINVAL;
1821 
1822 	if (dev->reg_state == NETREG_REGISTERED) {
1823 		ASSERT_RTNL();
1824 
1825 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1826 						  rxq);
1827 		if (rc)
1828 			return rc;
1829 	}
1830 
1831 	dev->real_num_rx_queues = rxq;
1832 	return 0;
1833 }
1834 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1835 #endif
1836 
1837 /**
1838  * netif_get_num_default_rss_queues - default number of RSS queues
1839  *
1840  * This routine should set an upper limit on the number of RSS queues
1841  * used by default by multiqueue devices.
1842  */
1843 int netif_get_num_default_rss_queues(void)
1844 {
1845 	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1846 }
1847 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
1848 
1849 static inline void __netif_reschedule(struct Qdisc *q)
1850 {
1851 	struct softnet_data *sd;
1852 	unsigned long flags;
1853 
1854 	local_irq_save(flags);
1855 	sd = &__get_cpu_var(softnet_data);
1856 	q->next_sched = NULL;
1857 	*sd->output_queue_tailp = q;
1858 	sd->output_queue_tailp = &q->next_sched;
1859 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
1860 	local_irq_restore(flags);
1861 }
1862 
1863 void __netif_schedule(struct Qdisc *q)
1864 {
1865 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1866 		__netif_reschedule(q);
1867 }
1868 EXPORT_SYMBOL(__netif_schedule);
1869 
1870 void dev_kfree_skb_irq(struct sk_buff *skb)
1871 {
1872 	if (atomic_dec_and_test(&skb->users)) {
1873 		struct softnet_data *sd;
1874 		unsigned long flags;
1875 
1876 		local_irq_save(flags);
1877 		sd = &__get_cpu_var(softnet_data);
1878 		skb->next = sd->completion_queue;
1879 		sd->completion_queue = skb;
1880 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
1881 		local_irq_restore(flags);
1882 	}
1883 }
1884 EXPORT_SYMBOL(dev_kfree_skb_irq);
1885 
1886 void dev_kfree_skb_any(struct sk_buff *skb)
1887 {
1888 	if (in_irq() || irqs_disabled())
1889 		dev_kfree_skb_irq(skb);
1890 	else
1891 		dev_kfree_skb(skb);
1892 }
1893 EXPORT_SYMBOL(dev_kfree_skb_any);
1894 
1895 
1896 /**
1897  * netif_device_detach - mark device as removed
1898  * @dev: network device
1899  *
1900  * Mark device as removed from system and therefore no longer available.
1901  */
1902 void netif_device_detach(struct net_device *dev)
1903 {
1904 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1905 	    netif_running(dev)) {
1906 		netif_tx_stop_all_queues(dev);
1907 	}
1908 }
1909 EXPORT_SYMBOL(netif_device_detach);
1910 
1911 /**
1912  * netif_device_attach - mark device as attached
1913  * @dev: network device
1914  *
1915  * Mark device as attached from system and restart if needed.
1916  */
1917 void netif_device_attach(struct net_device *dev)
1918 {
1919 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1920 	    netif_running(dev)) {
1921 		netif_tx_wake_all_queues(dev);
1922 		__netdev_watchdog_up(dev);
1923 	}
1924 }
1925 EXPORT_SYMBOL(netif_device_attach);
1926 
1927 static void skb_warn_bad_offload(const struct sk_buff *skb)
1928 {
1929 	static const netdev_features_t null_features = 0;
1930 	struct net_device *dev = skb->dev;
1931 	const char *driver = "";
1932 
1933 	if (dev && dev->dev.parent)
1934 		driver = dev_driver_string(dev->dev.parent);
1935 
1936 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1937 	     "gso_type=%d ip_summed=%d\n",
1938 	     driver, dev ? &dev->features : &null_features,
1939 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
1940 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1941 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
1942 }
1943 
1944 /*
1945  * Invalidate hardware checksum when packet is to be mangled, and
1946  * complete checksum manually on outgoing path.
1947  */
1948 int skb_checksum_help(struct sk_buff *skb)
1949 {
1950 	__wsum csum;
1951 	int ret = 0, offset;
1952 
1953 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1954 		goto out_set_summed;
1955 
1956 	if (unlikely(skb_shinfo(skb)->gso_size)) {
1957 		skb_warn_bad_offload(skb);
1958 		return -EINVAL;
1959 	}
1960 
1961 	offset = skb_checksum_start_offset(skb);
1962 	BUG_ON(offset >= skb_headlen(skb));
1963 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
1964 
1965 	offset += skb->csum_offset;
1966 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1967 
1968 	if (skb_cloned(skb) &&
1969 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1970 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1971 		if (ret)
1972 			goto out;
1973 	}
1974 
1975 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
1976 out_set_summed:
1977 	skb->ip_summed = CHECKSUM_NONE;
1978 out:
1979 	return ret;
1980 }
1981 EXPORT_SYMBOL(skb_checksum_help);
1982 
1983 /**
1984  *	skb_gso_segment - Perform segmentation on skb.
1985  *	@skb: buffer to segment
1986  *	@features: features for the output path (see dev->features)
1987  *
1988  *	This function segments the given skb and returns a list of segments.
1989  *
1990  *	It may return NULL if the skb requires no segmentation.  This is
1991  *	only possible when GSO is used for verifying header integrity.
1992  */
1993 struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1994 	netdev_features_t features)
1995 {
1996 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1997 	struct packet_type *ptype;
1998 	__be16 type = skb->protocol;
1999 	int vlan_depth = ETH_HLEN;
2000 	int err;
2001 
2002 	while (type == htons(ETH_P_8021Q)) {
2003 		struct vlan_hdr *vh;
2004 
2005 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2006 			return ERR_PTR(-EINVAL);
2007 
2008 		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2009 		type = vh->h_vlan_encapsulated_proto;
2010 		vlan_depth += VLAN_HLEN;
2011 	}
2012 
2013 	skb_reset_mac_header(skb);
2014 	skb->mac_len = skb->network_header - skb->mac_header;
2015 	__skb_pull(skb, skb->mac_len);
2016 
2017 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2018 		skb_warn_bad_offload(skb);
2019 
2020 		if (skb_header_cloned(skb) &&
2021 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2022 			return ERR_PTR(err);
2023 	}
2024 
2025 	rcu_read_lock();
2026 	list_for_each_entry_rcu(ptype,
2027 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2028 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2029 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2030 				err = ptype->gso_send_check(skb);
2031 				segs = ERR_PTR(err);
2032 				if (err || skb_gso_ok(skb, features))
2033 					break;
2034 				__skb_push(skb, (skb->data -
2035 						 skb_network_header(skb)));
2036 			}
2037 			segs = ptype->gso_segment(skb, features);
2038 			break;
2039 		}
2040 	}
2041 	rcu_read_unlock();
2042 
2043 	__skb_push(skb, skb->data - skb_mac_header(skb));
2044 
2045 	return segs;
2046 }
2047 EXPORT_SYMBOL(skb_gso_segment);
2048 
2049 /* Take action when hardware reception checksum errors are detected. */
2050 #ifdef CONFIG_BUG
2051 void netdev_rx_csum_fault(struct net_device *dev)
2052 {
2053 	if (net_ratelimit()) {
2054 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2055 		dump_stack();
2056 	}
2057 }
2058 EXPORT_SYMBOL(netdev_rx_csum_fault);
2059 #endif
2060 
2061 /* Actually, we should eliminate this check as soon as we know, that:
2062  * 1. IOMMU is present and allows to map all the memory.
2063  * 2. No high memory really exists on this machine.
2064  */
2065 
2066 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2067 {
2068 #ifdef CONFIG_HIGHMEM
2069 	int i;
2070 	if (!(dev->features & NETIF_F_HIGHDMA)) {
2071 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2072 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2073 			if (PageHighMem(skb_frag_page(frag)))
2074 				return 1;
2075 		}
2076 	}
2077 
2078 	if (PCI_DMA_BUS_IS_PHYS) {
2079 		struct device *pdev = dev->dev.parent;
2080 
2081 		if (!pdev)
2082 			return 0;
2083 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2084 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2085 			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2086 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2087 				return 1;
2088 		}
2089 	}
2090 #endif
2091 	return 0;
2092 }
2093 
2094 struct dev_gso_cb {
2095 	void (*destructor)(struct sk_buff *skb);
2096 };
2097 
2098 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2099 
2100 static void dev_gso_skb_destructor(struct sk_buff *skb)
2101 {
2102 	struct dev_gso_cb *cb;
2103 
2104 	do {
2105 		struct sk_buff *nskb = skb->next;
2106 
2107 		skb->next = nskb->next;
2108 		nskb->next = NULL;
2109 		kfree_skb(nskb);
2110 	} while (skb->next);
2111 
2112 	cb = DEV_GSO_CB(skb);
2113 	if (cb->destructor)
2114 		cb->destructor(skb);
2115 }
2116 
2117 /**
2118  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
2119  *	@skb: buffer to segment
2120  *	@features: device features as applicable to this skb
2121  *
2122  *	This function segments the given skb and stores the list of segments
2123  *	in skb->next.
2124  */
2125 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2126 {
2127 	struct sk_buff *segs;
2128 
2129 	segs = skb_gso_segment(skb, features);
2130 
2131 	/* Verifying header integrity only. */
2132 	if (!segs)
2133 		return 0;
2134 
2135 	if (IS_ERR(segs))
2136 		return PTR_ERR(segs);
2137 
2138 	skb->next = segs;
2139 	DEV_GSO_CB(skb)->destructor = skb->destructor;
2140 	skb->destructor = dev_gso_skb_destructor;
2141 
2142 	return 0;
2143 }
2144 
2145 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2146 {
2147 	return ((features & NETIF_F_GEN_CSUM) ||
2148 		((features & NETIF_F_V4_CSUM) &&
2149 		 protocol == htons(ETH_P_IP)) ||
2150 		((features & NETIF_F_V6_CSUM) &&
2151 		 protocol == htons(ETH_P_IPV6)) ||
2152 		((features & NETIF_F_FCOE_CRC) &&
2153 		 protocol == htons(ETH_P_FCOE)));
2154 }
2155 
2156 static netdev_features_t harmonize_features(struct sk_buff *skb,
2157 	__be16 protocol, netdev_features_t features)
2158 {
2159 	if (skb->ip_summed != CHECKSUM_NONE &&
2160 	    !can_checksum_protocol(features, protocol)) {
2161 		features &= ~NETIF_F_ALL_CSUM;
2162 		features &= ~NETIF_F_SG;
2163 	} else if (illegal_highdma(skb->dev, skb)) {
2164 		features &= ~NETIF_F_SG;
2165 	}
2166 
2167 	return features;
2168 }
2169 
2170 netdev_features_t netif_skb_features(struct sk_buff *skb)
2171 {
2172 	__be16 protocol = skb->protocol;
2173 	netdev_features_t features = skb->dev->features;
2174 
2175 	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2176 		features &= ~NETIF_F_GSO_MASK;
2177 
2178 	if (protocol == htons(ETH_P_8021Q)) {
2179 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2180 		protocol = veh->h_vlan_encapsulated_proto;
2181 	} else if (!vlan_tx_tag_present(skb)) {
2182 		return harmonize_features(skb, protocol, features);
2183 	}
2184 
2185 	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2186 
2187 	if (protocol != htons(ETH_P_8021Q)) {
2188 		return harmonize_features(skb, protocol, features);
2189 	} else {
2190 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2191 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2192 		return harmonize_features(skb, protocol, features);
2193 	}
2194 }
2195 EXPORT_SYMBOL(netif_skb_features);
2196 
2197 /*
2198  * Returns true if either:
2199  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
2200  *	2. skb is fragmented and the device does not support SG.
2201  */
2202 static inline int skb_needs_linearize(struct sk_buff *skb,
2203 				      int features)
2204 {
2205 	return skb_is_nonlinear(skb) &&
2206 			((skb_has_frag_list(skb) &&
2207 				!(features & NETIF_F_FRAGLIST)) ||
2208 			(skb_shinfo(skb)->nr_frags &&
2209 				!(features & NETIF_F_SG)));
2210 }
2211 
2212 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2213 			struct netdev_queue *txq)
2214 {
2215 	const struct net_device_ops *ops = dev->netdev_ops;
2216 	int rc = NETDEV_TX_OK;
2217 	unsigned int skb_len;
2218 
2219 	if (likely(!skb->next)) {
2220 		netdev_features_t features;
2221 
2222 		/*
2223 		 * If device doesn't need skb->dst, release it right now while
2224 		 * its hot in this cpu cache
2225 		 */
2226 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2227 			skb_dst_drop(skb);
2228 
2229 		features = netif_skb_features(skb);
2230 
2231 		if (vlan_tx_tag_present(skb) &&
2232 		    !(features & NETIF_F_HW_VLAN_TX)) {
2233 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2234 			if (unlikely(!skb))
2235 				goto out;
2236 
2237 			skb->vlan_tci = 0;
2238 		}
2239 
2240 		if (netif_needs_gso(skb, features)) {
2241 			if (unlikely(dev_gso_segment(skb, features)))
2242 				goto out_kfree_skb;
2243 			if (skb->next)
2244 				goto gso;
2245 		} else {
2246 			if (skb_needs_linearize(skb, features) &&
2247 			    __skb_linearize(skb))
2248 				goto out_kfree_skb;
2249 
2250 			/* If packet is not checksummed and device does not
2251 			 * support checksumming for this protocol, complete
2252 			 * checksumming here.
2253 			 */
2254 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
2255 				skb_set_transport_header(skb,
2256 					skb_checksum_start_offset(skb));
2257 				if (!(features & NETIF_F_ALL_CSUM) &&
2258 				     skb_checksum_help(skb))
2259 					goto out_kfree_skb;
2260 			}
2261 		}
2262 
2263 		if (!list_empty(&ptype_all))
2264 			dev_queue_xmit_nit(skb, dev);
2265 
2266 		skb_len = skb->len;
2267 		rc = ops->ndo_start_xmit(skb, dev);
2268 		trace_net_dev_xmit(skb, rc, dev, skb_len);
2269 		if (rc == NETDEV_TX_OK)
2270 			txq_trans_update(txq);
2271 		return rc;
2272 	}
2273 
2274 gso:
2275 	do {
2276 		struct sk_buff *nskb = skb->next;
2277 
2278 		skb->next = nskb->next;
2279 		nskb->next = NULL;
2280 
2281 		/*
2282 		 * If device doesn't need nskb->dst, release it right now while
2283 		 * its hot in this cpu cache
2284 		 */
2285 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2286 			skb_dst_drop(nskb);
2287 
2288 		if (!list_empty(&ptype_all))
2289 			dev_queue_xmit_nit(nskb, dev);
2290 
2291 		skb_len = nskb->len;
2292 		rc = ops->ndo_start_xmit(nskb, dev);
2293 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
2294 		if (unlikely(rc != NETDEV_TX_OK)) {
2295 			if (rc & ~NETDEV_TX_MASK)
2296 				goto out_kfree_gso_skb;
2297 			nskb->next = skb->next;
2298 			skb->next = nskb;
2299 			return rc;
2300 		}
2301 		txq_trans_update(txq);
2302 		if (unlikely(netif_xmit_stopped(txq) && skb->next))
2303 			return NETDEV_TX_BUSY;
2304 	} while (skb->next);
2305 
2306 out_kfree_gso_skb:
2307 	if (likely(skb->next == NULL))
2308 		skb->destructor = DEV_GSO_CB(skb)->destructor;
2309 out_kfree_skb:
2310 	kfree_skb(skb);
2311 out:
2312 	return rc;
2313 }
2314 
2315 static u32 hashrnd __read_mostly;
2316 
2317 /*
2318  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2319  * to be used as a distribution range.
2320  */
2321 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2322 		  unsigned int num_tx_queues)
2323 {
2324 	u32 hash;
2325 	u16 qoffset = 0;
2326 	u16 qcount = num_tx_queues;
2327 
2328 	if (skb_rx_queue_recorded(skb)) {
2329 		hash = skb_get_rx_queue(skb);
2330 		while (unlikely(hash >= num_tx_queues))
2331 			hash -= num_tx_queues;
2332 		return hash;
2333 	}
2334 
2335 	if (dev->num_tc) {
2336 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2337 		qoffset = dev->tc_to_txq[tc].offset;
2338 		qcount = dev->tc_to_txq[tc].count;
2339 	}
2340 
2341 	if (skb->sk && skb->sk->sk_hash)
2342 		hash = skb->sk->sk_hash;
2343 	else
2344 		hash = (__force u16) skb->protocol;
2345 	hash = jhash_1word(hash, hashrnd);
2346 
2347 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2348 }
2349 EXPORT_SYMBOL(__skb_tx_hash);
2350 
2351 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2352 {
2353 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2354 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2355 				     dev->name, queue_index,
2356 				     dev->real_num_tx_queues);
2357 		return 0;
2358 	}
2359 	return queue_index;
2360 }
2361 
2362 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2363 {
2364 #ifdef CONFIG_XPS
2365 	struct xps_dev_maps *dev_maps;
2366 	struct xps_map *map;
2367 	int queue_index = -1;
2368 
2369 	rcu_read_lock();
2370 	dev_maps = rcu_dereference(dev->xps_maps);
2371 	if (dev_maps) {
2372 		map = rcu_dereference(
2373 		    dev_maps->cpu_map[raw_smp_processor_id()]);
2374 		if (map) {
2375 			if (map->len == 1)
2376 				queue_index = map->queues[0];
2377 			else {
2378 				u32 hash;
2379 				if (skb->sk && skb->sk->sk_hash)
2380 					hash = skb->sk->sk_hash;
2381 				else
2382 					hash = (__force u16) skb->protocol ^
2383 					    skb->rxhash;
2384 				hash = jhash_1word(hash, hashrnd);
2385 				queue_index = map->queues[
2386 				    ((u64)hash * map->len) >> 32];
2387 			}
2388 			if (unlikely(queue_index >= dev->real_num_tx_queues))
2389 				queue_index = -1;
2390 		}
2391 	}
2392 	rcu_read_unlock();
2393 
2394 	return queue_index;
2395 #else
2396 	return -1;
2397 #endif
2398 }
2399 
2400 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2401 				    struct sk_buff *skb)
2402 {
2403 	int queue_index;
2404 	const struct net_device_ops *ops = dev->netdev_ops;
2405 
2406 	if (dev->real_num_tx_queues == 1)
2407 		queue_index = 0;
2408 	else if (ops->ndo_select_queue) {
2409 		queue_index = ops->ndo_select_queue(dev, skb);
2410 		queue_index = dev_cap_txqueue(dev, queue_index);
2411 	} else {
2412 		struct sock *sk = skb->sk;
2413 		queue_index = sk_tx_queue_get(sk);
2414 
2415 		if (queue_index < 0 || skb->ooo_okay ||
2416 		    queue_index >= dev->real_num_tx_queues) {
2417 			int old_index = queue_index;
2418 
2419 			queue_index = get_xps_queue(dev, skb);
2420 			if (queue_index < 0)
2421 				queue_index = skb_tx_hash(dev, skb);
2422 
2423 			if (queue_index != old_index && sk) {
2424 				struct dst_entry *dst =
2425 				    rcu_dereference_check(sk->sk_dst_cache, 1);
2426 
2427 				if (dst && skb_dst(skb) == dst)
2428 					sk_tx_queue_set(sk, queue_index);
2429 			}
2430 		}
2431 	}
2432 
2433 	skb_set_queue_mapping(skb, queue_index);
2434 	return netdev_get_tx_queue(dev, queue_index);
2435 }
2436 
2437 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2438 				 struct net_device *dev,
2439 				 struct netdev_queue *txq)
2440 {
2441 	spinlock_t *root_lock = qdisc_lock(q);
2442 	bool contended;
2443 	int rc;
2444 
2445 	qdisc_skb_cb(skb)->pkt_len = skb->len;
2446 	qdisc_calculate_pkt_len(skb, q);
2447 	/*
2448 	 * Heuristic to force contended enqueues to serialize on a
2449 	 * separate lock before trying to get qdisc main lock.
2450 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2451 	 * and dequeue packets faster.
2452 	 */
2453 	contended = qdisc_is_running(q);
2454 	if (unlikely(contended))
2455 		spin_lock(&q->busylock);
2456 
2457 	spin_lock(root_lock);
2458 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2459 		kfree_skb(skb);
2460 		rc = NET_XMIT_DROP;
2461 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2462 		   qdisc_run_begin(q)) {
2463 		/*
2464 		 * This is a work-conserving queue; there are no old skbs
2465 		 * waiting to be sent out; and the qdisc is not running -
2466 		 * xmit the skb directly.
2467 		 */
2468 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2469 			skb_dst_force(skb);
2470 
2471 		qdisc_bstats_update(q, skb);
2472 
2473 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2474 			if (unlikely(contended)) {
2475 				spin_unlock(&q->busylock);
2476 				contended = false;
2477 			}
2478 			__qdisc_run(q);
2479 		} else
2480 			qdisc_run_end(q);
2481 
2482 		rc = NET_XMIT_SUCCESS;
2483 	} else {
2484 		skb_dst_force(skb);
2485 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2486 		if (qdisc_run_begin(q)) {
2487 			if (unlikely(contended)) {
2488 				spin_unlock(&q->busylock);
2489 				contended = false;
2490 			}
2491 			__qdisc_run(q);
2492 		}
2493 	}
2494 	spin_unlock(root_lock);
2495 	if (unlikely(contended))
2496 		spin_unlock(&q->busylock);
2497 	return rc;
2498 }
2499 
2500 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2501 static void skb_update_prio(struct sk_buff *skb)
2502 {
2503 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2504 
2505 	if (!skb->priority && skb->sk && map) {
2506 		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2507 
2508 		if (prioidx < map->priomap_len)
2509 			skb->priority = map->priomap[prioidx];
2510 	}
2511 }
2512 #else
2513 #define skb_update_prio(skb)
2514 #endif
2515 
2516 static DEFINE_PER_CPU(int, xmit_recursion);
2517 #define RECURSION_LIMIT 10
2518 
2519 /**
2520  *	dev_loopback_xmit - loop back @skb
2521  *	@skb: buffer to transmit
2522  */
2523 int dev_loopback_xmit(struct sk_buff *skb)
2524 {
2525 	skb_reset_mac_header(skb);
2526 	__skb_pull(skb, skb_network_offset(skb));
2527 	skb->pkt_type = PACKET_LOOPBACK;
2528 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2529 	WARN_ON(!skb_dst(skb));
2530 	skb_dst_force(skb);
2531 	netif_rx_ni(skb);
2532 	return 0;
2533 }
2534 EXPORT_SYMBOL(dev_loopback_xmit);
2535 
2536 /**
2537  *	dev_queue_xmit - transmit a buffer
2538  *	@skb: buffer to transmit
2539  *
2540  *	Queue a buffer for transmission to a network device. The caller must
2541  *	have set the device and priority and built the buffer before calling
2542  *	this function. The function can be called from an interrupt.
2543  *
2544  *	A negative errno code is returned on a failure. A success does not
2545  *	guarantee the frame will be transmitted as it may be dropped due
2546  *	to congestion or traffic shaping.
2547  *
2548  * -----------------------------------------------------------------------------------
2549  *      I notice this method can also return errors from the queue disciplines,
2550  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2551  *      be positive.
2552  *
2553  *      Regardless of the return value, the skb is consumed, so it is currently
2554  *      difficult to retry a send to this method.  (You can bump the ref count
2555  *      before sending to hold a reference for retry if you are careful.)
2556  *
2557  *      When calling this method, interrupts MUST be enabled.  This is because
2558  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2559  *          --BLG
2560  */
2561 int dev_queue_xmit(struct sk_buff *skb)
2562 {
2563 	struct net_device *dev = skb->dev;
2564 	struct netdev_queue *txq;
2565 	struct Qdisc *q;
2566 	int rc = -ENOMEM;
2567 
2568 	/* Disable soft irqs for various locks below. Also
2569 	 * stops preemption for RCU.
2570 	 */
2571 	rcu_read_lock_bh();
2572 
2573 	skb_update_prio(skb);
2574 
2575 	txq = netdev_pick_tx(dev, skb);
2576 	q = rcu_dereference_bh(txq->qdisc);
2577 
2578 #ifdef CONFIG_NET_CLS_ACT
2579 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2580 #endif
2581 	trace_net_dev_queue(skb);
2582 	if (q->enqueue) {
2583 		rc = __dev_xmit_skb(skb, q, dev, txq);
2584 		goto out;
2585 	}
2586 
2587 	/* The device has no queue. Common case for software devices:
2588 	   loopback, all the sorts of tunnels...
2589 
2590 	   Really, it is unlikely that netif_tx_lock protection is necessary
2591 	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2592 	   counters.)
2593 	   However, it is possible, that they rely on protection
2594 	   made by us here.
2595 
2596 	   Check this and shot the lock. It is not prone from deadlocks.
2597 	   Either shot noqueue qdisc, it is even simpler 8)
2598 	 */
2599 	if (dev->flags & IFF_UP) {
2600 		int cpu = smp_processor_id(); /* ok because BHs are off */
2601 
2602 		if (txq->xmit_lock_owner != cpu) {
2603 
2604 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2605 				goto recursion_alert;
2606 
2607 			HARD_TX_LOCK(dev, txq, cpu);
2608 
2609 			if (!netif_xmit_stopped(txq)) {
2610 				__this_cpu_inc(xmit_recursion);
2611 				rc = dev_hard_start_xmit(skb, dev, txq);
2612 				__this_cpu_dec(xmit_recursion);
2613 				if (dev_xmit_complete(rc)) {
2614 					HARD_TX_UNLOCK(dev, txq);
2615 					goto out;
2616 				}
2617 			}
2618 			HARD_TX_UNLOCK(dev, txq);
2619 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2620 					     dev->name);
2621 		} else {
2622 			/* Recursion is detected! It is possible,
2623 			 * unfortunately
2624 			 */
2625 recursion_alert:
2626 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2627 					     dev->name);
2628 		}
2629 	}
2630 
2631 	rc = -ENETDOWN;
2632 	rcu_read_unlock_bh();
2633 
2634 	kfree_skb(skb);
2635 	return rc;
2636 out:
2637 	rcu_read_unlock_bh();
2638 	return rc;
2639 }
2640 EXPORT_SYMBOL(dev_queue_xmit);
2641 
2642 
2643 /*=======================================================================
2644 			Receiver routines
2645   =======================================================================*/
2646 
2647 int netdev_max_backlog __read_mostly = 1000;
2648 EXPORT_SYMBOL(netdev_max_backlog);
2649 
2650 int netdev_tstamp_prequeue __read_mostly = 1;
2651 int netdev_budget __read_mostly = 300;
2652 int weight_p __read_mostly = 64;            /* old backlog weight */
2653 
2654 /* Called with irq disabled */
2655 static inline void ____napi_schedule(struct softnet_data *sd,
2656 				     struct napi_struct *napi)
2657 {
2658 	list_add_tail(&napi->poll_list, &sd->poll_list);
2659 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2660 }
2661 
2662 /*
2663  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2664  * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
2665  * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
2666  * if hash is a canonical 4-tuple hash over transport ports.
2667  */
2668 void __skb_get_rxhash(struct sk_buff *skb)
2669 {
2670 	struct flow_keys keys;
2671 	u32 hash;
2672 
2673 	if (!skb_flow_dissect(skb, &keys))
2674 		return;
2675 
2676 	if (keys.ports)
2677 		skb->l4_rxhash = 1;
2678 
2679 	/* get a consistent hash (same value on both flow directions) */
2680 	if (((__force u32)keys.dst < (__force u32)keys.src) ||
2681 	    (((__force u32)keys.dst == (__force u32)keys.src) &&
2682 	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2683 		swap(keys.dst, keys.src);
2684 		swap(keys.port16[0], keys.port16[1]);
2685 	}
2686 
2687 	hash = jhash_3words((__force u32)keys.dst,
2688 			    (__force u32)keys.src,
2689 			    (__force u32)keys.ports, hashrnd);
2690 	if (!hash)
2691 		hash = 1;
2692 
2693 	skb->rxhash = hash;
2694 }
2695 EXPORT_SYMBOL(__skb_get_rxhash);
2696 
2697 #ifdef CONFIG_RPS
2698 
2699 /* One global table that all flow-based protocols share. */
2700 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2701 EXPORT_SYMBOL(rps_sock_flow_table);
2702 
2703 struct static_key rps_needed __read_mostly;
2704 
2705 static struct rps_dev_flow *
2706 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2707 	    struct rps_dev_flow *rflow, u16 next_cpu)
2708 {
2709 	if (next_cpu != RPS_NO_CPU) {
2710 #ifdef CONFIG_RFS_ACCEL
2711 		struct netdev_rx_queue *rxqueue;
2712 		struct rps_dev_flow_table *flow_table;
2713 		struct rps_dev_flow *old_rflow;
2714 		u32 flow_id;
2715 		u16 rxq_index;
2716 		int rc;
2717 
2718 		/* Should we steer this flow to a different hardware queue? */
2719 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2720 		    !(dev->features & NETIF_F_NTUPLE))
2721 			goto out;
2722 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2723 		if (rxq_index == skb_get_rx_queue(skb))
2724 			goto out;
2725 
2726 		rxqueue = dev->_rx + rxq_index;
2727 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
2728 		if (!flow_table)
2729 			goto out;
2730 		flow_id = skb->rxhash & flow_table->mask;
2731 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2732 							rxq_index, flow_id);
2733 		if (rc < 0)
2734 			goto out;
2735 		old_rflow = rflow;
2736 		rflow = &flow_table->flows[flow_id];
2737 		rflow->filter = rc;
2738 		if (old_rflow->filter == rflow->filter)
2739 			old_rflow->filter = RPS_NO_FILTER;
2740 	out:
2741 #endif
2742 		rflow->last_qtail =
2743 			per_cpu(softnet_data, next_cpu).input_queue_head;
2744 	}
2745 
2746 	rflow->cpu = next_cpu;
2747 	return rflow;
2748 }
2749 
2750 /*
2751  * get_rps_cpu is called from netif_receive_skb and returns the target
2752  * CPU from the RPS map of the receiving queue for a given skb.
2753  * rcu_read_lock must be held on entry.
2754  */
2755 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2756 		       struct rps_dev_flow **rflowp)
2757 {
2758 	struct netdev_rx_queue *rxqueue;
2759 	struct rps_map *map;
2760 	struct rps_dev_flow_table *flow_table;
2761 	struct rps_sock_flow_table *sock_flow_table;
2762 	int cpu = -1;
2763 	u16 tcpu;
2764 
2765 	if (skb_rx_queue_recorded(skb)) {
2766 		u16 index = skb_get_rx_queue(skb);
2767 		if (unlikely(index >= dev->real_num_rx_queues)) {
2768 			WARN_ONCE(dev->real_num_rx_queues > 1,
2769 				  "%s received packet on queue %u, but number "
2770 				  "of RX queues is %u\n",
2771 				  dev->name, index, dev->real_num_rx_queues);
2772 			goto done;
2773 		}
2774 		rxqueue = dev->_rx + index;
2775 	} else
2776 		rxqueue = dev->_rx;
2777 
2778 	map = rcu_dereference(rxqueue->rps_map);
2779 	if (map) {
2780 		if (map->len == 1 &&
2781 		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2782 			tcpu = map->cpus[0];
2783 			if (cpu_online(tcpu))
2784 				cpu = tcpu;
2785 			goto done;
2786 		}
2787 	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2788 		goto done;
2789 	}
2790 
2791 	skb_reset_network_header(skb);
2792 	if (!skb_get_rxhash(skb))
2793 		goto done;
2794 
2795 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2796 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
2797 	if (flow_table && sock_flow_table) {
2798 		u16 next_cpu;
2799 		struct rps_dev_flow *rflow;
2800 
2801 		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2802 		tcpu = rflow->cpu;
2803 
2804 		next_cpu = sock_flow_table->ents[skb->rxhash &
2805 		    sock_flow_table->mask];
2806 
2807 		/*
2808 		 * If the desired CPU (where last recvmsg was done) is
2809 		 * different from current CPU (one in the rx-queue flow
2810 		 * table entry), switch if one of the following holds:
2811 		 *   - Current CPU is unset (equal to RPS_NO_CPU).
2812 		 *   - Current CPU is offline.
2813 		 *   - The current CPU's queue tail has advanced beyond the
2814 		 *     last packet that was enqueued using this table entry.
2815 		 *     This guarantees that all previous packets for the flow
2816 		 *     have been dequeued, thus preserving in order delivery.
2817 		 */
2818 		if (unlikely(tcpu != next_cpu) &&
2819 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2820 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2821 		      rflow->last_qtail)) >= 0))
2822 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2823 
2824 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2825 			*rflowp = rflow;
2826 			cpu = tcpu;
2827 			goto done;
2828 		}
2829 	}
2830 
2831 	if (map) {
2832 		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2833 
2834 		if (cpu_online(tcpu)) {
2835 			cpu = tcpu;
2836 			goto done;
2837 		}
2838 	}
2839 
2840 done:
2841 	return cpu;
2842 }
2843 
2844 #ifdef CONFIG_RFS_ACCEL
2845 
2846 /**
2847  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2848  * @dev: Device on which the filter was set
2849  * @rxq_index: RX queue index
2850  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2851  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2852  *
2853  * Drivers that implement ndo_rx_flow_steer() should periodically call
2854  * this function for each installed filter and remove the filters for
2855  * which it returns %true.
2856  */
2857 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2858 			 u32 flow_id, u16 filter_id)
2859 {
2860 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2861 	struct rps_dev_flow_table *flow_table;
2862 	struct rps_dev_flow *rflow;
2863 	bool expire = true;
2864 	int cpu;
2865 
2866 	rcu_read_lock();
2867 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2868 	if (flow_table && flow_id <= flow_table->mask) {
2869 		rflow = &flow_table->flows[flow_id];
2870 		cpu = ACCESS_ONCE(rflow->cpu);
2871 		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2872 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2873 			   rflow->last_qtail) <
2874 		     (int)(10 * flow_table->mask)))
2875 			expire = false;
2876 	}
2877 	rcu_read_unlock();
2878 	return expire;
2879 }
2880 EXPORT_SYMBOL(rps_may_expire_flow);
2881 
2882 #endif /* CONFIG_RFS_ACCEL */
2883 
2884 /* Called from hardirq (IPI) context */
2885 static void rps_trigger_softirq(void *data)
2886 {
2887 	struct softnet_data *sd = data;
2888 
2889 	____napi_schedule(sd, &sd->backlog);
2890 	sd->received_rps++;
2891 }
2892 
2893 #endif /* CONFIG_RPS */
2894 
2895 /*
2896  * Check if this softnet_data structure is another cpu one
2897  * If yes, queue it to our IPI list and return 1
2898  * If no, return 0
2899  */
2900 static int rps_ipi_queued(struct softnet_data *sd)
2901 {
2902 #ifdef CONFIG_RPS
2903 	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2904 
2905 	if (sd != mysd) {
2906 		sd->rps_ipi_next = mysd->rps_ipi_list;
2907 		mysd->rps_ipi_list = sd;
2908 
2909 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2910 		return 1;
2911 	}
2912 #endif /* CONFIG_RPS */
2913 	return 0;
2914 }
2915 
2916 /*
2917  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2918  * queue (may be a remote CPU queue).
2919  */
2920 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2921 			      unsigned int *qtail)
2922 {
2923 	struct softnet_data *sd;
2924 	unsigned long flags;
2925 
2926 	sd = &per_cpu(softnet_data, cpu);
2927 
2928 	local_irq_save(flags);
2929 
2930 	rps_lock(sd);
2931 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2932 		if (skb_queue_len(&sd->input_pkt_queue)) {
2933 enqueue:
2934 			__skb_queue_tail(&sd->input_pkt_queue, skb);
2935 			input_queue_tail_incr_save(sd, qtail);
2936 			rps_unlock(sd);
2937 			local_irq_restore(flags);
2938 			return NET_RX_SUCCESS;
2939 		}
2940 
2941 		/* Schedule NAPI for backlog device
2942 		 * We can use non atomic operation since we own the queue lock
2943 		 */
2944 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2945 			if (!rps_ipi_queued(sd))
2946 				____napi_schedule(sd, &sd->backlog);
2947 		}
2948 		goto enqueue;
2949 	}
2950 
2951 	sd->dropped++;
2952 	rps_unlock(sd);
2953 
2954 	local_irq_restore(flags);
2955 
2956 	atomic_long_inc(&skb->dev->rx_dropped);
2957 	kfree_skb(skb);
2958 	return NET_RX_DROP;
2959 }
2960 
2961 /**
2962  *	netif_rx	-	post buffer to the network code
2963  *	@skb: buffer to post
2964  *
2965  *	This function receives a packet from a device driver and queues it for
2966  *	the upper (protocol) levels to process.  It always succeeds. The buffer
2967  *	may be dropped during processing for congestion control or by the
2968  *	protocol layers.
2969  *
2970  *	return values:
2971  *	NET_RX_SUCCESS	(no congestion)
2972  *	NET_RX_DROP     (packet was dropped)
2973  *
2974  */
2975 
2976 int netif_rx(struct sk_buff *skb)
2977 {
2978 	int ret;
2979 
2980 	/* if netpoll wants it, pretend we never saw it */
2981 	if (netpoll_rx(skb))
2982 		return NET_RX_DROP;
2983 
2984 	net_timestamp_check(netdev_tstamp_prequeue, skb);
2985 
2986 	trace_netif_rx(skb);
2987 #ifdef CONFIG_RPS
2988 	if (static_key_false(&rps_needed)) {
2989 		struct rps_dev_flow voidflow, *rflow = &voidflow;
2990 		int cpu;
2991 
2992 		preempt_disable();
2993 		rcu_read_lock();
2994 
2995 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2996 		if (cpu < 0)
2997 			cpu = smp_processor_id();
2998 
2999 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3000 
3001 		rcu_read_unlock();
3002 		preempt_enable();
3003 	} else
3004 #endif
3005 	{
3006 		unsigned int qtail;
3007 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3008 		put_cpu();
3009 	}
3010 	return ret;
3011 }
3012 EXPORT_SYMBOL(netif_rx);
3013 
3014 int netif_rx_ni(struct sk_buff *skb)
3015 {
3016 	int err;
3017 
3018 	preempt_disable();
3019 	err = netif_rx(skb);
3020 	if (local_softirq_pending())
3021 		do_softirq();
3022 	preempt_enable();
3023 
3024 	return err;
3025 }
3026 EXPORT_SYMBOL(netif_rx_ni);
3027 
3028 static void net_tx_action(struct softirq_action *h)
3029 {
3030 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3031 
3032 	if (sd->completion_queue) {
3033 		struct sk_buff *clist;
3034 
3035 		local_irq_disable();
3036 		clist = sd->completion_queue;
3037 		sd->completion_queue = NULL;
3038 		local_irq_enable();
3039 
3040 		while (clist) {
3041 			struct sk_buff *skb = clist;
3042 			clist = clist->next;
3043 
3044 			WARN_ON(atomic_read(&skb->users));
3045 			trace_kfree_skb(skb, net_tx_action);
3046 			__kfree_skb(skb);
3047 		}
3048 	}
3049 
3050 	if (sd->output_queue) {
3051 		struct Qdisc *head;
3052 
3053 		local_irq_disable();
3054 		head = sd->output_queue;
3055 		sd->output_queue = NULL;
3056 		sd->output_queue_tailp = &sd->output_queue;
3057 		local_irq_enable();
3058 
3059 		while (head) {
3060 			struct Qdisc *q = head;
3061 			spinlock_t *root_lock;
3062 
3063 			head = head->next_sched;
3064 
3065 			root_lock = qdisc_lock(q);
3066 			if (spin_trylock(root_lock)) {
3067 				smp_mb__before_clear_bit();
3068 				clear_bit(__QDISC_STATE_SCHED,
3069 					  &q->state);
3070 				qdisc_run(q);
3071 				spin_unlock(root_lock);
3072 			} else {
3073 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3074 					      &q->state)) {
3075 					__netif_reschedule(q);
3076 				} else {
3077 					smp_mb__before_clear_bit();
3078 					clear_bit(__QDISC_STATE_SCHED,
3079 						  &q->state);
3080 				}
3081 			}
3082 		}
3083 	}
3084 }
3085 
3086 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3087     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3088 /* This hook is defined here for ATM LANE */
3089 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3090 			     unsigned char *addr) __read_mostly;
3091 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3092 #endif
3093 
3094 #ifdef CONFIG_NET_CLS_ACT
3095 /* TODO: Maybe we should just force sch_ingress to be compiled in
3096  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
3097  * a compare and 2 stores extra right now if we dont have it on
3098  * but have CONFIG_NET_CLS_ACT
3099  * NOTE: This doesn't stop any functionality; if you dont have
3100  * the ingress scheduler, you just can't add policies on ingress.
3101  *
3102  */
3103 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3104 {
3105 	struct net_device *dev = skb->dev;
3106 	u32 ttl = G_TC_RTTL(skb->tc_verd);
3107 	int result = TC_ACT_OK;
3108 	struct Qdisc *q;
3109 
3110 	if (unlikely(MAX_RED_LOOP < ttl++)) {
3111 		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3112 				     skb->skb_iif, dev->ifindex);
3113 		return TC_ACT_SHOT;
3114 	}
3115 
3116 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3117 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3118 
3119 	q = rxq->qdisc;
3120 	if (q != &noop_qdisc) {
3121 		spin_lock(qdisc_lock(q));
3122 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3123 			result = qdisc_enqueue_root(skb, q);
3124 		spin_unlock(qdisc_lock(q));
3125 	}
3126 
3127 	return result;
3128 }
3129 
3130 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3131 					 struct packet_type **pt_prev,
3132 					 int *ret, struct net_device *orig_dev)
3133 {
3134 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3135 
3136 	if (!rxq || rxq->qdisc == &noop_qdisc)
3137 		goto out;
3138 
3139 	if (*pt_prev) {
3140 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
3141 		*pt_prev = NULL;
3142 	}
3143 
3144 	switch (ing_filter(skb, rxq)) {
3145 	case TC_ACT_SHOT:
3146 	case TC_ACT_STOLEN:
3147 		kfree_skb(skb);
3148 		return NULL;
3149 	}
3150 
3151 out:
3152 	skb->tc_verd = 0;
3153 	return skb;
3154 }
3155 #endif
3156 
3157 /**
3158  *	netdev_rx_handler_register - register receive handler
3159  *	@dev: device to register a handler for
3160  *	@rx_handler: receive handler to register
3161  *	@rx_handler_data: data pointer that is used by rx handler
3162  *
3163  *	Register a receive hander for a device. This handler will then be
3164  *	called from __netif_receive_skb. A negative errno code is returned
3165  *	on a failure.
3166  *
3167  *	The caller must hold the rtnl_mutex.
3168  *
3169  *	For a general description of rx_handler, see enum rx_handler_result.
3170  */
3171 int netdev_rx_handler_register(struct net_device *dev,
3172 			       rx_handler_func_t *rx_handler,
3173 			       void *rx_handler_data)
3174 {
3175 	ASSERT_RTNL();
3176 
3177 	if (dev->rx_handler)
3178 		return -EBUSY;
3179 
3180 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3181 	rcu_assign_pointer(dev->rx_handler, rx_handler);
3182 
3183 	return 0;
3184 }
3185 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3186 
3187 /**
3188  *	netdev_rx_handler_unregister - unregister receive handler
3189  *	@dev: device to unregister a handler from
3190  *
3191  *	Unregister a receive hander from a device.
3192  *
3193  *	The caller must hold the rtnl_mutex.
3194  */
3195 void netdev_rx_handler_unregister(struct net_device *dev)
3196 {
3197 
3198 	ASSERT_RTNL();
3199 	RCU_INIT_POINTER(dev->rx_handler, NULL);
3200 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3201 }
3202 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3203 
3204 /*
3205  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3206  * the special handling of PFMEMALLOC skbs.
3207  */
3208 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3209 {
3210 	switch (skb->protocol) {
3211 	case __constant_htons(ETH_P_ARP):
3212 	case __constant_htons(ETH_P_IP):
3213 	case __constant_htons(ETH_P_IPV6):
3214 	case __constant_htons(ETH_P_8021Q):
3215 		return true;
3216 	default:
3217 		return false;
3218 	}
3219 }
3220 
3221 static int __netif_receive_skb(struct sk_buff *skb)
3222 {
3223 	struct packet_type *ptype, *pt_prev;
3224 	rx_handler_func_t *rx_handler;
3225 	struct net_device *orig_dev;
3226 	struct net_device *null_or_dev;
3227 	bool deliver_exact = false;
3228 	int ret = NET_RX_DROP;
3229 	__be16 type;
3230 	unsigned long pflags = current->flags;
3231 
3232 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
3233 
3234 	trace_netif_receive_skb(skb);
3235 
3236 	/*
3237 	 * PFMEMALLOC skbs are special, they should
3238 	 * - be delivered to SOCK_MEMALLOC sockets only
3239 	 * - stay away from userspace
3240 	 * - have bounded memory usage
3241 	 *
3242 	 * Use PF_MEMALLOC as this saves us from propagating the allocation
3243 	 * context down to all allocation sites.
3244 	 */
3245 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3246 		current->flags |= PF_MEMALLOC;
3247 
3248 	/* if we've gotten here through NAPI, check netpoll */
3249 	if (netpoll_receive_skb(skb))
3250 		goto out;
3251 
3252 	orig_dev = skb->dev;
3253 
3254 	skb_reset_network_header(skb);
3255 	skb_reset_transport_header(skb);
3256 	skb_reset_mac_len(skb);
3257 
3258 	pt_prev = NULL;
3259 
3260 	rcu_read_lock();
3261 
3262 another_round:
3263 	skb->skb_iif = skb->dev->ifindex;
3264 
3265 	__this_cpu_inc(softnet_data.processed);
3266 
3267 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3268 		skb = vlan_untag(skb);
3269 		if (unlikely(!skb))
3270 			goto unlock;
3271 	}
3272 
3273 #ifdef CONFIG_NET_CLS_ACT
3274 	if (skb->tc_verd & TC_NCLS) {
3275 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3276 		goto ncls;
3277 	}
3278 #endif
3279 
3280 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3281 		goto skip_taps;
3282 
3283 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
3284 		if (!ptype->dev || ptype->dev == skb->dev) {
3285 			if (pt_prev)
3286 				ret = deliver_skb(skb, pt_prev, orig_dev);
3287 			pt_prev = ptype;
3288 		}
3289 	}
3290 
3291 skip_taps:
3292 #ifdef CONFIG_NET_CLS_ACT
3293 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3294 	if (!skb)
3295 		goto unlock;
3296 ncls:
3297 #endif
3298 
3299 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3300 				&& !skb_pfmemalloc_protocol(skb))
3301 		goto drop;
3302 
3303 	rx_handler = rcu_dereference(skb->dev->rx_handler);
3304 	if (vlan_tx_tag_present(skb)) {
3305 		if (pt_prev) {
3306 			ret = deliver_skb(skb, pt_prev, orig_dev);
3307 			pt_prev = NULL;
3308 		}
3309 		if (vlan_do_receive(&skb, !rx_handler))
3310 			goto another_round;
3311 		else if (unlikely(!skb))
3312 			goto unlock;
3313 	}
3314 
3315 	if (rx_handler) {
3316 		if (pt_prev) {
3317 			ret = deliver_skb(skb, pt_prev, orig_dev);
3318 			pt_prev = NULL;
3319 		}
3320 		switch (rx_handler(&skb)) {
3321 		case RX_HANDLER_CONSUMED:
3322 			goto unlock;
3323 		case RX_HANDLER_ANOTHER:
3324 			goto another_round;
3325 		case RX_HANDLER_EXACT:
3326 			deliver_exact = true;
3327 		case RX_HANDLER_PASS:
3328 			break;
3329 		default:
3330 			BUG();
3331 		}
3332 	}
3333 
3334 	/* deliver only exact match when indicated */
3335 	null_or_dev = deliver_exact ? skb->dev : NULL;
3336 
3337 	type = skb->protocol;
3338 	list_for_each_entry_rcu(ptype,
3339 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3340 		if (ptype->type == type &&
3341 		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3342 		     ptype->dev == orig_dev)) {
3343 			if (pt_prev)
3344 				ret = deliver_skb(skb, pt_prev, orig_dev);
3345 			pt_prev = ptype;
3346 		}
3347 	}
3348 
3349 	if (pt_prev) {
3350 		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3351 			goto drop;
3352 		else
3353 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3354 	} else {
3355 drop:
3356 		atomic_long_inc(&skb->dev->rx_dropped);
3357 		kfree_skb(skb);
3358 		/* Jamal, now you will not able to escape explaining
3359 		 * me how you were going to use this. :-)
3360 		 */
3361 		ret = NET_RX_DROP;
3362 	}
3363 
3364 unlock:
3365 	rcu_read_unlock();
3366 out:
3367 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
3368 	return ret;
3369 }
3370 
3371 /**
3372  *	netif_receive_skb - process receive buffer from network
3373  *	@skb: buffer to process
3374  *
3375  *	netif_receive_skb() is the main receive data processing function.
3376  *	It always succeeds. The buffer may be dropped during processing
3377  *	for congestion control or by the protocol layers.
3378  *
3379  *	This function may only be called from softirq context and interrupts
3380  *	should be enabled.
3381  *
3382  *	Return values (usually ignored):
3383  *	NET_RX_SUCCESS: no congestion
3384  *	NET_RX_DROP: packet was dropped
3385  */
3386 int netif_receive_skb(struct sk_buff *skb)
3387 {
3388 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3389 
3390 	if (skb_defer_rx_timestamp(skb))
3391 		return NET_RX_SUCCESS;
3392 
3393 #ifdef CONFIG_RPS
3394 	if (static_key_false(&rps_needed)) {
3395 		struct rps_dev_flow voidflow, *rflow = &voidflow;
3396 		int cpu, ret;
3397 
3398 		rcu_read_lock();
3399 
3400 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
3401 
3402 		if (cpu >= 0) {
3403 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3404 			rcu_read_unlock();
3405 			return ret;
3406 		}
3407 		rcu_read_unlock();
3408 	}
3409 #endif
3410 	return __netif_receive_skb(skb);
3411 }
3412 EXPORT_SYMBOL(netif_receive_skb);
3413 
3414 /* Network device is going away, flush any packets still pending
3415  * Called with irqs disabled.
3416  */
3417 static void flush_backlog(void *arg)
3418 {
3419 	struct net_device *dev = arg;
3420 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3421 	struct sk_buff *skb, *tmp;
3422 
3423 	rps_lock(sd);
3424 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3425 		if (skb->dev == dev) {
3426 			__skb_unlink(skb, &sd->input_pkt_queue);
3427 			kfree_skb(skb);
3428 			input_queue_head_incr(sd);
3429 		}
3430 	}
3431 	rps_unlock(sd);
3432 
3433 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3434 		if (skb->dev == dev) {
3435 			__skb_unlink(skb, &sd->process_queue);
3436 			kfree_skb(skb);
3437 			input_queue_head_incr(sd);
3438 		}
3439 	}
3440 }
3441 
3442 static int napi_gro_complete(struct sk_buff *skb)
3443 {
3444 	struct packet_type *ptype;
3445 	__be16 type = skb->protocol;
3446 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3447 	int err = -ENOENT;
3448 
3449 	if (NAPI_GRO_CB(skb)->count == 1) {
3450 		skb_shinfo(skb)->gso_size = 0;
3451 		goto out;
3452 	}
3453 
3454 	rcu_read_lock();
3455 	list_for_each_entry_rcu(ptype, head, list) {
3456 		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3457 			continue;
3458 
3459 		err = ptype->gro_complete(skb);
3460 		break;
3461 	}
3462 	rcu_read_unlock();
3463 
3464 	if (err) {
3465 		WARN_ON(&ptype->list == head);
3466 		kfree_skb(skb);
3467 		return NET_RX_SUCCESS;
3468 	}
3469 
3470 out:
3471 	return netif_receive_skb(skb);
3472 }
3473 
3474 inline void napi_gro_flush(struct napi_struct *napi)
3475 {
3476 	struct sk_buff *skb, *next;
3477 
3478 	for (skb = napi->gro_list; skb; skb = next) {
3479 		next = skb->next;
3480 		skb->next = NULL;
3481 		napi_gro_complete(skb);
3482 	}
3483 
3484 	napi->gro_count = 0;
3485 	napi->gro_list = NULL;
3486 }
3487 EXPORT_SYMBOL(napi_gro_flush);
3488 
3489 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3490 {
3491 	struct sk_buff **pp = NULL;
3492 	struct packet_type *ptype;
3493 	__be16 type = skb->protocol;
3494 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3495 	int same_flow;
3496 	int mac_len;
3497 	enum gro_result ret;
3498 
3499 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3500 		goto normal;
3501 
3502 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
3503 		goto normal;
3504 
3505 	rcu_read_lock();
3506 	list_for_each_entry_rcu(ptype, head, list) {
3507 		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3508 			continue;
3509 
3510 		skb_set_network_header(skb, skb_gro_offset(skb));
3511 		mac_len = skb->network_header - skb->mac_header;
3512 		skb->mac_len = mac_len;
3513 		NAPI_GRO_CB(skb)->same_flow = 0;
3514 		NAPI_GRO_CB(skb)->flush = 0;
3515 		NAPI_GRO_CB(skb)->free = 0;
3516 
3517 		pp = ptype->gro_receive(&napi->gro_list, skb);
3518 		break;
3519 	}
3520 	rcu_read_unlock();
3521 
3522 	if (&ptype->list == head)
3523 		goto normal;
3524 
3525 	same_flow = NAPI_GRO_CB(skb)->same_flow;
3526 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3527 
3528 	if (pp) {
3529 		struct sk_buff *nskb = *pp;
3530 
3531 		*pp = nskb->next;
3532 		nskb->next = NULL;
3533 		napi_gro_complete(nskb);
3534 		napi->gro_count--;
3535 	}
3536 
3537 	if (same_flow)
3538 		goto ok;
3539 
3540 	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3541 		goto normal;
3542 
3543 	napi->gro_count++;
3544 	NAPI_GRO_CB(skb)->count = 1;
3545 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3546 	skb->next = napi->gro_list;
3547 	napi->gro_list = skb;
3548 	ret = GRO_HELD;
3549 
3550 pull:
3551 	if (skb_headlen(skb) < skb_gro_offset(skb)) {
3552 		int grow = skb_gro_offset(skb) - skb_headlen(skb);
3553 
3554 		BUG_ON(skb->end - skb->tail < grow);
3555 
3556 		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3557 
3558 		skb->tail += grow;
3559 		skb->data_len -= grow;
3560 
3561 		skb_shinfo(skb)->frags[0].page_offset += grow;
3562 		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3563 
3564 		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3565 			skb_frag_unref(skb, 0);
3566 			memmove(skb_shinfo(skb)->frags,
3567 				skb_shinfo(skb)->frags + 1,
3568 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3569 		}
3570 	}
3571 
3572 ok:
3573 	return ret;
3574 
3575 normal:
3576 	ret = GRO_NORMAL;
3577 	goto pull;
3578 }
3579 EXPORT_SYMBOL(dev_gro_receive);
3580 
3581 static inline gro_result_t
3582 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3583 {
3584 	struct sk_buff *p;
3585 	unsigned int maclen = skb->dev->hard_header_len;
3586 
3587 	for (p = napi->gro_list; p; p = p->next) {
3588 		unsigned long diffs;
3589 
3590 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3591 		diffs |= p->vlan_tci ^ skb->vlan_tci;
3592 		if (maclen == ETH_HLEN)
3593 			diffs |= compare_ether_header(skb_mac_header(p),
3594 						      skb_gro_mac_header(skb));
3595 		else if (!diffs)
3596 			diffs = memcmp(skb_mac_header(p),
3597 				       skb_gro_mac_header(skb),
3598 				       maclen);
3599 		NAPI_GRO_CB(p)->same_flow = !diffs;
3600 		NAPI_GRO_CB(p)->flush = 0;
3601 	}
3602 
3603 	return dev_gro_receive(napi, skb);
3604 }
3605 
3606 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3607 {
3608 	switch (ret) {
3609 	case GRO_NORMAL:
3610 		if (netif_receive_skb(skb))
3611 			ret = GRO_DROP;
3612 		break;
3613 
3614 	case GRO_DROP:
3615 		kfree_skb(skb);
3616 		break;
3617 
3618 	case GRO_MERGED_FREE:
3619 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3620 			kmem_cache_free(skbuff_head_cache, skb);
3621 		else
3622 			__kfree_skb(skb);
3623 		break;
3624 
3625 	case GRO_HELD:
3626 	case GRO_MERGED:
3627 		break;
3628 	}
3629 
3630 	return ret;
3631 }
3632 EXPORT_SYMBOL(napi_skb_finish);
3633 
3634 void skb_gro_reset_offset(struct sk_buff *skb)
3635 {
3636 	NAPI_GRO_CB(skb)->data_offset = 0;
3637 	NAPI_GRO_CB(skb)->frag0 = NULL;
3638 	NAPI_GRO_CB(skb)->frag0_len = 0;
3639 
3640 	if (skb->mac_header == skb->tail &&
3641 	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3642 		NAPI_GRO_CB(skb)->frag0 =
3643 			skb_frag_address(&skb_shinfo(skb)->frags[0]);
3644 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3645 	}
3646 }
3647 EXPORT_SYMBOL(skb_gro_reset_offset);
3648 
3649 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3650 {
3651 	skb_gro_reset_offset(skb);
3652 
3653 	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3654 }
3655 EXPORT_SYMBOL(napi_gro_receive);
3656 
3657 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3658 {
3659 	__skb_pull(skb, skb_headlen(skb));
3660 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
3661 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3662 	skb->vlan_tci = 0;
3663 	skb->dev = napi->dev;
3664 	skb->skb_iif = 0;
3665 
3666 	napi->skb = skb;
3667 }
3668 
3669 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3670 {
3671 	struct sk_buff *skb = napi->skb;
3672 
3673 	if (!skb) {
3674 		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3675 		if (skb)
3676 			napi->skb = skb;
3677 	}
3678 	return skb;
3679 }
3680 EXPORT_SYMBOL(napi_get_frags);
3681 
3682 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3683 			       gro_result_t ret)
3684 {
3685 	switch (ret) {
3686 	case GRO_NORMAL:
3687 	case GRO_HELD:
3688 		skb->protocol = eth_type_trans(skb, skb->dev);
3689 
3690 		if (ret == GRO_HELD)
3691 			skb_gro_pull(skb, -ETH_HLEN);
3692 		else if (netif_receive_skb(skb))
3693 			ret = GRO_DROP;
3694 		break;
3695 
3696 	case GRO_DROP:
3697 	case GRO_MERGED_FREE:
3698 		napi_reuse_skb(napi, skb);
3699 		break;
3700 
3701 	case GRO_MERGED:
3702 		break;
3703 	}
3704 
3705 	return ret;
3706 }
3707 EXPORT_SYMBOL(napi_frags_finish);
3708 
3709 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3710 {
3711 	struct sk_buff *skb = napi->skb;
3712 	struct ethhdr *eth;
3713 	unsigned int hlen;
3714 	unsigned int off;
3715 
3716 	napi->skb = NULL;
3717 
3718 	skb_reset_mac_header(skb);
3719 	skb_gro_reset_offset(skb);
3720 
3721 	off = skb_gro_offset(skb);
3722 	hlen = off + sizeof(*eth);
3723 	eth = skb_gro_header_fast(skb, off);
3724 	if (skb_gro_header_hard(skb, hlen)) {
3725 		eth = skb_gro_header_slow(skb, hlen, off);
3726 		if (unlikely(!eth)) {
3727 			napi_reuse_skb(napi, skb);
3728 			skb = NULL;
3729 			goto out;
3730 		}
3731 	}
3732 
3733 	skb_gro_pull(skb, sizeof(*eth));
3734 
3735 	/*
3736 	 * This works because the only protocols we care about don't require
3737 	 * special handling.  We'll fix it up properly at the end.
3738 	 */
3739 	skb->protocol = eth->h_proto;
3740 
3741 out:
3742 	return skb;
3743 }
3744 
3745 gro_result_t napi_gro_frags(struct napi_struct *napi)
3746 {
3747 	struct sk_buff *skb = napi_frags_skb(napi);
3748 
3749 	if (!skb)
3750 		return GRO_DROP;
3751 
3752 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3753 }
3754 EXPORT_SYMBOL(napi_gro_frags);
3755 
3756 /*
3757  * net_rps_action sends any pending IPI's for rps.
3758  * Note: called with local irq disabled, but exits with local irq enabled.
3759  */
3760 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3761 {
3762 #ifdef CONFIG_RPS
3763 	struct softnet_data *remsd = sd->rps_ipi_list;
3764 
3765 	if (remsd) {
3766 		sd->rps_ipi_list = NULL;
3767 
3768 		local_irq_enable();
3769 
3770 		/* Send pending IPI's to kick RPS processing on remote cpus. */
3771 		while (remsd) {
3772 			struct softnet_data *next = remsd->rps_ipi_next;
3773 
3774 			if (cpu_online(remsd->cpu))
3775 				__smp_call_function_single(remsd->cpu,
3776 							   &remsd->csd, 0);
3777 			remsd = next;
3778 		}
3779 	} else
3780 #endif
3781 		local_irq_enable();
3782 }
3783 
3784 static int process_backlog(struct napi_struct *napi, int quota)
3785 {
3786 	int work = 0;
3787 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3788 
3789 #ifdef CONFIG_RPS
3790 	/* Check if we have pending ipi, its better to send them now,
3791 	 * not waiting net_rx_action() end.
3792 	 */
3793 	if (sd->rps_ipi_list) {
3794 		local_irq_disable();
3795 		net_rps_action_and_irq_enable(sd);
3796 	}
3797 #endif
3798 	napi->weight = weight_p;
3799 	local_irq_disable();
3800 	while (work < quota) {
3801 		struct sk_buff *skb;
3802 		unsigned int qlen;
3803 
3804 		while ((skb = __skb_dequeue(&sd->process_queue))) {
3805 			local_irq_enable();
3806 			__netif_receive_skb(skb);
3807 			local_irq_disable();
3808 			input_queue_head_incr(sd);
3809 			if (++work >= quota) {
3810 				local_irq_enable();
3811 				return work;
3812 			}
3813 		}
3814 
3815 		rps_lock(sd);
3816 		qlen = skb_queue_len(&sd->input_pkt_queue);
3817 		if (qlen)
3818 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
3819 						   &sd->process_queue);
3820 
3821 		if (qlen < quota - work) {
3822 			/*
3823 			 * Inline a custom version of __napi_complete().
3824 			 * only current cpu owns and manipulates this napi,
3825 			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3826 			 * we can use a plain write instead of clear_bit(),
3827 			 * and we dont need an smp_mb() memory barrier.
3828 			 */
3829 			list_del(&napi->poll_list);
3830 			napi->state = 0;
3831 
3832 			quota = work + qlen;
3833 		}
3834 		rps_unlock(sd);
3835 	}
3836 	local_irq_enable();
3837 
3838 	return work;
3839 }
3840 
3841 /**
3842  * __napi_schedule - schedule for receive
3843  * @n: entry to schedule
3844  *
3845  * The entry's receive function will be scheduled to run
3846  */
3847 void __napi_schedule(struct napi_struct *n)
3848 {
3849 	unsigned long flags;
3850 
3851 	local_irq_save(flags);
3852 	____napi_schedule(&__get_cpu_var(softnet_data), n);
3853 	local_irq_restore(flags);
3854 }
3855 EXPORT_SYMBOL(__napi_schedule);
3856 
3857 void __napi_complete(struct napi_struct *n)
3858 {
3859 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3860 	BUG_ON(n->gro_list);
3861 
3862 	list_del(&n->poll_list);
3863 	smp_mb__before_clear_bit();
3864 	clear_bit(NAPI_STATE_SCHED, &n->state);
3865 }
3866 EXPORT_SYMBOL(__napi_complete);
3867 
3868 void napi_complete(struct napi_struct *n)
3869 {
3870 	unsigned long flags;
3871 
3872 	/*
3873 	 * don't let napi dequeue from the cpu poll list
3874 	 * just in case its running on a different cpu
3875 	 */
3876 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3877 		return;
3878 
3879 	napi_gro_flush(n);
3880 	local_irq_save(flags);
3881 	__napi_complete(n);
3882 	local_irq_restore(flags);
3883 }
3884 EXPORT_SYMBOL(napi_complete);
3885 
3886 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3887 		    int (*poll)(struct napi_struct *, int), int weight)
3888 {
3889 	INIT_LIST_HEAD(&napi->poll_list);
3890 	napi->gro_count = 0;
3891 	napi->gro_list = NULL;
3892 	napi->skb = NULL;
3893 	napi->poll = poll;
3894 	napi->weight = weight;
3895 	list_add(&napi->dev_list, &dev->napi_list);
3896 	napi->dev = dev;
3897 #ifdef CONFIG_NETPOLL
3898 	spin_lock_init(&napi->poll_lock);
3899 	napi->poll_owner = -1;
3900 #endif
3901 	set_bit(NAPI_STATE_SCHED, &napi->state);
3902 }
3903 EXPORT_SYMBOL(netif_napi_add);
3904 
3905 void netif_napi_del(struct napi_struct *napi)
3906 {
3907 	struct sk_buff *skb, *next;
3908 
3909 	list_del_init(&napi->dev_list);
3910 	napi_free_frags(napi);
3911 
3912 	for (skb = napi->gro_list; skb; skb = next) {
3913 		next = skb->next;
3914 		skb->next = NULL;
3915 		kfree_skb(skb);
3916 	}
3917 
3918 	napi->gro_list = NULL;
3919 	napi->gro_count = 0;
3920 }
3921 EXPORT_SYMBOL(netif_napi_del);
3922 
3923 static void net_rx_action(struct softirq_action *h)
3924 {
3925 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3926 	unsigned long time_limit = jiffies + 2;
3927 	int budget = netdev_budget;
3928 	void *have;
3929 
3930 	local_irq_disable();
3931 
3932 	while (!list_empty(&sd->poll_list)) {
3933 		struct napi_struct *n;
3934 		int work, weight;
3935 
3936 		/* If softirq window is exhuasted then punt.
3937 		 * Allow this to run for 2 jiffies since which will allow
3938 		 * an average latency of 1.5/HZ.
3939 		 */
3940 		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3941 			goto softnet_break;
3942 
3943 		local_irq_enable();
3944 
3945 		/* Even though interrupts have been re-enabled, this
3946 		 * access is safe because interrupts can only add new
3947 		 * entries to the tail of this list, and only ->poll()
3948 		 * calls can remove this head entry from the list.
3949 		 */
3950 		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3951 
3952 		have = netpoll_poll_lock(n);
3953 
3954 		weight = n->weight;
3955 
3956 		/* This NAPI_STATE_SCHED test is for avoiding a race
3957 		 * with netpoll's poll_napi().  Only the entity which
3958 		 * obtains the lock and sees NAPI_STATE_SCHED set will
3959 		 * actually make the ->poll() call.  Therefore we avoid
3960 		 * accidentally calling ->poll() when NAPI is not scheduled.
3961 		 */
3962 		work = 0;
3963 		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3964 			work = n->poll(n, weight);
3965 			trace_napi_poll(n);
3966 		}
3967 
3968 		WARN_ON_ONCE(work > weight);
3969 
3970 		budget -= work;
3971 
3972 		local_irq_disable();
3973 
3974 		/* Drivers must not modify the NAPI state if they
3975 		 * consume the entire weight.  In such cases this code
3976 		 * still "owns" the NAPI instance and therefore can
3977 		 * move the instance around on the list at-will.
3978 		 */
3979 		if (unlikely(work == weight)) {
3980 			if (unlikely(napi_disable_pending(n))) {
3981 				local_irq_enable();
3982 				napi_complete(n);
3983 				local_irq_disable();
3984 			} else
3985 				list_move_tail(&n->poll_list, &sd->poll_list);
3986 		}
3987 
3988 		netpoll_poll_unlock(have);
3989 	}
3990 out:
3991 	net_rps_action_and_irq_enable(sd);
3992 
3993 #ifdef CONFIG_NET_DMA
3994 	/*
3995 	 * There may not be any more sk_buffs coming right now, so push
3996 	 * any pending DMA copies to hardware
3997 	 */
3998 	dma_issue_pending_all();
3999 #endif
4000 
4001 	return;
4002 
4003 softnet_break:
4004 	sd->time_squeeze++;
4005 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4006 	goto out;
4007 }
4008 
4009 static gifconf_func_t *gifconf_list[NPROTO];
4010 
4011 /**
4012  *	register_gifconf	-	register a SIOCGIF handler
4013  *	@family: Address family
4014  *	@gifconf: Function handler
4015  *
4016  *	Register protocol dependent address dumping routines. The handler
4017  *	that is passed must not be freed or reused until it has been replaced
4018  *	by another handler.
4019  */
4020 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4021 {
4022 	if (family >= NPROTO)
4023 		return -EINVAL;
4024 	gifconf_list[family] = gifconf;
4025 	return 0;
4026 }
4027 EXPORT_SYMBOL(register_gifconf);
4028 
4029 
4030 /*
4031  *	Map an interface index to its name (SIOCGIFNAME)
4032  */
4033 
4034 /*
4035  *	We need this ioctl for efficient implementation of the
4036  *	if_indextoname() function required by the IPv6 API.  Without
4037  *	it, we would have to search all the interfaces to find a
4038  *	match.  --pb
4039  */
4040 
4041 static int dev_ifname(struct net *net, struct ifreq __user *arg)
4042 {
4043 	struct net_device *dev;
4044 	struct ifreq ifr;
4045 
4046 	/*
4047 	 *	Fetch the caller's info block.
4048 	 */
4049 
4050 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4051 		return -EFAULT;
4052 
4053 	rcu_read_lock();
4054 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4055 	if (!dev) {
4056 		rcu_read_unlock();
4057 		return -ENODEV;
4058 	}
4059 
4060 	strcpy(ifr.ifr_name, dev->name);
4061 	rcu_read_unlock();
4062 
4063 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4064 		return -EFAULT;
4065 	return 0;
4066 }
4067 
4068 /*
4069  *	Perform a SIOCGIFCONF call. This structure will change
4070  *	size eventually, and there is nothing I can do about it.
4071  *	Thus we will need a 'compatibility mode'.
4072  */
4073 
4074 static int dev_ifconf(struct net *net, char __user *arg)
4075 {
4076 	struct ifconf ifc;
4077 	struct net_device *dev;
4078 	char __user *pos;
4079 	int len;
4080 	int total;
4081 	int i;
4082 
4083 	/*
4084 	 *	Fetch the caller's info block.
4085 	 */
4086 
4087 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4088 		return -EFAULT;
4089 
4090 	pos = ifc.ifc_buf;
4091 	len = ifc.ifc_len;
4092 
4093 	/*
4094 	 *	Loop over the interfaces, and write an info block for each.
4095 	 */
4096 
4097 	total = 0;
4098 	for_each_netdev(net, dev) {
4099 		for (i = 0; i < NPROTO; i++) {
4100 			if (gifconf_list[i]) {
4101 				int done;
4102 				if (!pos)
4103 					done = gifconf_list[i](dev, NULL, 0);
4104 				else
4105 					done = gifconf_list[i](dev, pos + total,
4106 							       len - total);
4107 				if (done < 0)
4108 					return -EFAULT;
4109 				total += done;
4110 			}
4111 		}
4112 	}
4113 
4114 	/*
4115 	 *	All done.  Write the updated control block back to the caller.
4116 	 */
4117 	ifc.ifc_len = total;
4118 
4119 	/*
4120 	 * 	Both BSD and Solaris return 0 here, so we do too.
4121 	 */
4122 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4123 }
4124 
4125 #ifdef CONFIG_PROC_FS
4126 
4127 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4128 
4129 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4130 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4131 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4132 
4133 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4134 {
4135 	struct net *net = seq_file_net(seq);
4136 	struct net_device *dev;
4137 	struct hlist_node *p;
4138 	struct hlist_head *h;
4139 	unsigned int count = 0, offset = get_offset(*pos);
4140 
4141 	h = &net->dev_name_head[get_bucket(*pos)];
4142 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4143 		if (++count == offset)
4144 			return dev;
4145 	}
4146 
4147 	return NULL;
4148 }
4149 
4150 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4151 {
4152 	struct net_device *dev;
4153 	unsigned int bucket;
4154 
4155 	do {
4156 		dev = dev_from_same_bucket(seq, pos);
4157 		if (dev)
4158 			return dev;
4159 
4160 		bucket = get_bucket(*pos) + 1;
4161 		*pos = set_bucket_offset(bucket, 1);
4162 	} while (bucket < NETDEV_HASHENTRIES);
4163 
4164 	return NULL;
4165 }
4166 
4167 /*
4168  *	This is invoked by the /proc filesystem handler to display a device
4169  *	in detail.
4170  */
4171 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4172 	__acquires(RCU)
4173 {
4174 	rcu_read_lock();
4175 	if (!*pos)
4176 		return SEQ_START_TOKEN;
4177 
4178 	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4179 		return NULL;
4180 
4181 	return dev_from_bucket(seq, pos);
4182 }
4183 
4184 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4185 {
4186 	++*pos;
4187 	return dev_from_bucket(seq, pos);
4188 }
4189 
4190 void dev_seq_stop(struct seq_file *seq, void *v)
4191 	__releases(RCU)
4192 {
4193 	rcu_read_unlock();
4194 }
4195 
4196 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4197 {
4198 	struct rtnl_link_stats64 temp;
4199 	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4200 
4201 	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4202 		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4203 		   dev->name, stats->rx_bytes, stats->rx_packets,
4204 		   stats->rx_errors,
4205 		   stats->rx_dropped + stats->rx_missed_errors,
4206 		   stats->rx_fifo_errors,
4207 		   stats->rx_length_errors + stats->rx_over_errors +
4208 		    stats->rx_crc_errors + stats->rx_frame_errors,
4209 		   stats->rx_compressed, stats->multicast,
4210 		   stats->tx_bytes, stats->tx_packets,
4211 		   stats->tx_errors, stats->tx_dropped,
4212 		   stats->tx_fifo_errors, stats->collisions,
4213 		   stats->tx_carrier_errors +
4214 		    stats->tx_aborted_errors +
4215 		    stats->tx_window_errors +
4216 		    stats->tx_heartbeat_errors,
4217 		   stats->tx_compressed);
4218 }
4219 
4220 /*
4221  *	Called from the PROCfs module. This now uses the new arbitrary sized
4222  *	/proc/net interface to create /proc/net/dev
4223  */
4224 static int dev_seq_show(struct seq_file *seq, void *v)
4225 {
4226 	if (v == SEQ_START_TOKEN)
4227 		seq_puts(seq, "Inter-|   Receive                            "
4228 			      "                    |  Transmit\n"
4229 			      " face |bytes    packets errs drop fifo frame "
4230 			      "compressed multicast|bytes    packets errs "
4231 			      "drop fifo colls carrier compressed\n");
4232 	else
4233 		dev_seq_printf_stats(seq, v);
4234 	return 0;
4235 }
4236 
4237 static struct softnet_data *softnet_get_online(loff_t *pos)
4238 {
4239 	struct softnet_data *sd = NULL;
4240 
4241 	while (*pos < nr_cpu_ids)
4242 		if (cpu_online(*pos)) {
4243 			sd = &per_cpu(softnet_data, *pos);
4244 			break;
4245 		} else
4246 			++*pos;
4247 	return sd;
4248 }
4249 
4250 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4251 {
4252 	return softnet_get_online(pos);
4253 }
4254 
4255 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4256 {
4257 	++*pos;
4258 	return softnet_get_online(pos);
4259 }
4260 
4261 static void softnet_seq_stop(struct seq_file *seq, void *v)
4262 {
4263 }
4264 
4265 static int softnet_seq_show(struct seq_file *seq, void *v)
4266 {
4267 	struct softnet_data *sd = v;
4268 
4269 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4270 		   sd->processed, sd->dropped, sd->time_squeeze, 0,
4271 		   0, 0, 0, 0, /* was fastroute */
4272 		   sd->cpu_collision, sd->received_rps);
4273 	return 0;
4274 }
4275 
4276 static const struct seq_operations dev_seq_ops = {
4277 	.start = dev_seq_start,
4278 	.next  = dev_seq_next,
4279 	.stop  = dev_seq_stop,
4280 	.show  = dev_seq_show,
4281 };
4282 
4283 static int dev_seq_open(struct inode *inode, struct file *file)
4284 {
4285 	return seq_open_net(inode, file, &dev_seq_ops,
4286 			    sizeof(struct seq_net_private));
4287 }
4288 
4289 static const struct file_operations dev_seq_fops = {
4290 	.owner	 = THIS_MODULE,
4291 	.open    = dev_seq_open,
4292 	.read    = seq_read,
4293 	.llseek  = seq_lseek,
4294 	.release = seq_release_net,
4295 };
4296 
4297 static const struct seq_operations softnet_seq_ops = {
4298 	.start = softnet_seq_start,
4299 	.next  = softnet_seq_next,
4300 	.stop  = softnet_seq_stop,
4301 	.show  = softnet_seq_show,
4302 };
4303 
4304 static int softnet_seq_open(struct inode *inode, struct file *file)
4305 {
4306 	return seq_open(file, &softnet_seq_ops);
4307 }
4308 
4309 static const struct file_operations softnet_seq_fops = {
4310 	.owner	 = THIS_MODULE,
4311 	.open    = softnet_seq_open,
4312 	.read    = seq_read,
4313 	.llseek  = seq_lseek,
4314 	.release = seq_release,
4315 };
4316 
4317 static void *ptype_get_idx(loff_t pos)
4318 {
4319 	struct packet_type *pt = NULL;
4320 	loff_t i = 0;
4321 	int t;
4322 
4323 	list_for_each_entry_rcu(pt, &ptype_all, list) {
4324 		if (i == pos)
4325 			return pt;
4326 		++i;
4327 	}
4328 
4329 	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4330 		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4331 			if (i == pos)
4332 				return pt;
4333 			++i;
4334 		}
4335 	}
4336 	return NULL;
4337 }
4338 
4339 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4340 	__acquires(RCU)
4341 {
4342 	rcu_read_lock();
4343 	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4344 }
4345 
4346 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4347 {
4348 	struct packet_type *pt;
4349 	struct list_head *nxt;
4350 	int hash;
4351 
4352 	++*pos;
4353 	if (v == SEQ_START_TOKEN)
4354 		return ptype_get_idx(0);
4355 
4356 	pt = v;
4357 	nxt = pt->list.next;
4358 	if (pt->type == htons(ETH_P_ALL)) {
4359 		if (nxt != &ptype_all)
4360 			goto found;
4361 		hash = 0;
4362 		nxt = ptype_base[0].next;
4363 	} else
4364 		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4365 
4366 	while (nxt == &ptype_base[hash]) {
4367 		if (++hash >= PTYPE_HASH_SIZE)
4368 			return NULL;
4369 		nxt = ptype_base[hash].next;
4370 	}
4371 found:
4372 	return list_entry(nxt, struct packet_type, list);
4373 }
4374 
4375 static void ptype_seq_stop(struct seq_file *seq, void *v)
4376 	__releases(RCU)
4377 {
4378 	rcu_read_unlock();
4379 }
4380 
4381 static int ptype_seq_show(struct seq_file *seq, void *v)
4382 {
4383 	struct packet_type *pt = v;
4384 
4385 	if (v == SEQ_START_TOKEN)
4386 		seq_puts(seq, "Type Device      Function\n");
4387 	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4388 		if (pt->type == htons(ETH_P_ALL))
4389 			seq_puts(seq, "ALL ");
4390 		else
4391 			seq_printf(seq, "%04x", ntohs(pt->type));
4392 
4393 		seq_printf(seq, " %-8s %pF\n",
4394 			   pt->dev ? pt->dev->name : "", pt->func);
4395 	}
4396 
4397 	return 0;
4398 }
4399 
4400 static const struct seq_operations ptype_seq_ops = {
4401 	.start = ptype_seq_start,
4402 	.next  = ptype_seq_next,
4403 	.stop  = ptype_seq_stop,
4404 	.show  = ptype_seq_show,
4405 };
4406 
4407 static int ptype_seq_open(struct inode *inode, struct file *file)
4408 {
4409 	return seq_open_net(inode, file, &ptype_seq_ops,
4410 			sizeof(struct seq_net_private));
4411 }
4412 
4413 static const struct file_operations ptype_seq_fops = {
4414 	.owner	 = THIS_MODULE,
4415 	.open    = ptype_seq_open,
4416 	.read    = seq_read,
4417 	.llseek  = seq_lseek,
4418 	.release = seq_release_net,
4419 };
4420 
4421 
4422 static int __net_init dev_proc_net_init(struct net *net)
4423 {
4424 	int rc = -ENOMEM;
4425 
4426 	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4427 		goto out;
4428 	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4429 		goto out_dev;
4430 	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4431 		goto out_softnet;
4432 
4433 	if (wext_proc_init(net))
4434 		goto out_ptype;
4435 	rc = 0;
4436 out:
4437 	return rc;
4438 out_ptype:
4439 	proc_net_remove(net, "ptype");
4440 out_softnet:
4441 	proc_net_remove(net, "softnet_stat");
4442 out_dev:
4443 	proc_net_remove(net, "dev");
4444 	goto out;
4445 }
4446 
4447 static void __net_exit dev_proc_net_exit(struct net *net)
4448 {
4449 	wext_proc_exit(net);
4450 
4451 	proc_net_remove(net, "ptype");
4452 	proc_net_remove(net, "softnet_stat");
4453 	proc_net_remove(net, "dev");
4454 }
4455 
4456 static struct pernet_operations __net_initdata dev_proc_ops = {
4457 	.init = dev_proc_net_init,
4458 	.exit = dev_proc_net_exit,
4459 };
4460 
4461 static int __init dev_proc_init(void)
4462 {
4463 	return register_pernet_subsys(&dev_proc_ops);
4464 }
4465 #else
4466 #define dev_proc_init() 0
4467 #endif	/* CONFIG_PROC_FS */
4468 
4469 
4470 /**
4471  *	netdev_set_master	-	set up master pointer
4472  *	@slave: slave device
4473  *	@master: new master device
4474  *
4475  *	Changes the master device of the slave. Pass %NULL to break the
4476  *	bonding. The caller must hold the RTNL semaphore. On a failure
4477  *	a negative errno code is returned. On success the reference counts
4478  *	are adjusted and the function returns zero.
4479  */
4480 int netdev_set_master(struct net_device *slave, struct net_device *master)
4481 {
4482 	struct net_device *old = slave->master;
4483 
4484 	ASSERT_RTNL();
4485 
4486 	if (master) {
4487 		if (old)
4488 			return -EBUSY;
4489 		dev_hold(master);
4490 	}
4491 
4492 	slave->master = master;
4493 
4494 	if (old)
4495 		dev_put(old);
4496 	return 0;
4497 }
4498 EXPORT_SYMBOL(netdev_set_master);
4499 
4500 /**
4501  *	netdev_set_bond_master	-	set up bonding master/slave pair
4502  *	@slave: slave device
4503  *	@master: new master device
4504  *
4505  *	Changes the master device of the slave. Pass %NULL to break the
4506  *	bonding. The caller must hold the RTNL semaphore. On a failure
4507  *	a negative errno code is returned. On success %RTM_NEWLINK is sent
4508  *	to the routing socket and the function returns zero.
4509  */
4510 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4511 {
4512 	int err;
4513 
4514 	ASSERT_RTNL();
4515 
4516 	err = netdev_set_master(slave, master);
4517 	if (err)
4518 		return err;
4519 	if (master)
4520 		slave->flags |= IFF_SLAVE;
4521 	else
4522 		slave->flags &= ~IFF_SLAVE;
4523 
4524 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4525 	return 0;
4526 }
4527 EXPORT_SYMBOL(netdev_set_bond_master);
4528 
4529 static void dev_change_rx_flags(struct net_device *dev, int flags)
4530 {
4531 	const struct net_device_ops *ops = dev->netdev_ops;
4532 
4533 	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4534 		ops->ndo_change_rx_flags(dev, flags);
4535 }
4536 
4537 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4538 {
4539 	unsigned int old_flags = dev->flags;
4540 	kuid_t uid;
4541 	kgid_t gid;
4542 
4543 	ASSERT_RTNL();
4544 
4545 	dev->flags |= IFF_PROMISC;
4546 	dev->promiscuity += inc;
4547 	if (dev->promiscuity == 0) {
4548 		/*
4549 		 * Avoid overflow.
4550 		 * If inc causes overflow, untouch promisc and return error.
4551 		 */
4552 		if (inc < 0)
4553 			dev->flags &= ~IFF_PROMISC;
4554 		else {
4555 			dev->promiscuity -= inc;
4556 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4557 				dev->name);
4558 			return -EOVERFLOW;
4559 		}
4560 	}
4561 	if (dev->flags != old_flags) {
4562 		pr_info("device %s %s promiscuous mode\n",
4563 			dev->name,
4564 			dev->flags & IFF_PROMISC ? "entered" : "left");
4565 		if (audit_enabled) {
4566 			current_uid_gid(&uid, &gid);
4567 			audit_log(current->audit_context, GFP_ATOMIC,
4568 				AUDIT_ANOM_PROMISCUOUS,
4569 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4570 				dev->name, (dev->flags & IFF_PROMISC),
4571 				(old_flags & IFF_PROMISC),
4572 				from_kuid(&init_user_ns, audit_get_loginuid(current)),
4573 				from_kuid(&init_user_ns, uid),
4574 				from_kgid(&init_user_ns, gid),
4575 				audit_get_sessionid(current));
4576 		}
4577 
4578 		dev_change_rx_flags(dev, IFF_PROMISC);
4579 	}
4580 	return 0;
4581 }
4582 
4583 /**
4584  *	dev_set_promiscuity	- update promiscuity count on a device
4585  *	@dev: device
4586  *	@inc: modifier
4587  *
4588  *	Add or remove promiscuity from a device. While the count in the device
4589  *	remains above zero the interface remains promiscuous. Once it hits zero
4590  *	the device reverts back to normal filtering operation. A negative inc
4591  *	value is used to drop promiscuity on the device.
4592  *	Return 0 if successful or a negative errno code on error.
4593  */
4594 int dev_set_promiscuity(struct net_device *dev, int inc)
4595 {
4596 	unsigned int old_flags = dev->flags;
4597 	int err;
4598 
4599 	err = __dev_set_promiscuity(dev, inc);
4600 	if (err < 0)
4601 		return err;
4602 	if (dev->flags != old_flags)
4603 		dev_set_rx_mode(dev);
4604 	return err;
4605 }
4606 EXPORT_SYMBOL(dev_set_promiscuity);
4607 
4608 /**
4609  *	dev_set_allmulti	- update allmulti count on a device
4610  *	@dev: device
4611  *	@inc: modifier
4612  *
4613  *	Add or remove reception of all multicast frames to a device. While the
4614  *	count in the device remains above zero the interface remains listening
4615  *	to all interfaces. Once it hits zero the device reverts back to normal
4616  *	filtering operation. A negative @inc value is used to drop the counter
4617  *	when releasing a resource needing all multicasts.
4618  *	Return 0 if successful or a negative errno code on error.
4619  */
4620 
4621 int dev_set_allmulti(struct net_device *dev, int inc)
4622 {
4623 	unsigned int old_flags = dev->flags;
4624 
4625 	ASSERT_RTNL();
4626 
4627 	dev->flags |= IFF_ALLMULTI;
4628 	dev->allmulti += inc;
4629 	if (dev->allmulti == 0) {
4630 		/*
4631 		 * Avoid overflow.
4632 		 * If inc causes overflow, untouch allmulti and return error.
4633 		 */
4634 		if (inc < 0)
4635 			dev->flags &= ~IFF_ALLMULTI;
4636 		else {
4637 			dev->allmulti -= inc;
4638 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4639 				dev->name);
4640 			return -EOVERFLOW;
4641 		}
4642 	}
4643 	if (dev->flags ^ old_flags) {
4644 		dev_change_rx_flags(dev, IFF_ALLMULTI);
4645 		dev_set_rx_mode(dev);
4646 	}
4647 	return 0;
4648 }
4649 EXPORT_SYMBOL(dev_set_allmulti);
4650 
4651 /*
4652  *	Upload unicast and multicast address lists to device and
4653  *	configure RX filtering. When the device doesn't support unicast
4654  *	filtering it is put in promiscuous mode while unicast addresses
4655  *	are present.
4656  */
4657 void __dev_set_rx_mode(struct net_device *dev)
4658 {
4659 	const struct net_device_ops *ops = dev->netdev_ops;
4660 
4661 	/* dev_open will call this function so the list will stay sane. */
4662 	if (!(dev->flags&IFF_UP))
4663 		return;
4664 
4665 	if (!netif_device_present(dev))
4666 		return;
4667 
4668 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4669 		/* Unicast addresses changes may only happen under the rtnl,
4670 		 * therefore calling __dev_set_promiscuity here is safe.
4671 		 */
4672 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4673 			__dev_set_promiscuity(dev, 1);
4674 			dev->uc_promisc = true;
4675 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4676 			__dev_set_promiscuity(dev, -1);
4677 			dev->uc_promisc = false;
4678 		}
4679 	}
4680 
4681 	if (ops->ndo_set_rx_mode)
4682 		ops->ndo_set_rx_mode(dev);
4683 }
4684 
4685 void dev_set_rx_mode(struct net_device *dev)
4686 {
4687 	netif_addr_lock_bh(dev);
4688 	__dev_set_rx_mode(dev);
4689 	netif_addr_unlock_bh(dev);
4690 }
4691 
4692 /**
4693  *	dev_get_flags - get flags reported to userspace
4694  *	@dev: device
4695  *
4696  *	Get the combination of flag bits exported through APIs to userspace.
4697  */
4698 unsigned int dev_get_flags(const struct net_device *dev)
4699 {
4700 	unsigned int flags;
4701 
4702 	flags = (dev->flags & ~(IFF_PROMISC |
4703 				IFF_ALLMULTI |
4704 				IFF_RUNNING |
4705 				IFF_LOWER_UP |
4706 				IFF_DORMANT)) |
4707 		(dev->gflags & (IFF_PROMISC |
4708 				IFF_ALLMULTI));
4709 
4710 	if (netif_running(dev)) {
4711 		if (netif_oper_up(dev))
4712 			flags |= IFF_RUNNING;
4713 		if (netif_carrier_ok(dev))
4714 			flags |= IFF_LOWER_UP;
4715 		if (netif_dormant(dev))
4716 			flags |= IFF_DORMANT;
4717 	}
4718 
4719 	return flags;
4720 }
4721 EXPORT_SYMBOL(dev_get_flags);
4722 
4723 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4724 {
4725 	unsigned int old_flags = dev->flags;
4726 	int ret;
4727 
4728 	ASSERT_RTNL();
4729 
4730 	/*
4731 	 *	Set the flags on our device.
4732 	 */
4733 
4734 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4735 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4736 			       IFF_AUTOMEDIA)) |
4737 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4738 				    IFF_ALLMULTI));
4739 
4740 	/*
4741 	 *	Load in the correct multicast list now the flags have changed.
4742 	 */
4743 
4744 	if ((old_flags ^ flags) & IFF_MULTICAST)
4745 		dev_change_rx_flags(dev, IFF_MULTICAST);
4746 
4747 	dev_set_rx_mode(dev);
4748 
4749 	/*
4750 	 *	Have we downed the interface. We handle IFF_UP ourselves
4751 	 *	according to user attempts to set it, rather than blindly
4752 	 *	setting it.
4753 	 */
4754 
4755 	ret = 0;
4756 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4757 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4758 
4759 		if (!ret)
4760 			dev_set_rx_mode(dev);
4761 	}
4762 
4763 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
4764 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
4765 
4766 		dev->gflags ^= IFF_PROMISC;
4767 		dev_set_promiscuity(dev, inc);
4768 	}
4769 
4770 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4771 	   is important. Some (broken) drivers set IFF_PROMISC, when
4772 	   IFF_ALLMULTI is requested not asking us and not reporting.
4773 	 */
4774 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4775 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4776 
4777 		dev->gflags ^= IFF_ALLMULTI;
4778 		dev_set_allmulti(dev, inc);
4779 	}
4780 
4781 	return ret;
4782 }
4783 
4784 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4785 {
4786 	unsigned int changes = dev->flags ^ old_flags;
4787 
4788 	if (changes & IFF_UP) {
4789 		if (dev->flags & IFF_UP)
4790 			call_netdevice_notifiers(NETDEV_UP, dev);
4791 		else
4792 			call_netdevice_notifiers(NETDEV_DOWN, dev);
4793 	}
4794 
4795 	if (dev->flags & IFF_UP &&
4796 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4797 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
4798 }
4799 
4800 /**
4801  *	dev_change_flags - change device settings
4802  *	@dev: device
4803  *	@flags: device state flags
4804  *
4805  *	Change settings on device based state flags. The flags are
4806  *	in the userspace exported format.
4807  */
4808 int dev_change_flags(struct net_device *dev, unsigned int flags)
4809 {
4810 	int ret;
4811 	unsigned int changes, old_flags = dev->flags;
4812 
4813 	ret = __dev_change_flags(dev, flags);
4814 	if (ret < 0)
4815 		return ret;
4816 
4817 	changes = old_flags ^ dev->flags;
4818 	if (changes)
4819 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4820 
4821 	__dev_notify_flags(dev, old_flags);
4822 	return ret;
4823 }
4824 EXPORT_SYMBOL(dev_change_flags);
4825 
4826 /**
4827  *	dev_set_mtu - Change maximum transfer unit
4828  *	@dev: device
4829  *	@new_mtu: new transfer unit
4830  *
4831  *	Change the maximum transfer size of the network device.
4832  */
4833 int dev_set_mtu(struct net_device *dev, int new_mtu)
4834 {
4835 	const struct net_device_ops *ops = dev->netdev_ops;
4836 	int err;
4837 
4838 	if (new_mtu == dev->mtu)
4839 		return 0;
4840 
4841 	/*	MTU must be positive.	 */
4842 	if (new_mtu < 0)
4843 		return -EINVAL;
4844 
4845 	if (!netif_device_present(dev))
4846 		return -ENODEV;
4847 
4848 	err = 0;
4849 	if (ops->ndo_change_mtu)
4850 		err = ops->ndo_change_mtu(dev, new_mtu);
4851 	else
4852 		dev->mtu = new_mtu;
4853 
4854 	if (!err && dev->flags & IFF_UP)
4855 		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4856 	return err;
4857 }
4858 EXPORT_SYMBOL(dev_set_mtu);
4859 
4860 /**
4861  *	dev_set_group - Change group this device belongs to
4862  *	@dev: device
4863  *	@new_group: group this device should belong to
4864  */
4865 void dev_set_group(struct net_device *dev, int new_group)
4866 {
4867 	dev->group = new_group;
4868 }
4869 EXPORT_SYMBOL(dev_set_group);
4870 
4871 /**
4872  *	dev_set_mac_address - Change Media Access Control Address
4873  *	@dev: device
4874  *	@sa: new address
4875  *
4876  *	Change the hardware (MAC) address of the device
4877  */
4878 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4879 {
4880 	const struct net_device_ops *ops = dev->netdev_ops;
4881 	int err;
4882 
4883 	if (!ops->ndo_set_mac_address)
4884 		return -EOPNOTSUPP;
4885 	if (sa->sa_family != dev->type)
4886 		return -EINVAL;
4887 	if (!netif_device_present(dev))
4888 		return -ENODEV;
4889 	err = ops->ndo_set_mac_address(dev, sa);
4890 	if (!err)
4891 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4892 	add_device_randomness(dev->dev_addr, dev->addr_len);
4893 	return err;
4894 }
4895 EXPORT_SYMBOL(dev_set_mac_address);
4896 
4897 /*
4898  *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4899  */
4900 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4901 {
4902 	int err;
4903 	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4904 
4905 	if (!dev)
4906 		return -ENODEV;
4907 
4908 	switch (cmd) {
4909 	case SIOCGIFFLAGS:	/* Get interface flags */
4910 		ifr->ifr_flags = (short) dev_get_flags(dev);
4911 		return 0;
4912 
4913 	case SIOCGIFMETRIC:	/* Get the metric on the interface
4914 				   (currently unused) */
4915 		ifr->ifr_metric = 0;
4916 		return 0;
4917 
4918 	case SIOCGIFMTU:	/* Get the MTU of a device */
4919 		ifr->ifr_mtu = dev->mtu;
4920 		return 0;
4921 
4922 	case SIOCGIFHWADDR:
4923 		if (!dev->addr_len)
4924 			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4925 		else
4926 			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4927 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4928 		ifr->ifr_hwaddr.sa_family = dev->type;
4929 		return 0;
4930 
4931 	case SIOCGIFSLAVE:
4932 		err = -EINVAL;
4933 		break;
4934 
4935 	case SIOCGIFMAP:
4936 		ifr->ifr_map.mem_start = dev->mem_start;
4937 		ifr->ifr_map.mem_end   = dev->mem_end;
4938 		ifr->ifr_map.base_addr = dev->base_addr;
4939 		ifr->ifr_map.irq       = dev->irq;
4940 		ifr->ifr_map.dma       = dev->dma;
4941 		ifr->ifr_map.port      = dev->if_port;
4942 		return 0;
4943 
4944 	case SIOCGIFINDEX:
4945 		ifr->ifr_ifindex = dev->ifindex;
4946 		return 0;
4947 
4948 	case SIOCGIFTXQLEN:
4949 		ifr->ifr_qlen = dev->tx_queue_len;
4950 		return 0;
4951 
4952 	default:
4953 		/* dev_ioctl() should ensure this case
4954 		 * is never reached
4955 		 */
4956 		WARN_ON(1);
4957 		err = -ENOTTY;
4958 		break;
4959 
4960 	}
4961 	return err;
4962 }
4963 
4964 /*
4965  *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
4966  */
4967 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4968 {
4969 	int err;
4970 	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4971 	const struct net_device_ops *ops;
4972 
4973 	if (!dev)
4974 		return -ENODEV;
4975 
4976 	ops = dev->netdev_ops;
4977 
4978 	switch (cmd) {
4979 	case SIOCSIFFLAGS:	/* Set interface flags */
4980 		return dev_change_flags(dev, ifr->ifr_flags);
4981 
4982 	case SIOCSIFMETRIC:	/* Set the metric on the interface
4983 				   (currently unused) */
4984 		return -EOPNOTSUPP;
4985 
4986 	case SIOCSIFMTU:	/* Set the MTU of a device */
4987 		return dev_set_mtu(dev, ifr->ifr_mtu);
4988 
4989 	case SIOCSIFHWADDR:
4990 		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4991 
4992 	case SIOCSIFHWBROADCAST:
4993 		if (ifr->ifr_hwaddr.sa_family != dev->type)
4994 			return -EINVAL;
4995 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4996 		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4997 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4998 		return 0;
4999 
5000 	case SIOCSIFMAP:
5001 		if (ops->ndo_set_config) {
5002 			if (!netif_device_present(dev))
5003 				return -ENODEV;
5004 			return ops->ndo_set_config(dev, &ifr->ifr_map);
5005 		}
5006 		return -EOPNOTSUPP;
5007 
5008 	case SIOCADDMULTI:
5009 		if (!ops->ndo_set_rx_mode ||
5010 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5011 			return -EINVAL;
5012 		if (!netif_device_present(dev))
5013 			return -ENODEV;
5014 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5015 
5016 	case SIOCDELMULTI:
5017 		if (!ops->ndo_set_rx_mode ||
5018 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5019 			return -EINVAL;
5020 		if (!netif_device_present(dev))
5021 			return -ENODEV;
5022 		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5023 
5024 	case SIOCSIFTXQLEN:
5025 		if (ifr->ifr_qlen < 0)
5026 			return -EINVAL;
5027 		dev->tx_queue_len = ifr->ifr_qlen;
5028 		return 0;
5029 
5030 	case SIOCSIFNAME:
5031 		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5032 		return dev_change_name(dev, ifr->ifr_newname);
5033 
5034 	case SIOCSHWTSTAMP:
5035 		err = net_hwtstamp_validate(ifr);
5036 		if (err)
5037 			return err;
5038 		/* fall through */
5039 
5040 	/*
5041 	 *	Unknown or private ioctl
5042 	 */
5043 	default:
5044 		if ((cmd >= SIOCDEVPRIVATE &&
5045 		    cmd <= SIOCDEVPRIVATE + 15) ||
5046 		    cmd == SIOCBONDENSLAVE ||
5047 		    cmd == SIOCBONDRELEASE ||
5048 		    cmd == SIOCBONDSETHWADDR ||
5049 		    cmd == SIOCBONDSLAVEINFOQUERY ||
5050 		    cmd == SIOCBONDINFOQUERY ||
5051 		    cmd == SIOCBONDCHANGEACTIVE ||
5052 		    cmd == SIOCGMIIPHY ||
5053 		    cmd == SIOCGMIIREG ||
5054 		    cmd == SIOCSMIIREG ||
5055 		    cmd == SIOCBRADDIF ||
5056 		    cmd == SIOCBRDELIF ||
5057 		    cmd == SIOCSHWTSTAMP ||
5058 		    cmd == SIOCWANDEV) {
5059 			err = -EOPNOTSUPP;
5060 			if (ops->ndo_do_ioctl) {
5061 				if (netif_device_present(dev))
5062 					err = ops->ndo_do_ioctl(dev, ifr, cmd);
5063 				else
5064 					err = -ENODEV;
5065 			}
5066 		} else
5067 			err = -EINVAL;
5068 
5069 	}
5070 	return err;
5071 }
5072 
5073 /*
5074  *	This function handles all "interface"-type I/O control requests. The actual
5075  *	'doing' part of this is dev_ifsioc above.
5076  */
5077 
5078 /**
5079  *	dev_ioctl	-	network device ioctl
5080  *	@net: the applicable net namespace
5081  *	@cmd: command to issue
5082  *	@arg: pointer to a struct ifreq in user space
5083  *
5084  *	Issue ioctl functions to devices. This is normally called by the
5085  *	user space syscall interfaces but can sometimes be useful for
5086  *	other purposes. The return value is the return from the syscall if
5087  *	positive or a negative errno code on error.
5088  */
5089 
5090 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5091 {
5092 	struct ifreq ifr;
5093 	int ret;
5094 	char *colon;
5095 
5096 	/* One special case: SIOCGIFCONF takes ifconf argument
5097 	   and requires shared lock, because it sleeps writing
5098 	   to user space.
5099 	 */
5100 
5101 	if (cmd == SIOCGIFCONF) {
5102 		rtnl_lock();
5103 		ret = dev_ifconf(net, (char __user *) arg);
5104 		rtnl_unlock();
5105 		return ret;
5106 	}
5107 	if (cmd == SIOCGIFNAME)
5108 		return dev_ifname(net, (struct ifreq __user *)arg);
5109 
5110 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5111 		return -EFAULT;
5112 
5113 	ifr.ifr_name[IFNAMSIZ-1] = 0;
5114 
5115 	colon = strchr(ifr.ifr_name, ':');
5116 	if (colon)
5117 		*colon = 0;
5118 
5119 	/*
5120 	 *	See which interface the caller is talking about.
5121 	 */
5122 
5123 	switch (cmd) {
5124 	/*
5125 	 *	These ioctl calls:
5126 	 *	- can be done by all.
5127 	 *	- atomic and do not require locking.
5128 	 *	- return a value
5129 	 */
5130 	case SIOCGIFFLAGS:
5131 	case SIOCGIFMETRIC:
5132 	case SIOCGIFMTU:
5133 	case SIOCGIFHWADDR:
5134 	case SIOCGIFSLAVE:
5135 	case SIOCGIFMAP:
5136 	case SIOCGIFINDEX:
5137 	case SIOCGIFTXQLEN:
5138 		dev_load(net, ifr.ifr_name);
5139 		rcu_read_lock();
5140 		ret = dev_ifsioc_locked(net, &ifr, cmd);
5141 		rcu_read_unlock();
5142 		if (!ret) {
5143 			if (colon)
5144 				*colon = ':';
5145 			if (copy_to_user(arg, &ifr,
5146 					 sizeof(struct ifreq)))
5147 				ret = -EFAULT;
5148 		}
5149 		return ret;
5150 
5151 	case SIOCETHTOOL:
5152 		dev_load(net, ifr.ifr_name);
5153 		rtnl_lock();
5154 		ret = dev_ethtool(net, &ifr);
5155 		rtnl_unlock();
5156 		if (!ret) {
5157 			if (colon)
5158 				*colon = ':';
5159 			if (copy_to_user(arg, &ifr,
5160 					 sizeof(struct ifreq)))
5161 				ret = -EFAULT;
5162 		}
5163 		return ret;
5164 
5165 	/*
5166 	 *	These ioctl calls:
5167 	 *	- require superuser power.
5168 	 *	- require strict serialization.
5169 	 *	- return a value
5170 	 */
5171 	case SIOCGMIIPHY:
5172 	case SIOCGMIIREG:
5173 	case SIOCSIFNAME:
5174 		if (!capable(CAP_NET_ADMIN))
5175 			return -EPERM;
5176 		dev_load(net, ifr.ifr_name);
5177 		rtnl_lock();
5178 		ret = dev_ifsioc(net, &ifr, cmd);
5179 		rtnl_unlock();
5180 		if (!ret) {
5181 			if (colon)
5182 				*colon = ':';
5183 			if (copy_to_user(arg, &ifr,
5184 					 sizeof(struct ifreq)))
5185 				ret = -EFAULT;
5186 		}
5187 		return ret;
5188 
5189 	/*
5190 	 *	These ioctl calls:
5191 	 *	- require superuser power.
5192 	 *	- require strict serialization.
5193 	 *	- do not return a value
5194 	 */
5195 	case SIOCSIFFLAGS:
5196 	case SIOCSIFMETRIC:
5197 	case SIOCSIFMTU:
5198 	case SIOCSIFMAP:
5199 	case SIOCSIFHWADDR:
5200 	case SIOCSIFSLAVE:
5201 	case SIOCADDMULTI:
5202 	case SIOCDELMULTI:
5203 	case SIOCSIFHWBROADCAST:
5204 	case SIOCSIFTXQLEN:
5205 	case SIOCSMIIREG:
5206 	case SIOCBONDENSLAVE:
5207 	case SIOCBONDRELEASE:
5208 	case SIOCBONDSETHWADDR:
5209 	case SIOCBONDCHANGEACTIVE:
5210 	case SIOCBRADDIF:
5211 	case SIOCBRDELIF:
5212 	case SIOCSHWTSTAMP:
5213 		if (!capable(CAP_NET_ADMIN))
5214 			return -EPERM;
5215 		/* fall through */
5216 	case SIOCBONDSLAVEINFOQUERY:
5217 	case SIOCBONDINFOQUERY:
5218 		dev_load(net, ifr.ifr_name);
5219 		rtnl_lock();
5220 		ret = dev_ifsioc(net, &ifr, cmd);
5221 		rtnl_unlock();
5222 		return ret;
5223 
5224 	case SIOCGIFMEM:
5225 		/* Get the per device memory space. We can add this but
5226 		 * currently do not support it */
5227 	case SIOCSIFMEM:
5228 		/* Set the per device memory buffer space.
5229 		 * Not applicable in our case */
5230 	case SIOCSIFLINK:
5231 		return -ENOTTY;
5232 
5233 	/*
5234 	 *	Unknown or private ioctl.
5235 	 */
5236 	default:
5237 		if (cmd == SIOCWANDEV ||
5238 		    (cmd >= SIOCDEVPRIVATE &&
5239 		     cmd <= SIOCDEVPRIVATE + 15)) {
5240 			dev_load(net, ifr.ifr_name);
5241 			rtnl_lock();
5242 			ret = dev_ifsioc(net, &ifr, cmd);
5243 			rtnl_unlock();
5244 			if (!ret && copy_to_user(arg, &ifr,
5245 						 sizeof(struct ifreq)))
5246 				ret = -EFAULT;
5247 			return ret;
5248 		}
5249 		/* Take care of Wireless Extensions */
5250 		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5251 			return wext_handle_ioctl(net, &ifr, cmd, arg);
5252 		return -ENOTTY;
5253 	}
5254 }
5255 
5256 
5257 /**
5258  *	dev_new_index	-	allocate an ifindex
5259  *	@net: the applicable net namespace
5260  *
5261  *	Returns a suitable unique value for a new device interface
5262  *	number.  The caller must hold the rtnl semaphore or the
5263  *	dev_base_lock to be sure it remains unique.
5264  */
5265 static int dev_new_index(struct net *net)
5266 {
5267 	int ifindex = net->ifindex;
5268 	for (;;) {
5269 		if (++ifindex <= 0)
5270 			ifindex = 1;
5271 		if (!__dev_get_by_index(net, ifindex))
5272 			return net->ifindex = ifindex;
5273 	}
5274 }
5275 
5276 /* Delayed registration/unregisteration */
5277 static LIST_HEAD(net_todo_list);
5278 
5279 static void net_set_todo(struct net_device *dev)
5280 {
5281 	list_add_tail(&dev->todo_list, &net_todo_list);
5282 }
5283 
5284 static void rollback_registered_many(struct list_head *head)
5285 {
5286 	struct net_device *dev, *tmp;
5287 
5288 	BUG_ON(dev_boot_phase);
5289 	ASSERT_RTNL();
5290 
5291 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5292 		/* Some devices call without registering
5293 		 * for initialization unwind. Remove those
5294 		 * devices and proceed with the remaining.
5295 		 */
5296 		if (dev->reg_state == NETREG_UNINITIALIZED) {
5297 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5298 				 dev->name, dev);
5299 
5300 			WARN_ON(1);
5301 			list_del(&dev->unreg_list);
5302 			continue;
5303 		}
5304 		dev->dismantle = true;
5305 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
5306 	}
5307 
5308 	/* If device is running, close it first. */
5309 	dev_close_many(head);
5310 
5311 	list_for_each_entry(dev, head, unreg_list) {
5312 		/* And unlink it from device chain. */
5313 		unlist_netdevice(dev);
5314 
5315 		dev->reg_state = NETREG_UNREGISTERING;
5316 	}
5317 
5318 	synchronize_net();
5319 
5320 	list_for_each_entry(dev, head, unreg_list) {
5321 		/* Shutdown queueing discipline. */
5322 		dev_shutdown(dev);
5323 
5324 
5325 		/* Notify protocols, that we are about to destroy
5326 		   this device. They should clean all the things.
5327 		*/
5328 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5329 
5330 		if (!dev->rtnl_link_ops ||
5331 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5332 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5333 
5334 		/*
5335 		 *	Flush the unicast and multicast chains
5336 		 */
5337 		dev_uc_flush(dev);
5338 		dev_mc_flush(dev);
5339 
5340 		if (dev->netdev_ops->ndo_uninit)
5341 			dev->netdev_ops->ndo_uninit(dev);
5342 
5343 		/* Notifier chain MUST detach us from master device. */
5344 		WARN_ON(dev->master);
5345 
5346 		/* Remove entries from kobject tree */
5347 		netdev_unregister_kobject(dev);
5348 	}
5349 
5350 	synchronize_net();
5351 
5352 	list_for_each_entry(dev, head, unreg_list)
5353 		dev_put(dev);
5354 }
5355 
5356 static void rollback_registered(struct net_device *dev)
5357 {
5358 	LIST_HEAD(single);
5359 
5360 	list_add(&dev->unreg_list, &single);
5361 	rollback_registered_many(&single);
5362 	list_del(&single);
5363 }
5364 
5365 static netdev_features_t netdev_fix_features(struct net_device *dev,
5366 	netdev_features_t features)
5367 {
5368 	/* Fix illegal checksum combinations */
5369 	if ((features & NETIF_F_HW_CSUM) &&
5370 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5371 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5372 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5373 	}
5374 
5375 	/* Fix illegal SG+CSUM combinations. */
5376 	if ((features & NETIF_F_SG) &&
5377 	    !(features & NETIF_F_ALL_CSUM)) {
5378 		netdev_dbg(dev,
5379 			"Dropping NETIF_F_SG since no checksum feature.\n");
5380 		features &= ~NETIF_F_SG;
5381 	}
5382 
5383 	/* TSO requires that SG is present as well. */
5384 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5385 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5386 		features &= ~NETIF_F_ALL_TSO;
5387 	}
5388 
5389 	/* TSO ECN requires that TSO is present as well. */
5390 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5391 		features &= ~NETIF_F_TSO_ECN;
5392 
5393 	/* Software GSO depends on SG. */
5394 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5395 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5396 		features &= ~NETIF_F_GSO;
5397 	}
5398 
5399 	/* UFO needs SG and checksumming */
5400 	if (features & NETIF_F_UFO) {
5401 		/* maybe split UFO into V4 and V6? */
5402 		if (!((features & NETIF_F_GEN_CSUM) ||
5403 		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5404 			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5405 			netdev_dbg(dev,
5406 				"Dropping NETIF_F_UFO since no checksum offload features.\n");
5407 			features &= ~NETIF_F_UFO;
5408 		}
5409 
5410 		if (!(features & NETIF_F_SG)) {
5411 			netdev_dbg(dev,
5412 				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5413 			features &= ~NETIF_F_UFO;
5414 		}
5415 	}
5416 
5417 	return features;
5418 }
5419 
5420 int __netdev_update_features(struct net_device *dev)
5421 {
5422 	netdev_features_t features;
5423 	int err = 0;
5424 
5425 	ASSERT_RTNL();
5426 
5427 	features = netdev_get_wanted_features(dev);
5428 
5429 	if (dev->netdev_ops->ndo_fix_features)
5430 		features = dev->netdev_ops->ndo_fix_features(dev, features);
5431 
5432 	/* driver might be less strict about feature dependencies */
5433 	features = netdev_fix_features(dev, features);
5434 
5435 	if (dev->features == features)
5436 		return 0;
5437 
5438 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5439 		&dev->features, &features);
5440 
5441 	if (dev->netdev_ops->ndo_set_features)
5442 		err = dev->netdev_ops->ndo_set_features(dev, features);
5443 
5444 	if (unlikely(err < 0)) {
5445 		netdev_err(dev,
5446 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
5447 			err, &features, &dev->features);
5448 		return -1;
5449 	}
5450 
5451 	if (!err)
5452 		dev->features = features;
5453 
5454 	return 1;
5455 }
5456 
5457 /**
5458  *	netdev_update_features - recalculate device features
5459  *	@dev: the device to check
5460  *
5461  *	Recalculate dev->features set and send notifications if it
5462  *	has changed. Should be called after driver or hardware dependent
5463  *	conditions might have changed that influence the features.
5464  */
5465 void netdev_update_features(struct net_device *dev)
5466 {
5467 	if (__netdev_update_features(dev))
5468 		netdev_features_change(dev);
5469 }
5470 EXPORT_SYMBOL(netdev_update_features);
5471 
5472 /**
5473  *	netdev_change_features - recalculate device features
5474  *	@dev: the device to check
5475  *
5476  *	Recalculate dev->features set and send notifications even
5477  *	if they have not changed. Should be called instead of
5478  *	netdev_update_features() if also dev->vlan_features might
5479  *	have changed to allow the changes to be propagated to stacked
5480  *	VLAN devices.
5481  */
5482 void netdev_change_features(struct net_device *dev)
5483 {
5484 	__netdev_update_features(dev);
5485 	netdev_features_change(dev);
5486 }
5487 EXPORT_SYMBOL(netdev_change_features);
5488 
5489 /**
5490  *	netif_stacked_transfer_operstate -	transfer operstate
5491  *	@rootdev: the root or lower level device to transfer state from
5492  *	@dev: the device to transfer operstate to
5493  *
5494  *	Transfer operational state from root to device. This is normally
5495  *	called when a stacking relationship exists between the root
5496  *	device and the device(a leaf device).
5497  */
5498 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5499 					struct net_device *dev)
5500 {
5501 	if (rootdev->operstate == IF_OPER_DORMANT)
5502 		netif_dormant_on(dev);
5503 	else
5504 		netif_dormant_off(dev);
5505 
5506 	if (netif_carrier_ok(rootdev)) {
5507 		if (!netif_carrier_ok(dev))
5508 			netif_carrier_on(dev);
5509 	} else {
5510 		if (netif_carrier_ok(dev))
5511 			netif_carrier_off(dev);
5512 	}
5513 }
5514 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5515 
5516 #ifdef CONFIG_RPS
5517 static int netif_alloc_rx_queues(struct net_device *dev)
5518 {
5519 	unsigned int i, count = dev->num_rx_queues;
5520 	struct netdev_rx_queue *rx;
5521 
5522 	BUG_ON(count < 1);
5523 
5524 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5525 	if (!rx) {
5526 		pr_err("netdev: Unable to allocate %u rx queues\n", count);
5527 		return -ENOMEM;
5528 	}
5529 	dev->_rx = rx;
5530 
5531 	for (i = 0; i < count; i++)
5532 		rx[i].dev = dev;
5533 	return 0;
5534 }
5535 #endif
5536 
5537 static void netdev_init_one_queue(struct net_device *dev,
5538 				  struct netdev_queue *queue, void *_unused)
5539 {
5540 	/* Initialize queue lock */
5541 	spin_lock_init(&queue->_xmit_lock);
5542 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5543 	queue->xmit_lock_owner = -1;
5544 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5545 	queue->dev = dev;
5546 #ifdef CONFIG_BQL
5547 	dql_init(&queue->dql, HZ);
5548 #endif
5549 }
5550 
5551 static int netif_alloc_netdev_queues(struct net_device *dev)
5552 {
5553 	unsigned int count = dev->num_tx_queues;
5554 	struct netdev_queue *tx;
5555 
5556 	BUG_ON(count < 1);
5557 
5558 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5559 	if (!tx) {
5560 		pr_err("netdev: Unable to allocate %u tx queues\n", count);
5561 		return -ENOMEM;
5562 	}
5563 	dev->_tx = tx;
5564 
5565 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5566 	spin_lock_init(&dev->tx_global_lock);
5567 
5568 	return 0;
5569 }
5570 
5571 /**
5572  *	register_netdevice	- register a network device
5573  *	@dev: device to register
5574  *
5575  *	Take a completed network device structure and add it to the kernel
5576  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5577  *	chain. 0 is returned on success. A negative errno code is returned
5578  *	on a failure to set up the device, or if the name is a duplicate.
5579  *
5580  *	Callers must hold the rtnl semaphore. You may want
5581  *	register_netdev() instead of this.
5582  *
5583  *	BUGS:
5584  *	The locking appears insufficient to guarantee two parallel registers
5585  *	will not get the same name.
5586  */
5587 
5588 int register_netdevice(struct net_device *dev)
5589 {
5590 	int ret;
5591 	struct net *net = dev_net(dev);
5592 
5593 	BUG_ON(dev_boot_phase);
5594 	ASSERT_RTNL();
5595 
5596 	might_sleep();
5597 
5598 	/* When net_device's are persistent, this will be fatal. */
5599 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5600 	BUG_ON(!net);
5601 
5602 	spin_lock_init(&dev->addr_list_lock);
5603 	netdev_set_addr_lockdep_class(dev);
5604 
5605 	dev->iflink = -1;
5606 
5607 	ret = dev_get_valid_name(net, dev, dev->name);
5608 	if (ret < 0)
5609 		goto out;
5610 
5611 	/* Init, if this function is available */
5612 	if (dev->netdev_ops->ndo_init) {
5613 		ret = dev->netdev_ops->ndo_init(dev);
5614 		if (ret) {
5615 			if (ret > 0)
5616 				ret = -EIO;
5617 			goto out;
5618 		}
5619 	}
5620 
5621 	ret = -EBUSY;
5622 	if (!dev->ifindex)
5623 		dev->ifindex = dev_new_index(net);
5624 	else if (__dev_get_by_index(net, dev->ifindex))
5625 		goto err_uninit;
5626 
5627 	if (dev->iflink == -1)
5628 		dev->iflink = dev->ifindex;
5629 
5630 	/* Transfer changeable features to wanted_features and enable
5631 	 * software offloads (GSO and GRO).
5632 	 */
5633 	dev->hw_features |= NETIF_F_SOFT_FEATURES;
5634 	dev->features |= NETIF_F_SOFT_FEATURES;
5635 	dev->wanted_features = dev->features & dev->hw_features;
5636 
5637 	/* Turn on no cache copy if HW is doing checksum */
5638 	if (!(dev->flags & IFF_LOOPBACK)) {
5639 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
5640 		if (dev->features & NETIF_F_ALL_CSUM) {
5641 			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5642 			dev->features |= NETIF_F_NOCACHE_COPY;
5643 		}
5644 	}
5645 
5646 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5647 	 */
5648 	dev->vlan_features |= NETIF_F_HIGHDMA;
5649 
5650 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5651 	ret = notifier_to_errno(ret);
5652 	if (ret)
5653 		goto err_uninit;
5654 
5655 	ret = netdev_register_kobject(dev);
5656 	if (ret)
5657 		goto err_uninit;
5658 	dev->reg_state = NETREG_REGISTERED;
5659 
5660 	__netdev_update_features(dev);
5661 
5662 	/*
5663 	 *	Default initial state at registry is that the
5664 	 *	device is present.
5665 	 */
5666 
5667 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5668 
5669 	linkwatch_init_dev(dev);
5670 
5671 	dev_init_scheduler(dev);
5672 	dev_hold(dev);
5673 	list_netdevice(dev);
5674 	add_device_randomness(dev->dev_addr, dev->addr_len);
5675 
5676 	/* Notify protocols, that a new device appeared. */
5677 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5678 	ret = notifier_to_errno(ret);
5679 	if (ret) {
5680 		rollback_registered(dev);
5681 		dev->reg_state = NETREG_UNREGISTERED;
5682 	}
5683 	/*
5684 	 *	Prevent userspace races by waiting until the network
5685 	 *	device is fully setup before sending notifications.
5686 	 */
5687 	if (!dev->rtnl_link_ops ||
5688 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5689 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5690 
5691 out:
5692 	return ret;
5693 
5694 err_uninit:
5695 	if (dev->netdev_ops->ndo_uninit)
5696 		dev->netdev_ops->ndo_uninit(dev);
5697 	goto out;
5698 }
5699 EXPORT_SYMBOL(register_netdevice);
5700 
5701 /**
5702  *	init_dummy_netdev	- init a dummy network device for NAPI
5703  *	@dev: device to init
5704  *
5705  *	This takes a network device structure and initialize the minimum
5706  *	amount of fields so it can be used to schedule NAPI polls without
5707  *	registering a full blown interface. This is to be used by drivers
5708  *	that need to tie several hardware interfaces to a single NAPI
5709  *	poll scheduler due to HW limitations.
5710  */
5711 int init_dummy_netdev(struct net_device *dev)
5712 {
5713 	/* Clear everything. Note we don't initialize spinlocks
5714 	 * are they aren't supposed to be taken by any of the
5715 	 * NAPI code and this dummy netdev is supposed to be
5716 	 * only ever used for NAPI polls
5717 	 */
5718 	memset(dev, 0, sizeof(struct net_device));
5719 
5720 	/* make sure we BUG if trying to hit standard
5721 	 * register/unregister code path
5722 	 */
5723 	dev->reg_state = NETREG_DUMMY;
5724 
5725 	/* NAPI wants this */
5726 	INIT_LIST_HEAD(&dev->napi_list);
5727 
5728 	/* a dummy interface is started by default */
5729 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5730 	set_bit(__LINK_STATE_START, &dev->state);
5731 
5732 	/* Note : We dont allocate pcpu_refcnt for dummy devices,
5733 	 * because users of this 'device' dont need to change
5734 	 * its refcount.
5735 	 */
5736 
5737 	return 0;
5738 }
5739 EXPORT_SYMBOL_GPL(init_dummy_netdev);
5740 
5741 
5742 /**
5743  *	register_netdev	- register a network device
5744  *	@dev: device to register
5745  *
5746  *	Take a completed network device structure and add it to the kernel
5747  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5748  *	chain. 0 is returned on success. A negative errno code is returned
5749  *	on a failure to set up the device, or if the name is a duplicate.
5750  *
5751  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
5752  *	and expands the device name if you passed a format string to
5753  *	alloc_netdev.
5754  */
5755 int register_netdev(struct net_device *dev)
5756 {
5757 	int err;
5758 
5759 	rtnl_lock();
5760 	err = register_netdevice(dev);
5761 	rtnl_unlock();
5762 	return err;
5763 }
5764 EXPORT_SYMBOL(register_netdev);
5765 
5766 int netdev_refcnt_read(const struct net_device *dev)
5767 {
5768 	int i, refcnt = 0;
5769 
5770 	for_each_possible_cpu(i)
5771 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5772 	return refcnt;
5773 }
5774 EXPORT_SYMBOL(netdev_refcnt_read);
5775 
5776 /**
5777  * netdev_wait_allrefs - wait until all references are gone.
5778  * @dev: target net_device
5779  *
5780  * This is called when unregistering network devices.
5781  *
5782  * Any protocol or device that holds a reference should register
5783  * for netdevice notification, and cleanup and put back the
5784  * reference if they receive an UNREGISTER event.
5785  * We can get stuck here if buggy protocols don't correctly
5786  * call dev_put.
5787  */
5788 static void netdev_wait_allrefs(struct net_device *dev)
5789 {
5790 	unsigned long rebroadcast_time, warning_time;
5791 	int refcnt;
5792 
5793 	linkwatch_forget_dev(dev);
5794 
5795 	rebroadcast_time = warning_time = jiffies;
5796 	refcnt = netdev_refcnt_read(dev);
5797 
5798 	while (refcnt != 0) {
5799 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5800 			rtnl_lock();
5801 
5802 			/* Rebroadcast unregister notification */
5803 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5804 
5805 			__rtnl_unlock();
5806 			rcu_barrier();
5807 			rtnl_lock();
5808 
5809 			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5810 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5811 				     &dev->state)) {
5812 				/* We must not have linkwatch events
5813 				 * pending on unregister. If this
5814 				 * happens, we simply run the queue
5815 				 * unscheduled, resulting in a noop
5816 				 * for this device.
5817 				 */
5818 				linkwatch_run_queue();
5819 			}
5820 
5821 			__rtnl_unlock();
5822 
5823 			rebroadcast_time = jiffies;
5824 		}
5825 
5826 		msleep(250);
5827 
5828 		refcnt = netdev_refcnt_read(dev);
5829 
5830 		if (time_after(jiffies, warning_time + 10 * HZ)) {
5831 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5832 				 dev->name, refcnt);
5833 			warning_time = jiffies;
5834 		}
5835 	}
5836 }
5837 
5838 /* The sequence is:
5839  *
5840  *	rtnl_lock();
5841  *	...
5842  *	register_netdevice(x1);
5843  *	register_netdevice(x2);
5844  *	...
5845  *	unregister_netdevice(y1);
5846  *	unregister_netdevice(y2);
5847  *      ...
5848  *	rtnl_unlock();
5849  *	free_netdev(y1);
5850  *	free_netdev(y2);
5851  *
5852  * We are invoked by rtnl_unlock().
5853  * This allows us to deal with problems:
5854  * 1) We can delete sysfs objects which invoke hotplug
5855  *    without deadlocking with linkwatch via keventd.
5856  * 2) Since we run with the RTNL semaphore not held, we can sleep
5857  *    safely in order to wait for the netdev refcnt to drop to zero.
5858  *
5859  * We must not return until all unregister events added during
5860  * the interval the lock was held have been completed.
5861  */
5862 void netdev_run_todo(void)
5863 {
5864 	struct list_head list;
5865 
5866 	/* Snapshot list, allow later requests */
5867 	list_replace_init(&net_todo_list, &list);
5868 
5869 	__rtnl_unlock();
5870 
5871 
5872 	/* Wait for rcu callbacks to finish before next phase */
5873 	if (!list_empty(&list))
5874 		rcu_barrier();
5875 
5876 	while (!list_empty(&list)) {
5877 		struct net_device *dev
5878 			= list_first_entry(&list, struct net_device, todo_list);
5879 		list_del(&dev->todo_list);
5880 
5881 		rtnl_lock();
5882 		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5883 		__rtnl_unlock();
5884 
5885 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5886 			pr_err("network todo '%s' but state %d\n",
5887 			       dev->name, dev->reg_state);
5888 			dump_stack();
5889 			continue;
5890 		}
5891 
5892 		dev->reg_state = NETREG_UNREGISTERED;
5893 
5894 		on_each_cpu(flush_backlog, dev, 1);
5895 
5896 		netdev_wait_allrefs(dev);
5897 
5898 		/* paranoia */
5899 		BUG_ON(netdev_refcnt_read(dev));
5900 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
5901 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5902 		WARN_ON(dev->dn_ptr);
5903 
5904 		if (dev->destructor)
5905 			dev->destructor(dev);
5906 
5907 		/* Free network device */
5908 		kobject_put(&dev->dev.kobj);
5909 	}
5910 }
5911 
5912 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
5913  * fields in the same order, with only the type differing.
5914  */
5915 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5916 			     const struct net_device_stats *netdev_stats)
5917 {
5918 #if BITS_PER_LONG == 64
5919 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5920 	memcpy(stats64, netdev_stats, sizeof(*stats64));
5921 #else
5922 	size_t i, n = sizeof(*stats64) / sizeof(u64);
5923 	const unsigned long *src = (const unsigned long *)netdev_stats;
5924 	u64 *dst = (u64 *)stats64;
5925 
5926 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5927 		     sizeof(*stats64) / sizeof(u64));
5928 	for (i = 0; i < n; i++)
5929 		dst[i] = src[i];
5930 #endif
5931 }
5932 EXPORT_SYMBOL(netdev_stats_to_stats64);
5933 
5934 /**
5935  *	dev_get_stats	- get network device statistics
5936  *	@dev: device to get statistics from
5937  *	@storage: place to store stats
5938  *
5939  *	Get network statistics from device. Return @storage.
5940  *	The device driver may provide its own method by setting
5941  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5942  *	otherwise the internal statistics structure is used.
5943  */
5944 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5945 					struct rtnl_link_stats64 *storage)
5946 {
5947 	const struct net_device_ops *ops = dev->netdev_ops;
5948 
5949 	if (ops->ndo_get_stats64) {
5950 		memset(storage, 0, sizeof(*storage));
5951 		ops->ndo_get_stats64(dev, storage);
5952 	} else if (ops->ndo_get_stats) {
5953 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5954 	} else {
5955 		netdev_stats_to_stats64(storage, &dev->stats);
5956 	}
5957 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5958 	return storage;
5959 }
5960 EXPORT_SYMBOL(dev_get_stats);
5961 
5962 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5963 {
5964 	struct netdev_queue *queue = dev_ingress_queue(dev);
5965 
5966 #ifdef CONFIG_NET_CLS_ACT
5967 	if (queue)
5968 		return queue;
5969 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5970 	if (!queue)
5971 		return NULL;
5972 	netdev_init_one_queue(dev, queue, NULL);
5973 	queue->qdisc = &noop_qdisc;
5974 	queue->qdisc_sleeping = &noop_qdisc;
5975 	rcu_assign_pointer(dev->ingress_queue, queue);
5976 #endif
5977 	return queue;
5978 }
5979 
5980 static const struct ethtool_ops default_ethtool_ops;
5981 
5982 /**
5983  *	alloc_netdev_mqs - allocate network device
5984  *	@sizeof_priv:	size of private data to allocate space for
5985  *	@name:		device name format string
5986  *	@setup:		callback to initialize device
5987  *	@txqs:		the number of TX subqueues to allocate
5988  *	@rxqs:		the number of RX subqueues to allocate
5989  *
5990  *	Allocates a struct net_device with private data area for driver use
5991  *	and performs basic initialization.  Also allocates subquue structs
5992  *	for each queue on the device.
5993  */
5994 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5995 		void (*setup)(struct net_device *),
5996 		unsigned int txqs, unsigned int rxqs)
5997 {
5998 	struct net_device *dev;
5999 	size_t alloc_size;
6000 	struct net_device *p;
6001 
6002 	BUG_ON(strlen(name) >= sizeof(dev->name));
6003 
6004 	if (txqs < 1) {
6005 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6006 		return NULL;
6007 	}
6008 
6009 #ifdef CONFIG_RPS
6010 	if (rxqs < 1) {
6011 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6012 		return NULL;
6013 	}
6014 #endif
6015 
6016 	alloc_size = sizeof(struct net_device);
6017 	if (sizeof_priv) {
6018 		/* ensure 32-byte alignment of private area */
6019 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6020 		alloc_size += sizeof_priv;
6021 	}
6022 	/* ensure 32-byte alignment of whole construct */
6023 	alloc_size += NETDEV_ALIGN - 1;
6024 
6025 	p = kzalloc(alloc_size, GFP_KERNEL);
6026 	if (!p) {
6027 		pr_err("alloc_netdev: Unable to allocate device\n");
6028 		return NULL;
6029 	}
6030 
6031 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
6032 	dev->padded = (char *)dev - (char *)p;
6033 
6034 	dev->pcpu_refcnt = alloc_percpu(int);
6035 	if (!dev->pcpu_refcnt)
6036 		goto free_p;
6037 
6038 	if (dev_addr_init(dev))
6039 		goto free_pcpu;
6040 
6041 	dev_mc_init(dev);
6042 	dev_uc_init(dev);
6043 
6044 	dev_net_set(dev, &init_net);
6045 
6046 	dev->gso_max_size = GSO_MAX_SIZE;
6047 	dev->gso_max_segs = GSO_MAX_SEGS;
6048 
6049 	INIT_LIST_HEAD(&dev->napi_list);
6050 	INIT_LIST_HEAD(&dev->unreg_list);
6051 	INIT_LIST_HEAD(&dev->link_watch_list);
6052 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
6053 	setup(dev);
6054 
6055 	dev->num_tx_queues = txqs;
6056 	dev->real_num_tx_queues = txqs;
6057 	if (netif_alloc_netdev_queues(dev))
6058 		goto free_all;
6059 
6060 #ifdef CONFIG_RPS
6061 	dev->num_rx_queues = rxqs;
6062 	dev->real_num_rx_queues = rxqs;
6063 	if (netif_alloc_rx_queues(dev))
6064 		goto free_all;
6065 #endif
6066 
6067 	strcpy(dev->name, name);
6068 	dev->group = INIT_NETDEV_GROUP;
6069 	if (!dev->ethtool_ops)
6070 		dev->ethtool_ops = &default_ethtool_ops;
6071 	return dev;
6072 
6073 free_all:
6074 	free_netdev(dev);
6075 	return NULL;
6076 
6077 free_pcpu:
6078 	free_percpu(dev->pcpu_refcnt);
6079 	kfree(dev->_tx);
6080 #ifdef CONFIG_RPS
6081 	kfree(dev->_rx);
6082 #endif
6083 
6084 free_p:
6085 	kfree(p);
6086 	return NULL;
6087 }
6088 EXPORT_SYMBOL(alloc_netdev_mqs);
6089 
6090 /**
6091  *	free_netdev - free network device
6092  *	@dev: device
6093  *
6094  *	This function does the last stage of destroying an allocated device
6095  * 	interface. The reference to the device object is released.
6096  *	If this is the last reference then it will be freed.
6097  */
6098 void free_netdev(struct net_device *dev)
6099 {
6100 	struct napi_struct *p, *n;
6101 
6102 	release_net(dev_net(dev));
6103 
6104 	kfree(dev->_tx);
6105 #ifdef CONFIG_RPS
6106 	kfree(dev->_rx);
6107 #endif
6108 
6109 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6110 
6111 	/* Flush device addresses */
6112 	dev_addr_flush(dev);
6113 
6114 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6115 		netif_napi_del(p);
6116 
6117 	free_percpu(dev->pcpu_refcnt);
6118 	dev->pcpu_refcnt = NULL;
6119 
6120 	/*  Compatibility with error handling in drivers */
6121 	if (dev->reg_state == NETREG_UNINITIALIZED) {
6122 		kfree((char *)dev - dev->padded);
6123 		return;
6124 	}
6125 
6126 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6127 	dev->reg_state = NETREG_RELEASED;
6128 
6129 	/* will free via device release */
6130 	put_device(&dev->dev);
6131 }
6132 EXPORT_SYMBOL(free_netdev);
6133 
6134 /**
6135  *	synchronize_net -  Synchronize with packet receive processing
6136  *
6137  *	Wait for packets currently being received to be done.
6138  *	Does not block later packets from starting.
6139  */
6140 void synchronize_net(void)
6141 {
6142 	might_sleep();
6143 	if (rtnl_is_locked())
6144 		synchronize_rcu_expedited();
6145 	else
6146 		synchronize_rcu();
6147 }
6148 EXPORT_SYMBOL(synchronize_net);
6149 
6150 /**
6151  *	unregister_netdevice_queue - remove device from the kernel
6152  *	@dev: device
6153  *	@head: list
6154  *
6155  *	This function shuts down a device interface and removes it
6156  *	from the kernel tables.
6157  *	If head not NULL, device is queued to be unregistered later.
6158  *
6159  *	Callers must hold the rtnl semaphore.  You may want
6160  *	unregister_netdev() instead of this.
6161  */
6162 
6163 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6164 {
6165 	ASSERT_RTNL();
6166 
6167 	if (head) {
6168 		list_move_tail(&dev->unreg_list, head);
6169 	} else {
6170 		rollback_registered(dev);
6171 		/* Finish processing unregister after unlock */
6172 		net_set_todo(dev);
6173 	}
6174 }
6175 EXPORT_SYMBOL(unregister_netdevice_queue);
6176 
6177 /**
6178  *	unregister_netdevice_many - unregister many devices
6179  *	@head: list of devices
6180  */
6181 void unregister_netdevice_many(struct list_head *head)
6182 {
6183 	struct net_device *dev;
6184 
6185 	if (!list_empty(head)) {
6186 		rollback_registered_many(head);
6187 		list_for_each_entry(dev, head, unreg_list)
6188 			net_set_todo(dev);
6189 	}
6190 }
6191 EXPORT_SYMBOL(unregister_netdevice_many);
6192 
6193 /**
6194  *	unregister_netdev - remove device from the kernel
6195  *	@dev: device
6196  *
6197  *	This function shuts down a device interface and removes it
6198  *	from the kernel tables.
6199  *
6200  *	This is just a wrapper for unregister_netdevice that takes
6201  *	the rtnl semaphore.  In general you want to use this and not
6202  *	unregister_netdevice.
6203  */
6204 void unregister_netdev(struct net_device *dev)
6205 {
6206 	rtnl_lock();
6207 	unregister_netdevice(dev);
6208 	rtnl_unlock();
6209 }
6210 EXPORT_SYMBOL(unregister_netdev);
6211 
6212 /**
6213  *	dev_change_net_namespace - move device to different nethost namespace
6214  *	@dev: device
6215  *	@net: network namespace
6216  *	@pat: If not NULL name pattern to try if the current device name
6217  *	      is already taken in the destination network namespace.
6218  *
6219  *	This function shuts down a device interface and moves it
6220  *	to a new network namespace. On success 0 is returned, on
6221  *	a failure a netagive errno code is returned.
6222  *
6223  *	Callers must hold the rtnl semaphore.
6224  */
6225 
6226 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6227 {
6228 	int err;
6229 
6230 	ASSERT_RTNL();
6231 
6232 	/* Don't allow namespace local devices to be moved. */
6233 	err = -EINVAL;
6234 	if (dev->features & NETIF_F_NETNS_LOCAL)
6235 		goto out;
6236 
6237 	/* Ensure the device has been registrered */
6238 	err = -EINVAL;
6239 	if (dev->reg_state != NETREG_REGISTERED)
6240 		goto out;
6241 
6242 	/* Get out if there is nothing todo */
6243 	err = 0;
6244 	if (net_eq(dev_net(dev), net))
6245 		goto out;
6246 
6247 	/* Pick the destination device name, and ensure
6248 	 * we can use it in the destination network namespace.
6249 	 */
6250 	err = -EEXIST;
6251 	if (__dev_get_by_name(net, dev->name)) {
6252 		/* We get here if we can't use the current device name */
6253 		if (!pat)
6254 			goto out;
6255 		if (dev_get_valid_name(net, dev, pat) < 0)
6256 			goto out;
6257 	}
6258 
6259 	/*
6260 	 * And now a mini version of register_netdevice unregister_netdevice.
6261 	 */
6262 
6263 	/* If device is running close it first. */
6264 	dev_close(dev);
6265 
6266 	/* And unlink it from device chain */
6267 	err = -ENODEV;
6268 	unlist_netdevice(dev);
6269 
6270 	synchronize_net();
6271 
6272 	/* Shutdown queueing discipline. */
6273 	dev_shutdown(dev);
6274 
6275 	/* Notify protocols, that we are about to destroy
6276 	   this device. They should clean all the things.
6277 
6278 	   Note that dev->reg_state stays at NETREG_REGISTERED.
6279 	   This is wanted because this way 8021q and macvlan know
6280 	   the device is just moving and can keep their slaves up.
6281 	*/
6282 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6283 	rcu_barrier();
6284 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6285 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6286 
6287 	/*
6288 	 *	Flush the unicast and multicast chains
6289 	 */
6290 	dev_uc_flush(dev);
6291 	dev_mc_flush(dev);
6292 
6293 	/* Actually switch the network namespace */
6294 	dev_net_set(dev, net);
6295 
6296 	/* If there is an ifindex conflict assign a new one */
6297 	if (__dev_get_by_index(net, dev->ifindex)) {
6298 		int iflink = (dev->iflink == dev->ifindex);
6299 		dev->ifindex = dev_new_index(net);
6300 		if (iflink)
6301 			dev->iflink = dev->ifindex;
6302 	}
6303 
6304 	/* Fixup kobjects */
6305 	err = device_rename(&dev->dev, dev->name);
6306 	WARN_ON(err);
6307 
6308 	/* Add the device back in the hashes */
6309 	list_netdevice(dev);
6310 
6311 	/* Notify protocols, that a new device appeared. */
6312 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
6313 
6314 	/*
6315 	 *	Prevent userspace races by waiting until the network
6316 	 *	device is fully setup before sending notifications.
6317 	 */
6318 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6319 
6320 	synchronize_net();
6321 	err = 0;
6322 out:
6323 	return err;
6324 }
6325 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
6326 
6327 static int dev_cpu_callback(struct notifier_block *nfb,
6328 			    unsigned long action,
6329 			    void *ocpu)
6330 {
6331 	struct sk_buff **list_skb;
6332 	struct sk_buff *skb;
6333 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
6334 	struct softnet_data *sd, *oldsd;
6335 
6336 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6337 		return NOTIFY_OK;
6338 
6339 	local_irq_disable();
6340 	cpu = smp_processor_id();
6341 	sd = &per_cpu(softnet_data, cpu);
6342 	oldsd = &per_cpu(softnet_data, oldcpu);
6343 
6344 	/* Find end of our completion_queue. */
6345 	list_skb = &sd->completion_queue;
6346 	while (*list_skb)
6347 		list_skb = &(*list_skb)->next;
6348 	/* Append completion queue from offline CPU. */
6349 	*list_skb = oldsd->completion_queue;
6350 	oldsd->completion_queue = NULL;
6351 
6352 	/* Append output queue from offline CPU. */
6353 	if (oldsd->output_queue) {
6354 		*sd->output_queue_tailp = oldsd->output_queue;
6355 		sd->output_queue_tailp = oldsd->output_queue_tailp;
6356 		oldsd->output_queue = NULL;
6357 		oldsd->output_queue_tailp = &oldsd->output_queue;
6358 	}
6359 	/* Append NAPI poll list from offline CPU. */
6360 	if (!list_empty(&oldsd->poll_list)) {
6361 		list_splice_init(&oldsd->poll_list, &sd->poll_list);
6362 		raise_softirq_irqoff(NET_RX_SOFTIRQ);
6363 	}
6364 
6365 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
6366 	local_irq_enable();
6367 
6368 	/* Process offline CPU's input_pkt_queue */
6369 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6370 		netif_rx(skb);
6371 		input_queue_head_incr(oldsd);
6372 	}
6373 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6374 		netif_rx(skb);
6375 		input_queue_head_incr(oldsd);
6376 	}
6377 
6378 	return NOTIFY_OK;
6379 }
6380 
6381 
6382 /**
6383  *	netdev_increment_features - increment feature set by one
6384  *	@all: current feature set
6385  *	@one: new feature set
6386  *	@mask: mask feature set
6387  *
6388  *	Computes a new feature set after adding a device with feature set
6389  *	@one to the master device with current feature set @all.  Will not
6390  *	enable anything that is off in @mask. Returns the new feature set.
6391  */
6392 netdev_features_t netdev_increment_features(netdev_features_t all,
6393 	netdev_features_t one, netdev_features_t mask)
6394 {
6395 	if (mask & NETIF_F_GEN_CSUM)
6396 		mask |= NETIF_F_ALL_CSUM;
6397 	mask |= NETIF_F_VLAN_CHALLENGED;
6398 
6399 	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6400 	all &= one | ~NETIF_F_ALL_FOR_ALL;
6401 
6402 	/* If one device supports hw checksumming, set for all. */
6403 	if (all & NETIF_F_GEN_CSUM)
6404 		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6405 
6406 	return all;
6407 }
6408 EXPORT_SYMBOL(netdev_increment_features);
6409 
6410 static struct hlist_head *netdev_create_hash(void)
6411 {
6412 	int i;
6413 	struct hlist_head *hash;
6414 
6415 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6416 	if (hash != NULL)
6417 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
6418 			INIT_HLIST_HEAD(&hash[i]);
6419 
6420 	return hash;
6421 }
6422 
6423 /* Initialize per network namespace state */
6424 static int __net_init netdev_init(struct net *net)
6425 {
6426 	if (net != &init_net)
6427 		INIT_LIST_HEAD(&net->dev_base_head);
6428 
6429 	net->dev_name_head = netdev_create_hash();
6430 	if (net->dev_name_head == NULL)
6431 		goto err_name;
6432 
6433 	net->dev_index_head = netdev_create_hash();
6434 	if (net->dev_index_head == NULL)
6435 		goto err_idx;
6436 
6437 	return 0;
6438 
6439 err_idx:
6440 	kfree(net->dev_name_head);
6441 err_name:
6442 	return -ENOMEM;
6443 }
6444 
6445 /**
6446  *	netdev_drivername - network driver for the device
6447  *	@dev: network device
6448  *
6449  *	Determine network driver for device.
6450  */
6451 const char *netdev_drivername(const struct net_device *dev)
6452 {
6453 	const struct device_driver *driver;
6454 	const struct device *parent;
6455 	const char *empty = "";
6456 
6457 	parent = dev->dev.parent;
6458 	if (!parent)
6459 		return empty;
6460 
6461 	driver = parent->driver;
6462 	if (driver && driver->name)
6463 		return driver->name;
6464 	return empty;
6465 }
6466 
6467 static int __netdev_printk(const char *level, const struct net_device *dev,
6468 			   struct va_format *vaf)
6469 {
6470 	int r;
6471 
6472 	if (dev && dev->dev.parent) {
6473 		r = dev_printk_emit(level[1] - '0',
6474 				    dev->dev.parent,
6475 				    "%s %s %s: %pV",
6476 				    dev_driver_string(dev->dev.parent),
6477 				    dev_name(dev->dev.parent),
6478 				    netdev_name(dev), vaf);
6479 	} else if (dev) {
6480 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6481 	} else {
6482 		r = printk("%s(NULL net_device): %pV", level, vaf);
6483 	}
6484 
6485 	return r;
6486 }
6487 
6488 int netdev_printk(const char *level, const struct net_device *dev,
6489 		  const char *format, ...)
6490 {
6491 	struct va_format vaf;
6492 	va_list args;
6493 	int r;
6494 
6495 	va_start(args, format);
6496 
6497 	vaf.fmt = format;
6498 	vaf.va = &args;
6499 
6500 	r = __netdev_printk(level, dev, &vaf);
6501 
6502 	va_end(args);
6503 
6504 	return r;
6505 }
6506 EXPORT_SYMBOL(netdev_printk);
6507 
6508 #define define_netdev_printk_level(func, level)			\
6509 int func(const struct net_device *dev, const char *fmt, ...)	\
6510 {								\
6511 	int r;							\
6512 	struct va_format vaf;					\
6513 	va_list args;						\
6514 								\
6515 	va_start(args, fmt);					\
6516 								\
6517 	vaf.fmt = fmt;						\
6518 	vaf.va = &args;						\
6519 								\
6520 	r = __netdev_printk(level, dev, &vaf);			\
6521 								\
6522 	va_end(args);						\
6523 								\
6524 	return r;						\
6525 }								\
6526 EXPORT_SYMBOL(func);
6527 
6528 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6529 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6530 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6531 define_netdev_printk_level(netdev_err, KERN_ERR);
6532 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6533 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6534 define_netdev_printk_level(netdev_info, KERN_INFO);
6535 
6536 static void __net_exit netdev_exit(struct net *net)
6537 {
6538 	kfree(net->dev_name_head);
6539 	kfree(net->dev_index_head);
6540 }
6541 
6542 static struct pernet_operations __net_initdata netdev_net_ops = {
6543 	.init = netdev_init,
6544 	.exit = netdev_exit,
6545 };
6546 
6547 static void __net_exit default_device_exit(struct net *net)
6548 {
6549 	struct net_device *dev, *aux;
6550 	/*
6551 	 * Push all migratable network devices back to the
6552 	 * initial network namespace
6553 	 */
6554 	rtnl_lock();
6555 	for_each_netdev_safe(net, dev, aux) {
6556 		int err;
6557 		char fb_name[IFNAMSIZ];
6558 
6559 		/* Ignore unmoveable devices (i.e. loopback) */
6560 		if (dev->features & NETIF_F_NETNS_LOCAL)
6561 			continue;
6562 
6563 		/* Leave virtual devices for the generic cleanup */
6564 		if (dev->rtnl_link_ops)
6565 			continue;
6566 
6567 		/* Push remaining network devices to init_net */
6568 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6569 		err = dev_change_net_namespace(dev, &init_net, fb_name);
6570 		if (err) {
6571 			pr_emerg("%s: failed to move %s to init_net: %d\n",
6572 				 __func__, dev->name, err);
6573 			BUG();
6574 		}
6575 	}
6576 	rtnl_unlock();
6577 }
6578 
6579 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6580 {
6581 	/* At exit all network devices most be removed from a network
6582 	 * namespace.  Do this in the reverse order of registration.
6583 	 * Do this across as many network namespaces as possible to
6584 	 * improve batching efficiency.
6585 	 */
6586 	struct net_device *dev;
6587 	struct net *net;
6588 	LIST_HEAD(dev_kill_list);
6589 
6590 	rtnl_lock();
6591 	list_for_each_entry(net, net_list, exit_list) {
6592 		for_each_netdev_reverse(net, dev) {
6593 			if (dev->rtnl_link_ops)
6594 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6595 			else
6596 				unregister_netdevice_queue(dev, &dev_kill_list);
6597 		}
6598 	}
6599 	unregister_netdevice_many(&dev_kill_list);
6600 	list_del(&dev_kill_list);
6601 	rtnl_unlock();
6602 }
6603 
6604 static struct pernet_operations __net_initdata default_device_ops = {
6605 	.exit = default_device_exit,
6606 	.exit_batch = default_device_exit_batch,
6607 };
6608 
6609 /*
6610  *	Initialize the DEV module. At boot time this walks the device list and
6611  *	unhooks any devices that fail to initialise (normally hardware not
6612  *	present) and leaves us with a valid list of present and active devices.
6613  *
6614  */
6615 
6616 /*
6617  *       This is called single threaded during boot, so no need
6618  *       to take the rtnl semaphore.
6619  */
6620 static int __init net_dev_init(void)
6621 {
6622 	int i, rc = -ENOMEM;
6623 
6624 	BUG_ON(!dev_boot_phase);
6625 
6626 	if (dev_proc_init())
6627 		goto out;
6628 
6629 	if (netdev_kobject_init())
6630 		goto out;
6631 
6632 	INIT_LIST_HEAD(&ptype_all);
6633 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
6634 		INIT_LIST_HEAD(&ptype_base[i]);
6635 
6636 	if (register_pernet_subsys(&netdev_net_ops))
6637 		goto out;
6638 
6639 	/*
6640 	 *	Initialise the packet receive queues.
6641 	 */
6642 
6643 	for_each_possible_cpu(i) {
6644 		struct softnet_data *sd = &per_cpu(softnet_data, i);
6645 
6646 		memset(sd, 0, sizeof(*sd));
6647 		skb_queue_head_init(&sd->input_pkt_queue);
6648 		skb_queue_head_init(&sd->process_queue);
6649 		sd->completion_queue = NULL;
6650 		INIT_LIST_HEAD(&sd->poll_list);
6651 		sd->output_queue = NULL;
6652 		sd->output_queue_tailp = &sd->output_queue;
6653 #ifdef CONFIG_RPS
6654 		sd->csd.func = rps_trigger_softirq;
6655 		sd->csd.info = sd;
6656 		sd->csd.flags = 0;
6657 		sd->cpu = i;
6658 #endif
6659 
6660 		sd->backlog.poll = process_backlog;
6661 		sd->backlog.weight = weight_p;
6662 		sd->backlog.gro_list = NULL;
6663 		sd->backlog.gro_count = 0;
6664 	}
6665 
6666 	dev_boot_phase = 0;
6667 
6668 	/* The loopback device is special if any other network devices
6669 	 * is present in a network namespace the loopback device must
6670 	 * be present. Since we now dynamically allocate and free the
6671 	 * loopback device ensure this invariant is maintained by
6672 	 * keeping the loopback device as the first device on the
6673 	 * list of network devices.  Ensuring the loopback devices
6674 	 * is the first device that appears and the last network device
6675 	 * that disappears.
6676 	 */
6677 	if (register_pernet_device(&loopback_net_ops))
6678 		goto out;
6679 
6680 	if (register_pernet_device(&default_device_ops))
6681 		goto out;
6682 
6683 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6684 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6685 
6686 	hotcpu_notifier(dev_cpu_callback, 0);
6687 	dst_init();
6688 	dev_mcast_init();
6689 	rc = 0;
6690 out:
6691 	return rc;
6692 }
6693 
6694 subsys_initcall(net_dev_init);
6695 
6696 static int __init initialize_hashrnd(void)
6697 {
6698 	get_random_bytes(&hashrnd, sizeof(hashrnd));
6699 	return 0;
6700 }
6701 
6702 late_initcall_sync(initialize_hashrnd);
6703 
6704