xref: /linux/net/core/dev.c (revision 9baa0b0364103dd726384c71db30b74044754743)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <net/dst.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <net/xfrm.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
124 #include <net/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/events/napi.h>
130 #include <trace/events/net.h>
131 #include <trace/events/skb.h>
132 #include <linux/pci.h>
133 #include <linux/inetdevice.h>
134 #include <linux/cpu_rmap.h>
135 #include <linux/net_tstamp.h>
136 #include <linux/static_key.h>
137 #include <net/flow_keys.h>
138 
139 #include "net-sysfs.h"
140 
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143 
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146 
147 /*
148  *	The list of packet types we will receive (as opposed to discard)
149  *	and the routines to invoke.
150  *
151  *	Why 16. Because with 16 the only overlap we get on a hash of the
152  *	low nibble of the protocol value is RARP/SNAP/X.25.
153  *
154  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
155  *             sure which should go first, but I bet it won't make much
156  *             difference if we are running VLANs.  The good news is that
157  *             this protocol won't be in the list unless compiled in, so
158  *             the average user (w/out VLANs) will not be adversely affected.
159  *             --BLG
160  *
161  *		0800	IP
162  *		8100    802.1Q VLAN
163  *		0001	802.3
164  *		0002	AX.25
165  *		0004	802.2
166  *		8035	RARP
167  *		0005	SNAP
168  *		0805	X.25
169  *		0806	ARP
170  *		8137	IPX
171  *		0009	Localtalk
172  *		86DD	IPv6
173  */
174 
175 #define PTYPE_HASH_SIZE	(16)
176 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
177 
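/* A protocol handler lands in bucket (ntohs(type) & PTYPE_HASH_MASK), so for
 * example ETH_P_IP (0x0800) hashes to bucket 0 and ETH_P_ARP (0x0806) to
 * bucket 6; ETH_P_ALL taps are kept on the separate ptype_all list instead.
 */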
178 static DEFINE_SPINLOCK(ptype_lock);
179 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
180 static struct list_head ptype_all __read_mostly;	/* Taps */
181 
182 /*
183  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
184  * semaphore.
185  *
186  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
187  *
188  * Writers must hold the rtnl semaphore while they loop through the
189  * dev_base_head list, and hold dev_base_lock for writing when they do the
190  * actual updates.  This allows pure readers to access the list even
191  * while a writer is preparing to update it.
192  *
193  * To put it another way, dev_base_lock is held for writing only to
194  * protect against pure readers; the rtnl semaphore provides the
195  * protection against other writers.
196  *
197  * See, for example usages, register_netdevice() and
198  * unregister_netdevice(), which must be called with the rtnl
199  * semaphore held.
200  */
201 DEFINE_RWLOCK(dev_base_lock);
202 EXPORT_SYMBOL(dev_base_lock);
203 
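/* Bump the per-namespace device list generation counter, skipping zero.
 * The counter lets readers such as the netlink dump code detect that the
 * device list changed underneath them.
 */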
204 static inline void dev_base_seq_inc(struct net *net)
205 {
206 	while (++net->dev_base_seq == 0);
207 }
208 
209 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
210 {
211 	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
212 
213 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
214 }
215 
216 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
217 {
218 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
219 }
220 
221 static inline void rps_lock(struct softnet_data *sd)
222 {
223 #ifdef CONFIG_RPS
224 	spin_lock(&sd->input_pkt_queue.lock);
225 #endif
226 }
227 
228 static inline void rps_unlock(struct softnet_data *sd)
229 {
230 #ifdef CONFIG_RPS
231 	spin_unlock(&sd->input_pkt_queue.lock);
232 #endif
233 }
234 
235 /* Device list insertion */
236 static int list_netdevice(struct net_device *dev)
237 {
238 	struct net *net = dev_net(dev);
239 
240 	ASSERT_RTNL();
241 
242 	write_lock_bh(&dev_base_lock);
243 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
244 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
245 	hlist_add_head_rcu(&dev->index_hlist,
246 			   dev_index_hash(net, dev->ifindex));
247 	write_unlock_bh(&dev_base_lock);
248 
249 	dev_base_seq_inc(net);
250 
251 	return 0;
252 }
253 
254 /* Device list removal
255  * caller must respect a RCU grace period before freeing/reusing dev
256  */
257 static void unlist_netdevice(struct net_device *dev)
258 {
259 	ASSERT_RTNL();
260 
261 	/* Unlink dev from the device chain */
262 	write_lock_bh(&dev_base_lock);
263 	list_del_rcu(&dev->dev_list);
264 	hlist_del_rcu(&dev->name_hlist);
265 	hlist_del_rcu(&dev->index_hlist);
266 	write_unlock_bh(&dev_base_lock);
267 
268 	dev_base_seq_inc(dev_net(dev));
269 }
270 
271 /*
272  *	Our notifier list
273  */
274 
275 static RAW_NOTIFIER_HEAD(netdev_chain);
276 
277 /*
278  *	Device drivers call our routines to queue packets here. We empty the
279  *	queue in the local softnet handler.
280  */
281 
282 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
283 EXPORT_PER_CPU_SYMBOL(softnet_data);
284 
285 #ifdef CONFIG_LOCKDEP
286 /*
287  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
288  * according to dev->type
289  */
290 static const unsigned short netdev_lock_type[] =
291 	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
292 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
293 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
294 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
295 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
296 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
297 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
298 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
299 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
300 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
301 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
302 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
303 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
304 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
305 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
306 
307 static const char *const netdev_lock_name[] =
308 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
309 	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
310 	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
311 	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
312 	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
313 	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
314 	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
315 	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
316 	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
317 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
318 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
319 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
320 	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
321 	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
322 	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
323 
324 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
325 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
326 
327 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
328 {
329 	int i;
330 
331 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
332 		if (netdev_lock_type[i] == dev_type)
333 			return i;
334 	/* the last key is used by default */
335 	return ARRAY_SIZE(netdev_lock_type) - 1;
336 }
337 
338 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
339 						 unsigned short dev_type)
340 {
341 	int i;
342 
343 	i = netdev_lock_pos(dev_type);
344 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
345 				   netdev_lock_name[i]);
346 }
347 
348 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
349 {
350 	int i;
351 
352 	i = netdev_lock_pos(dev->type);
353 	lockdep_set_class_and_name(&dev->addr_list_lock,
354 				   &netdev_addr_lock_key[i],
355 				   netdev_lock_name[i]);
356 }
357 #else
358 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
359 						 unsigned short dev_type)
360 {
361 }
362 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
363 {
364 }
365 #endif
366 
367 /*******************************************************************************
368 
369 		Protocol management and registration routines
370 
371 *******************************************************************************/
372 
373 /*
374  *	Add a protocol ID to the list. Now that the input handler is
375  *	smarter we can dispense with all the messy stuff that used to be
376  *	here.
377  *
378  *	BEWARE!!! Protocol handlers, mangling input packets,
379  *	MUST BE last in hash buckets and checking protocol handlers
380  *	MUST start from promiscuous ptype_all chain in net_bh.
381  *	It is true now, do not change it.
382  *	Explanation follows: if protocol handler, mangling packet, will
383  *	be the first on list, it is not able to sense, that packet
384  *	is cloned and should be copied-on-write, so that it will
385  *	change it and subsequent readers will get broken packet.
386  *							--ANK (980803)
387  */
388 
389 static inline struct list_head *ptype_head(const struct packet_type *pt)
390 {
391 	if (pt->type == htons(ETH_P_ALL))
392 		return &ptype_all;
393 	else
394 		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
395 }
396 
397 /**
398  *	dev_add_pack - add packet handler
399  *	@pt: packet type declaration
400  *
401  *	Add a protocol handler to the networking stack. The passed &packet_type
402  *	is linked into kernel lists and may not be freed until it has been
403  *	removed from the kernel lists.
404  *
405  *	This call does not sleep, therefore it cannot
406  *	guarantee that all CPUs currently in the middle of receiving packets
407  *	will see the new packet type (until the next received packet).
408  */
409 
410 void dev_add_pack(struct packet_type *pt)
411 {
412 	struct list_head *head = ptype_head(pt);
413 
414 	spin_lock(&ptype_lock);
415 	list_add_rcu(&pt->list, head);
416 	spin_unlock(&ptype_lock);
417 }
418 EXPORT_SYMBOL(dev_add_pack);
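
/* Illustrative sketch of a dev_add_pack() caller; ETH_P_MYPROTO and
 * my_proto_rcv are hypothetical names, only the packet_type fields used
 * here are real:
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_MYPROTO),
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *
 * Leaving .dev NULL makes the handler match packets from any device.
 */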
419 
420 /**
421  *	__dev_remove_pack	 - remove packet handler
422  *	@pt: packet type declaration
423  *
424  *	Remove a protocol handler that was previously added to the kernel
425  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
426  *	from the kernel lists and can be freed or reused once this function
427  *	returns.
428  *
429  *      The packet type might still be in use by receivers
430  *	and must not be freed until after all the CPUs have gone
431  *	through a quiescent state.
432  */
433 void __dev_remove_pack(struct packet_type *pt)
434 {
435 	struct list_head *head = ptype_head(pt);
436 	struct packet_type *pt1;
437 
438 	spin_lock(&ptype_lock);
439 
440 	list_for_each_entry(pt1, head, list) {
441 		if (pt == pt1) {
442 			list_del_rcu(&pt->list);
443 			goto out;
444 		}
445 	}
446 
447 	pr_warn("dev_remove_pack: %p not found\n", pt);
448 out:
449 	spin_unlock(&ptype_lock);
450 }
451 EXPORT_SYMBOL(__dev_remove_pack);
452 
453 /**
454  *	dev_remove_pack	 - remove packet handler
455  *	@pt: packet type declaration
456  *
457  *	Remove a protocol handler that was previously added to the kernel
458  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
459  *	from the kernel lists and can be freed or reused once this function
460  *	returns.
461  *
462  *	This call sleeps to guarantee that no CPU is looking at the packet
463  *	type after return.
464  */
465 void dev_remove_pack(struct packet_type *pt)
466 {
467 	__dev_remove_pack(pt);
468 
469 	synchronize_net();
470 }
471 EXPORT_SYMBOL(dev_remove_pack);
472 
473 /******************************************************************************
474 
475 		      Device Boot-time Settings Routines
476 
477 *******************************************************************************/
478 
479 /* Boot time configuration table */
480 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
481 
482 /**
483  *	netdev_boot_setup_add	- add new setup entry
484  *	@name: name of the device
485  *	@map: configured settings for the device
486  *
487  *	Adds new setup entry to the dev_boot_setup list.  The function
488  *	returns 0 on error and 1 on success.  This is a generic routine for
489  *	all netdevices.
490  */
491 static int netdev_boot_setup_add(char *name, struct ifmap *map)
492 {
493 	struct netdev_boot_setup *s;
494 	int i;
495 
496 	s = dev_boot_setup;
497 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
498 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
499 			memset(s[i].name, 0, sizeof(s[i].name));
500 			strlcpy(s[i].name, name, IFNAMSIZ);
501 			memcpy(&s[i].map, map, sizeof(s[i].map));
502 			break;
503 		}
504 	}
505 
506 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
507 }
508 
509 /**
510  *	netdev_boot_setup_check	- check boot time settings
511  *	@dev: the netdevice
512  *
513  * 	Check boot time settings for the device.
514  *	The found settings are set for the device to be used
515  *	later in the device probing.
516  *	Returns 0 if no settings are found, 1 if they are.
517  */
518 int netdev_boot_setup_check(struct net_device *dev)
519 {
520 	struct netdev_boot_setup *s = dev_boot_setup;
521 	int i;
522 
523 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
524 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
525 		    !strcmp(dev->name, s[i].name)) {
526 			dev->irq 	= s[i].map.irq;
527 			dev->base_addr 	= s[i].map.base_addr;
528 			dev->mem_start 	= s[i].map.mem_start;
529 			dev->mem_end 	= s[i].map.mem_end;
530 			return 1;
531 		}
532 	}
533 	return 0;
534 }
535 EXPORT_SYMBOL(netdev_boot_setup_check);
536 
537 
538 /**
539  *	netdev_boot_base	- get address from boot time settings
540  *	@prefix: prefix for network device
541  *	@unit: id for network device
542  *
543  * 	Check boot time settings for the base address of the device.
544  *	The found settings are set for the device to be used
545  *	later in the device probing.
546  *	Returns 0 if no settings found.
547  */
548 unsigned long netdev_boot_base(const char *prefix, int unit)
549 {
550 	const struct netdev_boot_setup *s = dev_boot_setup;
551 	char name[IFNAMSIZ];
552 	int i;
553 
554 	sprintf(name, "%s%d", prefix, unit);
555 
556 	/*
557 	 * If device already registered then return base of 1
558 	 * to indicate not to probe for this interface
559 	 */
560 	if (__dev_get_by_name(&init_net, name))
561 		return 1;
562 
563 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
564 		if (!strcmp(name, s[i].name))
565 			return s[i].map.base_addr;
566 	return 0;
567 }
568 
569 /*
570  * Saves at boot time configured settings for any netdevice.
571  */
572 int __init netdev_boot_setup(char *str)
573 {
574 	int ints[5];
575 	struct ifmap map;
576 
577 	str = get_options(str, ARRAY_SIZE(ints), ints);
578 	if (!str || !*str)
579 		return 0;
580 
581 	/* Save settings */
582 	memset(&map, 0, sizeof(map));
583 	if (ints[0] > 0)
584 		map.irq = ints[1];
585 	if (ints[0] > 1)
586 		map.base_addr = ints[2];
587 	if (ints[0] > 2)
588 		map.mem_start = ints[3];
589 	if (ints[0] > 3)
590 		map.mem_end = ints[4];
591 
592 	/* Add new entry to the list */
593 	return netdev_boot_setup_add(str, &map);
594 }
595 
596 __setup("netdev=", netdev_boot_setup);
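
/* The boot parameter takes up to four integers followed by an interface
 * name; the values below are only an example:
 *
 *	netdev=5,0x340,eth0
 *
 * sets map.irq = 5 and map.base_addr = 0x340 for eth0.  Integers that are
 * left out simply leave the corresponding ifmap fields at zero.
 */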
597 
598 /*******************************************************************************
599 
600 			    Device Interface Subroutines
601 
602 *******************************************************************************/
603 
604 /**
605  *	__dev_get_by_name	- find a device by its name
606  *	@net: the applicable net namespace
607  *	@name: name to find
608  *
609  *	Find an interface by name. Must be called under the RTNL semaphore
610  *	or @dev_base_lock. If the name is found a pointer to the device
611  *	is returned. If the name is not found then %NULL is returned. The
612  *	reference counters are not incremented so the caller must be
613  *	careful with locks.
614  */
615 
616 struct net_device *__dev_get_by_name(struct net *net, const char *name)
617 {
618 	struct hlist_node *p;
619 	struct net_device *dev;
620 	struct hlist_head *head = dev_name_hash(net, name);
621 
622 	hlist_for_each_entry(dev, p, head, name_hlist)
623 		if (!strncmp(dev->name, name, IFNAMSIZ))
624 			return dev;
625 
626 	return NULL;
627 }
628 EXPORT_SYMBOL(__dev_get_by_name);
629 
630 /**
631  *	dev_get_by_name_rcu	- find a device by its name
632  *	@net: the applicable net namespace
633  *	@name: name to find
634  *
635  *	Find an interface by name.
636  *	If the name is found a pointer to the device is returned.
637  * 	If the name is not found then %NULL is returned.
638  *	The reference counters are not incremented so the caller must be
639  *	careful with locks. The caller must hold RCU lock.
640  */
641 
642 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
643 {
644 	struct hlist_node *p;
645 	struct net_device *dev;
646 	struct hlist_head *head = dev_name_hash(net, name);
647 
648 	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
649 		if (!strncmp(dev->name, name, IFNAMSIZ))
650 			return dev;
651 
652 	return NULL;
653 }
654 EXPORT_SYMBOL(dev_get_by_name_rcu);
655 
656 /**
657  *	dev_get_by_name		- find a device by its name
658  *	@net: the applicable net namespace
659  *	@name: name to find
660  *
661  *	Find an interface by name. This can be called from any
662  *	context and does its own locking. The returned handle has
663  *	the usage count incremented and the caller must use dev_put() to
664  *	release it when it is no longer needed. %NULL is returned if no
665  *	matching device is found.
666  */
667 
668 struct net_device *dev_get_by_name(struct net *net, const char *name)
669 {
670 	struct net_device *dev;
671 
672 	rcu_read_lock();
673 	dev = dev_get_by_name_rcu(net, name);
674 	if (dev)
675 		dev_hold(dev);
676 	rcu_read_unlock();
677 	return dev;
678 }
679 EXPORT_SYMBOL(dev_get_by_name);
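
/* Sketch of a refcounted lookup; the interface name is only an example:
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */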
680 
681 /**
682  *	__dev_get_by_index - find a device by its ifindex
683  *	@net: the applicable net namespace
684  *	@ifindex: index of device
685  *
686  *	Search for an interface by index. Returns a pointer to the device,
687  *	or %NULL if it is not found. The device has not
688  *	had its reference counter increased so the caller must be careful
689  *	about locking. The caller must hold either the RTNL semaphore
690  *	or @dev_base_lock.
691  */
692 
693 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
694 {
695 	struct hlist_node *p;
696 	struct net_device *dev;
697 	struct hlist_head *head = dev_index_hash(net, ifindex);
698 
699 	hlist_for_each_entry(dev, p, head, index_hlist)
700 		if (dev->ifindex == ifindex)
701 			return dev;
702 
703 	return NULL;
704 }
705 EXPORT_SYMBOL(__dev_get_by_index);
706 
707 /**
708  *	dev_get_by_index_rcu - find a device by its ifindex
709  *	@net: the applicable net namespace
710  *	@ifindex: index of device
711  *
712  *	Search for an interface by index. Returns a pointer to the device,
713  *	or %NULL if it is not found. The device has not
714  *	had its reference counter increased so the caller must be careful
715  *	about locking. The caller must hold RCU lock.
716  */
717 
718 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
719 {
720 	struct hlist_node *p;
721 	struct net_device *dev;
722 	struct hlist_head *head = dev_index_hash(net, ifindex);
723 
724 	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
725 		if (dev->ifindex == ifindex)
726 			return dev;
727 
728 	return NULL;
729 }
730 EXPORT_SYMBOL(dev_get_by_index_rcu);
731 
732 
733 /**
734  *	dev_get_by_index - find a device by its ifindex
735  *	@net: the applicable net namespace
736  *	@ifindex: index of device
737  *
738  *	Search for an interface by index. Returns a pointer to the device,
739  *	or %NULL if it is not found. The device returned has
740  *	had a reference added and the pointer is safe until the user calls
741  *	dev_put to indicate they have finished with it.
742  */
743 
744 struct net_device *dev_get_by_index(struct net *net, int ifindex)
745 {
746 	struct net_device *dev;
747 
748 	rcu_read_lock();
749 	dev = dev_get_by_index_rcu(net, ifindex);
750 	if (dev)
751 		dev_hold(dev);
752 	rcu_read_unlock();
753 	return dev;
754 }
755 EXPORT_SYMBOL(dev_get_by_index);
756 
757 /**
758  *	dev_getbyhwaddr_rcu - find a device by its hardware address
759  *	@net: the applicable net namespace
760  *	@type: media type of device
761  *	@ha: hardware address
762  *
763  *	Search for an interface by MAC address. Returns a pointer to the
764  *	device, or %NULL if it is not found.
765  *	The caller must hold RCU or RTNL.
766  *	The returned device has not had its ref count increased
767  *	and the caller must therefore be careful about locking
768  *
769  */
770 
771 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
772 				       const char *ha)
773 {
774 	struct net_device *dev;
775 
776 	for_each_netdev_rcu(net, dev)
777 		if (dev->type == type &&
778 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
779 			return dev;
780 
781 	return NULL;
782 }
783 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
784 
785 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
786 {
787 	struct net_device *dev;
788 
789 	ASSERT_RTNL();
790 	for_each_netdev(net, dev)
791 		if (dev->type == type)
792 			return dev;
793 
794 	return NULL;
795 }
796 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
797 
798 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
799 {
800 	struct net_device *dev, *ret = NULL;
801 
802 	rcu_read_lock();
803 	for_each_netdev_rcu(net, dev)
804 		if (dev->type == type) {
805 			dev_hold(dev);
806 			ret = dev;
807 			break;
808 		}
809 	rcu_read_unlock();
810 	return ret;
811 }
812 EXPORT_SYMBOL(dev_getfirstbyhwtype);
813 
814 /**
815  *	dev_get_by_flags_rcu - find any device with given flags
816  *	@net: the applicable net namespace
817  *	@if_flags: IFF_* values
818  *	@mask: bitmask of bits in if_flags to check
819  *
820  *	Search for any interface with the given flags. Returns a pointer to the
821  *	first matching device, or %NULL if none is found. Must be called inside
822  *	rcu_read_lock(), and result refcount is unchanged.
823  */
824 
825 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
826 				    unsigned short mask)
827 {
828 	struct net_device *dev, *ret;
829 
830 	ret = NULL;
831 	for_each_netdev_rcu(net, dev) {
832 		if (((dev->flags ^ if_flags) & mask) == 0) {
833 			ret = dev;
834 			break;
835 		}
836 	}
837 	return ret;
838 }
839 EXPORT_SYMBOL(dev_get_by_flags_rcu);
840 
841 /**
842  *	dev_valid_name - check if name is okay for network device
843  *	@name: name string
844  *
845  *	Network device names need to be valid file names to
846  *	allow sysfs to work.  We also disallow any kind of
847  *	whitespace.
848  */
849 bool dev_valid_name(const char *name)
850 {
851 	if (*name == '\0')
852 		return false;
853 	if (strlen(name) >= IFNAMSIZ)
854 		return false;
855 	if (!strcmp(name, ".") || !strcmp(name, ".."))
856 		return false;
857 
858 	while (*name) {
859 		if (*name == '/' || isspace(*name))
860 			return false;
861 		name++;
862 	}
863 	return true;
864 }
865 EXPORT_SYMBOL(dev_valid_name);
866 
867 /**
868  *	__dev_alloc_name - allocate a name for a device
869  *	@net: network namespace to allocate the device name in
870  *	@name: name format string
871  *	@buf:  scratch buffer and result name string
872  *
873  *	Passed a format string - eg "lt%d" - it will try and find a suitable
874  *	id. It scans list of devices to build up a free map, then chooses
875  *	the first empty slot. The caller must hold the dev_base or rtnl lock
876  *	while allocating the name and adding the device in order to avoid
877  *	duplicates.
878  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
879  *	Returns the number of the unit assigned or a negative errno code.
880  */
881 
882 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
883 {
884 	int i = 0;
885 	const char *p;
886 	const int max_netdevices = 8*PAGE_SIZE;
887 	unsigned long *inuse;
888 	struct net_device *d;
889 
890 	p = strnchr(name, IFNAMSIZ-1, '%');
891 	if (p) {
892 		/*
893 		 * Verify the string as this thing may have come from
894 		 * the user.  There must be either one "%d" and no other "%"
895 		 * characters.
896 		 */
897 		if (p[1] != 'd' || strchr(p + 2, '%'))
898 			return -EINVAL;
899 
900 		/* Use one page as a bit array of possible slots */
901 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
902 		if (!inuse)
903 			return -ENOMEM;
904 
905 		for_each_netdev(net, d) {
906 			if (!sscanf(d->name, name, &i))
907 				continue;
908 			if (i < 0 || i >= max_netdevices)
909 				continue;
910 
911 			/*  avoid cases where sscanf is not exact inverse of printf */
912 			snprintf(buf, IFNAMSIZ, name, i);
913 			if (!strncmp(buf, d->name, IFNAMSIZ))
914 				set_bit(i, inuse);
915 		}
916 
917 		i = find_first_zero_bit(inuse, max_netdevices);
918 		free_page((unsigned long) inuse);
919 	}
920 
921 	if (buf != name)
922 		snprintf(buf, IFNAMSIZ, name, i);
923 	if (!__dev_get_by_name(net, buf))
924 		return i;
925 
926 	/* It is possible to run out of possible slots
927 	 * when the name is long and there isn't enough space left
928 	 * for the digits, or if all bits are used.
929 	 */
930 	return -ENFILE;
931 }
932 
933 /**
934  *	dev_alloc_name - allocate a name for a device
935  *	@dev: device
936  *	@name: name format string
937  *
938  *	Passed a format string - eg "lt%d" - it will try and find a suitable
939  *	id. It scans list of devices to build up a free map, then chooses
940  *	the first empty slot. The caller must hold the dev_base or rtnl lock
941  *	while allocating the name and adding the device in order to avoid
942  *	duplicates.
943  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
944  *	Returns the number of the unit assigned or a negative errno code.
945  */
946 
947 int dev_alloc_name(struct net_device *dev, const char *name)
948 {
949 	char buf[IFNAMSIZ];
950 	struct net *net;
951 	int ret;
952 
953 	BUG_ON(!dev_net(dev));
954 	net = dev_net(dev);
955 	ret = __dev_alloc_name(net, name, buf);
956 	if (ret >= 0)
957 		strlcpy(dev->name, buf, IFNAMSIZ);
958 	return ret;
959 }
960 EXPORT_SYMBOL(dev_alloc_name);
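
/* For example, dev_alloc_name(dev, "eth%d") scans the namespace of @dev for
 * names of that form and stores the first free one ("eth0", "eth1", ...) in
 * dev->name, returning the unit number that was picked.
 */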
961 
962 static int dev_alloc_name_ns(struct net *net,
963 			     struct net_device *dev,
964 			     const char *name)
965 {
966 	char buf[IFNAMSIZ];
967 	int ret;
968 
969 	ret = __dev_alloc_name(net, name, buf);
970 	if (ret >= 0)
971 		strlcpy(dev->name, buf, IFNAMSIZ);
972 	return ret;
973 }
974 
975 static int dev_get_valid_name(struct net *net,
976 			      struct net_device *dev,
977 			      const char *name)
978 {
979 	BUG_ON(!net);
980 
981 	if (!dev_valid_name(name))
982 		return -EINVAL;
983 
984 	if (strchr(name, '%'))
985 		return dev_alloc_name_ns(net, dev, name);
986 	else if (__dev_get_by_name(net, name))
987 		return -EEXIST;
988 	else if (dev->name != name)
989 		strlcpy(dev->name, name, IFNAMSIZ);
990 
991 	return 0;
992 }
993 
994 /**
995  *	dev_change_name - change name of a device
996  *	@dev: device
997  *	@newname: name (or format string) must be at least IFNAMSIZ
998  *
999  *	Change name of a device, can pass format strings "eth%d"
1000  *	for wildcarding.
1001  */
1002 int dev_change_name(struct net_device *dev, const char *newname)
1003 {
1004 	char oldname[IFNAMSIZ];
1005 	int err = 0;
1006 	int ret;
1007 	struct net *net;
1008 
1009 	ASSERT_RTNL();
1010 	BUG_ON(!dev_net(dev));
1011 
1012 	net = dev_net(dev);
1013 	if (dev->flags & IFF_UP)
1014 		return -EBUSY;
1015 
1016 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1017 		return 0;
1018 
1019 	memcpy(oldname, dev->name, IFNAMSIZ);
1020 
1021 	err = dev_get_valid_name(net, dev, newname);
1022 	if (err < 0)
1023 		return err;
1024 
1025 rollback:
1026 	ret = device_rename(&dev->dev, dev->name);
1027 	if (ret) {
1028 		memcpy(dev->name, oldname, IFNAMSIZ);
1029 		return ret;
1030 	}
1031 
1032 	write_lock_bh(&dev_base_lock);
1033 	hlist_del_rcu(&dev->name_hlist);
1034 	write_unlock_bh(&dev_base_lock);
1035 
1036 	synchronize_rcu();
1037 
1038 	write_lock_bh(&dev_base_lock);
1039 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1040 	write_unlock_bh(&dev_base_lock);
1041 
1042 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1043 	ret = notifier_to_errno(ret);
1044 
1045 	if (ret) {
1046 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1047 		if (err >= 0) {
1048 			err = ret;
1049 			memcpy(dev->name, oldname, IFNAMSIZ);
1050 			goto rollback;
1051 		} else {
1052 			pr_err("%s: name change rollback failed: %d\n",
1053 			       dev->name, ret);
1054 		}
1055 	}
1056 
1057 	return err;
1058 }
1059 
1060 /**
1061  *	dev_set_alias - change ifalias of a device
1062  *	@dev: device
1063  *	@alias: name up to IFALIASZ
1064  *	@len: limit of bytes to copy from info
1065  *
1066  *	Set ifalias for a device.
1067  */
1068 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1069 {
1070 	char *new_ifalias;
1071 
1072 	ASSERT_RTNL();
1073 
1074 	if (len >= IFALIASZ)
1075 		return -EINVAL;
1076 
1077 	if (!len) {
1078 		if (dev->ifalias) {
1079 			kfree(dev->ifalias);
1080 			dev->ifalias = NULL;
1081 		}
1082 		return 0;
1083 	}
1084 
1085 	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1086 	if (!new_ifalias)
1087 		return -ENOMEM;
1088 	dev->ifalias = new_ifalias;
1089 
1090 	strlcpy(dev->ifalias, alias, len+1);
1091 	return len;
1092 }
1093 
1094 
1095 /**
1096  *	netdev_features_change - device changes features
1097  *	@dev: device to cause notification
1098  *
1099  *	Called to indicate a device has changed features.
1100  */
1101 void netdev_features_change(struct net_device *dev)
1102 {
1103 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1104 }
1105 EXPORT_SYMBOL(netdev_features_change);
1106 
1107 /**
1108  *	netdev_state_change - device changes state
1109  *	@dev: device to cause notification
1110  *
1111  *	Called to indicate a device has changed state. This function calls
1112  *	the notifier chains for netdev_chain and sends a NEWLINK message
1113  *	to the routing socket.
1114  */
1115 void netdev_state_change(struct net_device *dev)
1116 {
1117 	if (dev->flags & IFF_UP) {
1118 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
1119 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1120 	}
1121 }
1122 EXPORT_SYMBOL(netdev_state_change);
1123 
1124 /**
1125  * 	netdev_notify_peers - notify network peers about existence of @dev
1126  * 	@dev: network device
1127  *
1128  * Generate traffic such that interested network peers are aware of
1129  * @dev, such as by generating a gratuitous ARP. This may be used when
1130  * a device wants to inform the rest of the network about some sort of
1131  * reconfiguration such as a failover event or virtual machine
1132  * migration.
1133  */
1134 void netdev_notify_peers(struct net_device *dev)
1135 {
1136 	rtnl_lock();
1137 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1138 	rtnl_unlock();
1139 }
1140 EXPORT_SYMBOL(netdev_notify_peers);
1141 
1142 /**
1143  *	dev_load 	- load a network module
1144  *	@net: the applicable net namespace
1145  *	@name: name of interface
1146  *
1147  *	If a network interface is not present and the process has suitable
1148  *	privileges this function loads the module. If module loading is not
1149  *	available in this kernel then it becomes a nop.
1150  */
1151 
1152 void dev_load(struct net *net, const char *name)
1153 {
1154 	struct net_device *dev;
1155 	int no_module;
1156 
1157 	rcu_read_lock();
1158 	dev = dev_get_by_name_rcu(net, name);
1159 	rcu_read_unlock();
1160 
1161 	no_module = !dev;
1162 	if (no_module && capable(CAP_NET_ADMIN))
1163 		no_module = request_module("netdev-%s", name);
1164 	if (no_module && capable(CAP_SYS_MODULE)) {
1165 		if (!request_module("%s", name))
1166 			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1167 				name);
1168 	}
1169 }
1170 EXPORT_SYMBOL(dev_load);
1171 
1172 static int __dev_open(struct net_device *dev)
1173 {
1174 	const struct net_device_ops *ops = dev->netdev_ops;
1175 	int ret;
1176 
1177 	ASSERT_RTNL();
1178 
1179 	if (!netif_device_present(dev))
1180 		return -ENODEV;
1181 
1182 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1183 	ret = notifier_to_errno(ret);
1184 	if (ret)
1185 		return ret;
1186 
1187 	set_bit(__LINK_STATE_START, &dev->state);
1188 
1189 	if (ops->ndo_validate_addr)
1190 		ret = ops->ndo_validate_addr(dev);
1191 
1192 	if (!ret && ops->ndo_open)
1193 		ret = ops->ndo_open(dev);
1194 
1195 	if (ret)
1196 		clear_bit(__LINK_STATE_START, &dev->state);
1197 	else {
1198 		dev->flags |= IFF_UP;
1199 		net_dmaengine_get();
1200 		dev_set_rx_mode(dev);
1201 		dev_activate(dev);
1202 		add_device_randomness(dev->dev_addr, dev->addr_len);
1203 	}
1204 
1205 	return ret;
1206 }
1207 
1208 /**
1209  *	dev_open	- prepare an interface for use.
1210  *	@dev:	device to open
1211  *
1212  *	Takes a device from down to up state. The device's private open
1213  *	function is invoked and then the multicast lists are loaded. Finally
1214  *	the device is moved into the up state and a %NETDEV_UP message is
1215  *	sent to the netdev notifier chain.
1216  *
1217  *	Calling this function on an active interface is a nop. On a failure
1218  *	a negative errno code is returned.
1219  */
1220 int dev_open(struct net_device *dev)
1221 {
1222 	int ret;
1223 
1224 	if (dev->flags & IFF_UP)
1225 		return 0;
1226 
1227 	ret = __dev_open(dev);
1228 	if (ret < 0)
1229 		return ret;
1230 
1231 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1232 	call_netdevice_notifiers(NETDEV_UP, dev);
1233 
1234 	return ret;
1235 }
1236 EXPORT_SYMBOL(dev_open);
1237 
1238 static int __dev_close_many(struct list_head *head)
1239 {
1240 	struct net_device *dev;
1241 
1242 	ASSERT_RTNL();
1243 	might_sleep();
1244 
1245 	list_for_each_entry(dev, head, unreg_list) {
1246 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1247 
1248 		clear_bit(__LINK_STATE_START, &dev->state);
1249 
1250 		/* Synchronize to scheduled poll. We cannot touch poll list, it
1251 		 * can be even on different cpu. So just clear netif_running().
1252 		 *
1253 		 * dev->stop() will invoke napi_disable() on all of its
1254 		 * napi_struct instances on this device.
1255 		 */
1256 		smp_mb__after_clear_bit(); /* Commit netif_running(). */
1257 	}
1258 
1259 	dev_deactivate_many(head);
1260 
1261 	list_for_each_entry(dev, head, unreg_list) {
1262 		const struct net_device_ops *ops = dev->netdev_ops;
1263 
1264 		/*
1265 		 *	Call the device specific close. This cannot fail.
1266 		 *	Only if device is UP
1267 		 *
1268 		 *	We allow it to be called even after a DETACH hot-plug
1269 		 *	event.
1270 		 */
1271 		if (ops->ndo_stop)
1272 			ops->ndo_stop(dev);
1273 
1274 		dev->flags &= ~IFF_UP;
1275 		net_dmaengine_put();
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 static int __dev_close(struct net_device *dev)
1282 {
1283 	int retval;
1284 	LIST_HEAD(single);
1285 
1286 	list_add(&dev->unreg_list, &single);
1287 	retval = __dev_close_many(&single);
1288 	list_del(&single);
1289 	return retval;
1290 }
1291 
1292 static int dev_close_many(struct list_head *head)
1293 {
1294 	struct net_device *dev, *tmp;
1295 	LIST_HEAD(tmp_list);
1296 
1297 	list_for_each_entry_safe(dev, tmp, head, unreg_list)
1298 		if (!(dev->flags & IFF_UP))
1299 			list_move(&dev->unreg_list, &tmp_list);
1300 
1301 	__dev_close_many(head);
1302 
1303 	list_for_each_entry(dev, head, unreg_list) {
1304 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1305 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1306 	}
1307 
1308 	/* rollback_registered_many needs the complete original list */
1309 	list_splice(&tmp_list, head);
1310 	return 0;
1311 }
1312 
1313 /**
1314  *	dev_close - shutdown an interface.
1315  *	@dev: device to shutdown
1316  *
1317  *	This function moves an active device into down state. A
1318  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1319  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1320  *	chain.
1321  */
1322 int dev_close(struct net_device *dev)
1323 {
1324 	if (dev->flags & IFF_UP) {
1325 		LIST_HEAD(single);
1326 
1327 		list_add(&dev->unreg_list, &single);
1328 		dev_close_many(&single);
1329 		list_del(&single);
1330 	}
1331 	return 0;
1332 }
1333 EXPORT_SYMBOL(dev_close);
1334 
1335 
1336 /**
1337  *	dev_disable_lro - disable Large Receive Offload on a device
1338  *	@dev: device
1339  *
1340  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1341  *	called under RTNL.  This is needed if received packets may be
1342  *	forwarded to another interface.
1343  */
1344 void dev_disable_lro(struct net_device *dev)
1345 {
1346 	/*
1347 	 * If we're trying to disable lro on a vlan device
1348 	 * use the underlying physical device instead
1349 	 */
1350 	if (is_vlan_dev(dev))
1351 		dev = vlan_dev_real_dev(dev);
1352 
1353 	dev->wanted_features &= ~NETIF_F_LRO;
1354 	netdev_update_features(dev);
1355 
1356 	if (unlikely(dev->features & NETIF_F_LRO))
1357 		netdev_WARN(dev, "failed to disable LRO!\n");
1358 }
1359 EXPORT_SYMBOL(dev_disable_lro);
1360 
1361 
1362 static int dev_boot_phase = 1;
1363 
1364 /**
1365  *	register_netdevice_notifier - register a network notifier block
1366  *	@nb: notifier
1367  *
1368  *	Register a notifier to be called when network device events occur.
1369  *	The notifier passed is linked into the kernel structures and must
1370  *	not be reused until it has been unregistered. A negative errno code
1371  *	is returned on a failure.
1372  *
1373  * 	When registered, all registration and up events are replayed
1374  *	to the new notifier to allow it to have a race-free
1375  *	view of the network device list.
1376  */
1377 
1378 int register_netdevice_notifier(struct notifier_block *nb)
1379 {
1380 	struct net_device *dev;
1381 	struct net_device *last;
1382 	struct net *net;
1383 	int err;
1384 
1385 	rtnl_lock();
1386 	err = raw_notifier_chain_register(&netdev_chain, nb);
1387 	if (err)
1388 		goto unlock;
1389 	if (dev_boot_phase)
1390 		goto unlock;
1391 	for_each_net(net) {
1392 		for_each_netdev(net, dev) {
1393 			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1394 			err = notifier_to_errno(err);
1395 			if (err)
1396 				goto rollback;
1397 
1398 			if (!(dev->flags & IFF_UP))
1399 				continue;
1400 
1401 			nb->notifier_call(nb, NETDEV_UP, dev);
1402 		}
1403 	}
1404 
1405 unlock:
1406 	rtnl_unlock();
1407 	return err;
1408 
1409 rollback:
1410 	last = dev;
1411 	for_each_net(net) {
1412 		for_each_netdev(net, dev) {
1413 			if (dev == last)
1414 				goto outroll;
1415 
1416 			if (dev->flags & IFF_UP) {
1417 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1418 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1419 			}
1420 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1421 		}
1422 	}
1423 
1424 outroll:
1425 	raw_notifier_chain_unregister(&netdev_chain, nb);
1426 	goto unlock;
1427 }
1428 EXPORT_SYMBOL(register_netdevice_notifier);
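
/* Sketch of a typical notifier user; the function and variable names are
 * made up for illustration.  The notifier data pointer here is the
 * struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP || event == NETDEV_DOWN)
 *			pr_info("%s changed state\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */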
1429 
1430 /**
1431  *	unregister_netdevice_notifier - unregister a network notifier block
1432  *	@nb: notifier
1433  *
1434  *	Unregister a notifier previously registered by
1435  *	register_netdevice_notifier(). The notifier is unlinked from the
1436  *	kernel structures and may then be reused. A negative errno code
1437  *	is returned on a failure.
1438  *
1439  * 	After unregistering, unregister and down device events are synthesized
1440  *	for all devices on the device list to the removed notifier to remove
1441  *	the need for special case cleanup code.
1442  */
1443 
1444 int unregister_netdevice_notifier(struct notifier_block *nb)
1445 {
1446 	struct net_device *dev;
1447 	struct net *net;
1448 	int err;
1449 
1450 	rtnl_lock();
1451 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1452 	if (err)
1453 		goto unlock;
1454 
1455 	for_each_net(net) {
1456 		for_each_netdev(net, dev) {
1457 			if (dev->flags & IFF_UP) {
1458 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1459 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1460 			}
1461 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1462 		}
1463 	}
1464 unlock:
1465 	rtnl_unlock();
1466 	return err;
1467 }
1468 EXPORT_SYMBOL(unregister_netdevice_notifier);
1469 
1470 /**
1471  *	call_netdevice_notifiers - call all network notifier blocks
1472  *      @val: value passed unmodified to notifier function
1473  *      @dev: net_device pointer passed unmodified to notifier function
1474  *
1475  *	Call all network notifier blocks.  Parameters and return value
1476  *	are as for raw_notifier_call_chain().
1477  */
1478 
1479 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1480 {
1481 	ASSERT_RTNL();
1482 	return raw_notifier_call_chain(&netdev_chain, val, dev);
1483 }
1484 EXPORT_SYMBOL(call_netdevice_notifiers);
1485 
1486 static struct static_key netstamp_needed __read_mostly;
1487 #ifdef HAVE_JUMP_LABEL
1488 /* We are not allowed to call static_key_slow_dec() from irq context
1489  * If net_disable_timestamp() is called from irq context, defer the
1490  * static_key_slow_dec() calls.
1491  */
1492 static atomic_t netstamp_needed_deferred;
1493 #endif
1494 
1495 void net_enable_timestamp(void)
1496 {
1497 #ifdef HAVE_JUMP_LABEL
1498 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1499 
1500 	if (deferred) {
1501 		while (--deferred)
1502 			static_key_slow_dec(&netstamp_needed);
1503 		return;
1504 	}
1505 #endif
1506 	WARN_ON(in_interrupt());
1507 	static_key_slow_inc(&netstamp_needed);
1508 }
1509 EXPORT_SYMBOL(net_enable_timestamp);
1510 
1511 void net_disable_timestamp(void)
1512 {
1513 #ifdef HAVE_JUMP_LABEL
1514 	if (in_interrupt()) {
1515 		atomic_inc(&netstamp_needed_deferred);
1516 		return;
1517 	}
1518 #endif
1519 	static_key_slow_dec(&netstamp_needed);
1520 }
1521 EXPORT_SYMBOL(net_disable_timestamp);
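
/* net_enable_timestamp() and net_disable_timestamp() effectively reference
 * count the timestamping users: received packets are only stamped while at
 * least one enable is outstanding, via the netstamp_needed static key.
 */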
1522 
1523 static inline void net_timestamp_set(struct sk_buff *skb)
1524 {
1525 	skb->tstamp.tv64 = 0;
1526 	if (static_key_false(&netstamp_needed))
1527 		__net_timestamp(skb);
1528 }
1529 
1530 #define net_timestamp_check(COND, SKB)			\
1531 	if (static_key_false(&netstamp_needed)) {		\
1532 		if ((COND) && !(SKB)->tstamp.tv64)	\
1533 			__net_timestamp(SKB);		\
1534 	}						\
1535 
1536 static int net_hwtstamp_validate(struct ifreq *ifr)
1537 {
1538 	struct hwtstamp_config cfg;
1539 	enum hwtstamp_tx_types tx_type;
1540 	enum hwtstamp_rx_filters rx_filter;
1541 	int tx_type_valid = 0;
1542 	int rx_filter_valid = 0;
1543 
1544 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1545 		return -EFAULT;
1546 
1547 	if (cfg.flags) /* reserved for future extensions */
1548 		return -EINVAL;
1549 
1550 	tx_type = cfg.tx_type;
1551 	rx_filter = cfg.rx_filter;
1552 
1553 	switch (tx_type) {
1554 	case HWTSTAMP_TX_OFF:
1555 	case HWTSTAMP_TX_ON:
1556 	case HWTSTAMP_TX_ONESTEP_SYNC:
1557 		tx_type_valid = 1;
1558 		break;
1559 	}
1560 
1561 	switch (rx_filter) {
1562 	case HWTSTAMP_FILTER_NONE:
1563 	case HWTSTAMP_FILTER_ALL:
1564 	case HWTSTAMP_FILTER_SOME:
1565 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1566 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1567 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1568 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1569 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1570 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1571 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1572 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1573 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1574 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1575 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1576 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1577 		rx_filter_valid = 1;
1578 		break;
1579 	}
1580 
1581 	if (!tx_type_valid || !rx_filter_valid)
1582 		return -ERANGE;
1583 
1584 	return 0;
1585 }
1586 
1587 static inline bool is_skb_forwardable(struct net_device *dev,
1588 				      struct sk_buff *skb)
1589 {
1590 	unsigned int len;
1591 
1592 	if (!(dev->flags & IFF_UP))
1593 		return false;
1594 
1595 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1596 	if (skb->len <= len)
1597 		return true;
1598 
1599 	/* if TSO is enabled, we don't care about the length as the packet
1600 	 * could be forwarded without being segmented before
1601 	 */
1602 	if (skb_is_gso(skb))
1603 		return true;
1604 
1605 	return false;
1606 }
1607 
1608 /**
1609  * dev_forward_skb - loopback an skb to another netif
1610  *
1611  * @dev: destination network device
1612  * @skb: buffer to forward
1613  *
1614  * return values:
1615  *	NET_RX_SUCCESS	(no congestion)
1616  *	NET_RX_DROP     (packet was dropped, but freed)
1617  *
1618  * dev_forward_skb can be used for injecting an skb from the
1619  * start_xmit function of one device into the receive queue
1620  * of another device.
1621  *
1622  * The receiving device may be in another namespace, so
1623  * we have to clear all information in the skb that could
1624  * impact namespace isolation.
1625  */
1626 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1627 {
1628 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1629 		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1630 			atomic_long_inc(&dev->rx_dropped);
1631 			kfree_skb(skb);
1632 			return NET_RX_DROP;
1633 		}
1634 	}
1635 
1636 	skb_orphan(skb);
1637 	nf_reset(skb);
1638 
1639 	if (unlikely(!is_skb_forwardable(dev, skb))) {
1640 		atomic_long_inc(&dev->rx_dropped);
1641 		kfree_skb(skb);
1642 		return NET_RX_DROP;
1643 	}
1644 	skb->skb_iif = 0;
1645 	skb->dev = dev;
1646 	skb_dst_drop(skb);
1647 	skb->tstamp.tv64 = 0;
1648 	skb->pkt_type = PACKET_HOST;
1649 	skb->protocol = eth_type_trans(skb, dev);
1650 	skb->mark = 0;
1651 	secpath_reset(skb);
1652 	nf_reset(skb);
1653 	return netif_rx(skb);
1654 }
1655 EXPORT_SYMBOL_GPL(dev_forward_skb);
1656 
1657 static inline int deliver_skb(struct sk_buff *skb,
1658 			      struct packet_type *pt_prev,
1659 			      struct net_device *orig_dev)
1660 {
1661 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1662 		return -ENOMEM;
1663 	atomic_inc(&skb->users);
1664 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1665 }
1666 
1667 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1668 {
1669 	if (ptype->af_packet_priv == NULL)
1670 		return false;
1671 
1672 	if (ptype->id_match)
1673 		return ptype->id_match(ptype, skb->sk);
1674 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1675 		return true;
1676 
1677 	return false;
1678 }
1679 
1680 /*
1681  *	Support routine. Sends outgoing frames to any network
1682  *	taps currently in use.
1683  */
1684 
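/* The loop below clones the frame at most once: skb2 is allocated on the
 * first matching tap, each further match hands the previous tap a reference
 * via deliver_skb(), and the last tap consumes skb2 directly.
 */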
1685 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1686 {
1687 	struct packet_type *ptype;
1688 	struct sk_buff *skb2 = NULL;
1689 	struct packet_type *pt_prev = NULL;
1690 
1691 	rcu_read_lock();
1692 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1693 		/* Never send packets back to the socket
1694 		 * they originated from - MvS (miquels@drinkel.ow.org)
1695 		 */
1696 		if ((ptype->dev == dev || !ptype->dev) &&
1697 		    (!skb_loop_sk(ptype, skb))) {
1698 			if (pt_prev) {
1699 				deliver_skb(skb2, pt_prev, skb->dev);
1700 				pt_prev = ptype;
1701 				continue;
1702 			}
1703 
1704 			skb2 = skb_clone(skb, GFP_ATOMIC);
1705 			if (!skb2)
1706 				break;
1707 
1708 			net_timestamp_set(skb2);
1709 
1710 			/* skb->nh should be correctly
1711 			   set by sender, so that the second statement is
1712 			   just protection against buggy protocols.
1713 			 */
1714 			skb_reset_mac_header(skb2);
1715 
1716 			if (skb_network_header(skb2) < skb2->data ||
1717 			    skb2->network_header > skb2->tail) {
1718 				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1719 						     ntohs(skb2->protocol),
1720 						     dev->name);
1721 				skb_reset_network_header(skb2);
1722 			}
1723 
1724 			skb2->transport_header = skb2->network_header;
1725 			skb2->pkt_type = PACKET_OUTGOING;
1726 			pt_prev = ptype;
1727 		}
1728 	}
1729 	if (pt_prev)
1730 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1731 	rcu_read_unlock();
1732 }
1733 
1734 /**
1735  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1736  * @dev: Network device
1737  * @txq: number of queues available
1738  *
1739  * If real_num_tx_queues is changed the tc mappings may no longer be
1740  * valid. To resolve this verify that each tc mapping remains valid and,
1741  * if not, reset the mapping to TC0. With no priorities mapping to an
1742  * offset/count pair it will no longer be used. In the worst case, when
1743  * TC0 itself is invalid, nothing can be done, so priority mappings are
1744  * disabled altogether. It is expected that drivers will fix this mapping
1745  * if they can before calling netif_set_real_num_tx_queues.
1746  */
1747 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1748 {
1749 	int i;
1750 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1751 
1752 	/* If TC0 is invalidated disable TC mapping */
1753 	if (tc->offset + tc->count > txq) {
1754 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1755 		dev->num_tc = 0;
1756 		return;
1757 	}
1758 
1759 	/* Invalidated prio to tc mappings set to TC0 */
1760 	for (i = 1; i < TC_BITMASK + 1; i++) {
1761 		int q = netdev_get_prio_tc_map(dev, i);
1762 
1763 		tc = &dev->tc_to_txq[q];
1764 		if (tc->offset + tc->count > txq) {
1765 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1766 				i, q);
1767 			netdev_set_prio_tc_map(dev, i, 0);
1768 		}
1769 	}
1770 }
1771 
1772 /*
1773  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1774  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1775  */
1776 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1777 {
1778 	int rc;
1779 
1780 	if (txq < 1 || txq > dev->num_tx_queues)
1781 		return -EINVAL;
1782 
1783 	if (dev->reg_state == NETREG_REGISTERED ||
1784 	    dev->reg_state == NETREG_UNREGISTERING) {
1785 		ASSERT_RTNL();
1786 
1787 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1788 						  txq);
1789 		if (rc)
1790 			return rc;
1791 
1792 		if (dev->num_tc)
1793 			netif_setup_tc(dev, txq);
1794 
1795 		if (txq < dev->real_num_tx_queues)
1796 			qdisc_reset_all_tx_gt(dev, txq);
1797 	}
1798 
1799 	dev->real_num_tx_queues = txq;
1800 	return 0;
1801 }
1802 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1803 
1804 #ifdef CONFIG_RPS
1805 /**
1806  *	netif_set_real_num_rx_queues - set actual number of RX queues used
1807  *	@dev: Network device
1808  *	@rxq: Actual number of RX queues
1809  *
1810  *	This must be called either with the rtnl_lock held or before
1811  *	registration of the net device.  Returns 0 on success, or a
1812  *	negative error code.  If called before registration, it always
1813  *	succeeds.
1814  */
1815 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1816 {
1817 	int rc;
1818 
1819 	if (rxq < 1 || rxq > dev->num_rx_queues)
1820 		return -EINVAL;
1821 
1822 	if (dev->reg_state == NETREG_REGISTERED) {
1823 		ASSERT_RTNL();
1824 
1825 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1826 						  rxq);
1827 		if (rc)
1828 			return rc;
1829 	}
1830 
1831 	dev->real_num_rx_queues = rxq;
1832 	return 0;
1833 }
1834 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1835 #endif
1836 
1837 /**
1838  * netif_get_num_default_rss_queues - default number of RSS queues
1839  *
1840  * This routine should set an upper limit on the number of RSS queues
1841  * used by default by multiqueue devices.
1842  */
1843 int netif_get_num_default_rss_queues(void)
1844 {
1845 	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1846 }
1847 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
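
/* Example: a minimal sketch of a driver clamping its default channel count
 * with the helper above; MY_HW_MAX_QUEUES is a hypothetical hardware limit.
 */
#if 0
#define MY_HW_MAX_QUEUES 16

static unsigned int my_default_queue_count(void)
{
	/* Use the smaller of the hardware limit and the suggested default */
	return min_t(unsigned int, MY_HW_MAX_QUEUES,
		     netif_get_num_default_rss_queues());
}
#endif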
1848 
1849 static inline void __netif_reschedule(struct Qdisc *q)
1850 {
1851 	struct softnet_data *sd;
1852 	unsigned long flags;
1853 
1854 	local_irq_save(flags);
1855 	sd = &__get_cpu_var(softnet_data);
1856 	q->next_sched = NULL;
1857 	*sd->output_queue_tailp = q;
1858 	sd->output_queue_tailp = &q->next_sched;
1859 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
1860 	local_irq_restore(flags);
1861 }
1862 
1863 void __netif_schedule(struct Qdisc *q)
1864 {
1865 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1866 		__netif_reschedule(q);
1867 }
1868 EXPORT_SYMBOL(__netif_schedule);
1869 
1870 void dev_kfree_skb_irq(struct sk_buff *skb)
1871 {
1872 	if (atomic_dec_and_test(&skb->users)) {
1873 		struct softnet_data *sd;
1874 		unsigned long flags;
1875 
1876 		local_irq_save(flags);
1877 		sd = &__get_cpu_var(softnet_data);
1878 		skb->next = sd->completion_queue;
1879 		sd->completion_queue = skb;
1880 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
1881 		local_irq_restore(flags);
1882 	}
1883 }
1884 EXPORT_SYMBOL(dev_kfree_skb_irq);
1885 
1886 void dev_kfree_skb_any(struct sk_buff *skb)
1887 {
1888 	if (in_irq() || irqs_disabled())
1889 		dev_kfree_skb_irq(skb);
1890 	else
1891 		dev_kfree_skb(skb);
1892 }
1893 EXPORT_SYMBOL(dev_kfree_skb_any);
1894 
1895 
1896 /**
1897  * netif_device_detach - mark device as removed
1898  * @dev: network device
1899  *
1900  * Mark device as removed from the system and therefore no longer available.
1901  */
1902 void netif_device_detach(struct net_device *dev)
1903 {
1904 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1905 	    netif_running(dev)) {
1906 		netif_tx_stop_all_queues(dev);
1907 	}
1908 }
1909 EXPORT_SYMBOL(netif_device_detach);
1910 
1911 /**
1912  * netif_device_attach - mark device as attached
1913  * @dev: network device
1914  *
1915  * Mark device as attached to the system and restart if needed.
1916  */
1917 void netif_device_attach(struct net_device *dev)
1918 {
1919 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1920 	    netif_running(dev)) {
1921 		netif_tx_wake_all_queues(dev);
1922 		__netdev_watchdog_up(dev);
1923 	}
1924 }
1925 EXPORT_SYMBOL(netif_device_attach);
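
/* Example: the usual suspend/resume pairing for netif_device_detach() and
 * netif_device_attach(), sketched for a hypothetical PCI driver; my_suspend(),
 * my_resume() and the my_* hardware steps are not real.
 */
#if 0
static int my_suspend(struct pci_dev *my_pdev, pm_message_t state)
{
	struct net_device *my_dev = pci_get_drvdata(my_pdev);

	netif_device_detach(my_dev);	/* stops all TX queues if running */
	/* ... quiesce DMA and power the hardware down ... */
	return 0;
}

static int my_resume(struct pci_dev *my_pdev)
{
	struct net_device *my_dev = pci_get_drvdata(my_pdev);

	/* ... power the hardware up and restore its configuration ... */
	netif_device_attach(my_dev);	/* wakes queues and rearms the watchdog */
	return 0;
}
#endif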
1926 
1927 static void skb_warn_bad_offload(const struct sk_buff *skb)
1928 {
1929 	static const netdev_features_t null_features = 0;
1930 	struct net_device *dev = skb->dev;
1931 	const char *driver = "";
1932 
1933 	if (dev && dev->dev.parent)
1934 		driver = dev_driver_string(dev->dev.parent);
1935 
1936 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1937 	     "gso_type=%d ip_summed=%d\n",
1938 	     driver, dev ? &dev->features : &null_features,
1939 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
1940 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1941 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
1942 }
1943 
1944 /*
1945  * Invalidate hardware checksum when packet is to be mangled, and
1946  * complete checksum manually on outgoing path.
1947  */
1948 int skb_checksum_help(struct sk_buff *skb)
1949 {
1950 	__wsum csum;
1951 	int ret = 0, offset;
1952 
1953 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1954 		goto out_set_summed;
1955 
1956 	if (unlikely(skb_shinfo(skb)->gso_size)) {
1957 		skb_warn_bad_offload(skb);
1958 		return -EINVAL;
1959 	}
1960 
1961 	offset = skb_checksum_start_offset(skb);
1962 	BUG_ON(offset >= skb_headlen(skb));
1963 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
1964 
1965 	offset += skb->csum_offset;
1966 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1967 
1968 	if (skb_cloned(skb) &&
1969 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1970 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1971 		if (ret)
1972 			goto out;
1973 	}
1974 
1975 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
1976 out_set_summed:
1977 	skb->ip_summed = CHECKSUM_NONE;
1978 out:
1979 	return ret;
1980 }
1981 EXPORT_SYMBOL(skb_checksum_help);
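
/* Example: a minimal sketch of the usual driver fallback when a packet asks
 * for a checksum the hardware cannot generate; my_start_xmit() and
 * my_hw_can_csum() are hypothetical.
 */
#if 0
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb)) {
		/* Resolve the checksum in software before ringing the doorbell */
		if (skb_checksum_help(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
	/* ... post the skb to the hardware TX ring ... */
	return NETDEV_TX_OK;
}
#endif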
1982 
1983 /**
1984  *	skb_gso_segment - Perform segmentation on skb.
1985  *	@skb: buffer to segment
1986  *	@features: features for the output path (see dev->features)
1987  *
1988  *	This function segments the given skb and returns a list of segments.
1989  *
1990  *	It may return NULL if the skb requires no segmentation.  This is
1991  *	only possible when GSO is used for verifying header integrity.
1992  */
1993 struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1994 	netdev_features_t features)
1995 {
1996 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1997 	struct packet_type *ptype;
1998 	__be16 type = skb->protocol;
1999 	int vlan_depth = ETH_HLEN;
2000 	int err;
2001 
2002 	while (type == htons(ETH_P_8021Q)) {
2003 		struct vlan_hdr *vh;
2004 
2005 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2006 			return ERR_PTR(-EINVAL);
2007 
2008 		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2009 		type = vh->h_vlan_encapsulated_proto;
2010 		vlan_depth += VLAN_HLEN;
2011 	}
2012 
2013 	skb_reset_mac_header(skb);
2014 	skb->mac_len = skb->network_header - skb->mac_header;
2015 	__skb_pull(skb, skb->mac_len);
2016 
2017 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2018 		skb_warn_bad_offload(skb);
2019 
2020 		if (skb_header_cloned(skb) &&
2021 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2022 			return ERR_PTR(err);
2023 	}
2024 
2025 	rcu_read_lock();
2026 	list_for_each_entry_rcu(ptype,
2027 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2028 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2029 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2030 				err = ptype->gso_send_check(skb);
2031 				segs = ERR_PTR(err);
2032 				if (err || skb_gso_ok(skb, features))
2033 					break;
2034 				__skb_push(skb, (skb->data -
2035 						 skb_network_header(skb)));
2036 			}
2037 			segs = ptype->gso_segment(skb, features);
2038 			break;
2039 		}
2040 	}
2041 	rcu_read_unlock();
2042 
2043 	__skb_push(skb, skb->data - skb_mac_header(skb));
2044 
2045 	return segs;
2046 }
2047 EXPORT_SYMBOL(skb_gso_segment);
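
/* Example: a minimal sketch of consuming the segment list returned by
 * skb_gso_segment(); my_xmit_one() is a hypothetical per-frame transmit
 * helper.
 */
#if 0
static int my_segment_and_send(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header verification only */
		return my_xmit_one(skb);

	consume_skb(skb);		/* original skb is no longer needed */
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		my_xmit_one(nskb);
	}
	return 0;
}
#endif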
2048 
2049 /* Take action when hardware reception checksum errors are detected. */
2050 #ifdef CONFIG_BUG
2051 void netdev_rx_csum_fault(struct net_device *dev)
2052 {
2053 	if (net_ratelimit()) {
2054 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2055 		dump_stack();
2056 	}
2057 }
2058 EXPORT_SYMBOL(netdev_rx_csum_fault);
2059 #endif
2060 
2061 /* Actually, we should eliminate this check as soon as we know that:
2062  * 1. An IOMMU is present and allows us to map all the memory.
2063  * 2. No high memory really exists on this machine.
2064  */
2065 
2066 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2067 {
2068 #ifdef CONFIG_HIGHMEM
2069 	int i;
2070 	if (!(dev->features & NETIF_F_HIGHDMA)) {
2071 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2072 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2073 			if (PageHighMem(skb_frag_page(frag)))
2074 				return 1;
2075 		}
2076 	}
2077 
2078 	if (PCI_DMA_BUS_IS_PHYS) {
2079 		struct device *pdev = dev->dev.parent;
2080 
2081 		if (!pdev)
2082 			return 0;
2083 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2084 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2085 			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2086 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2087 				return 1;
2088 		}
2089 	}
2090 #endif
2091 	return 0;
2092 }
2093 
2094 struct dev_gso_cb {
2095 	void (*destructor)(struct sk_buff *skb);
2096 };
2097 
2098 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2099 
2100 static void dev_gso_skb_destructor(struct sk_buff *skb)
2101 {
2102 	struct dev_gso_cb *cb;
2103 
2104 	do {
2105 		struct sk_buff *nskb = skb->next;
2106 
2107 		skb->next = nskb->next;
2108 		nskb->next = NULL;
2109 		kfree_skb(nskb);
2110 	} while (skb->next);
2111 
2112 	cb = DEV_GSO_CB(skb);
2113 	if (cb->destructor)
2114 		cb->destructor(skb);
2115 }
2116 
2117 /**
2118  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
2119  *	@skb: buffer to segment
2120  *	@features: device features as applicable to this skb
2121  *
2122  *	This function segments the given skb and stores the list of segments
2123  *	in skb->next.
2124  */
2125 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2126 {
2127 	struct sk_buff *segs;
2128 
2129 	segs = skb_gso_segment(skb, features);
2130 
2131 	/* Verifying header integrity only. */
2132 	if (!segs)
2133 		return 0;
2134 
2135 	if (IS_ERR(segs))
2136 		return PTR_ERR(segs);
2137 
2138 	skb->next = segs;
2139 	DEV_GSO_CB(skb)->destructor = skb->destructor;
2140 	skb->destructor = dev_gso_skb_destructor;
2141 
2142 	return 0;
2143 }
2144 
2145 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2146 {
2147 	return ((features & NETIF_F_GEN_CSUM) ||
2148 		((features & NETIF_F_V4_CSUM) &&
2149 		 protocol == htons(ETH_P_IP)) ||
2150 		((features & NETIF_F_V6_CSUM) &&
2151 		 protocol == htons(ETH_P_IPV6)) ||
2152 		((features & NETIF_F_FCOE_CRC) &&
2153 		 protocol == htons(ETH_P_FCOE)));
2154 }
2155 
2156 static netdev_features_t harmonize_features(struct sk_buff *skb,
2157 	__be16 protocol, netdev_features_t features)
2158 {
2159 	if (!can_checksum_protocol(features, protocol)) {
2160 		features &= ~NETIF_F_ALL_CSUM;
2161 		features &= ~NETIF_F_SG;
2162 	} else if (illegal_highdma(skb->dev, skb)) {
2163 		features &= ~NETIF_F_SG;
2164 	}
2165 
2166 	return features;
2167 }
2168 
2169 netdev_features_t netif_skb_features(struct sk_buff *skb)
2170 {
2171 	__be16 protocol = skb->protocol;
2172 	netdev_features_t features = skb->dev->features;
2173 
2174 	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2175 		features &= ~NETIF_F_GSO_MASK;
2176 
2177 	if (protocol == htons(ETH_P_8021Q)) {
2178 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2179 		protocol = veh->h_vlan_encapsulated_proto;
2180 	} else if (!vlan_tx_tag_present(skb)) {
2181 		return harmonize_features(skb, protocol, features);
2182 	}
2183 
2184 	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2185 
2186 	if (protocol != htons(ETH_P_8021Q)) {
2187 		return harmonize_features(skb, protocol, features);
2188 	} else {
2189 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2190 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2191 		return harmonize_features(skb, protocol, features);
2192 	}
2193 }
2194 EXPORT_SYMBOL(netif_skb_features);
2195 
2196 /*
2197  * Returns true if either:
2198  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
2199  *	2. skb is fragmented and the device does not support SG.
2200  */
2201 static inline int skb_needs_linearize(struct sk_buff *skb,
2202 				      int features)
2203 {
2204 	return skb_is_nonlinear(skb) &&
2205 			((skb_has_frag_list(skb) &&
2206 				!(features & NETIF_F_FRAGLIST)) ||
2207 			(skb_shinfo(skb)->nr_frags &&
2208 				!(features & NETIF_F_SG)));
2209 }
2210 
2211 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2212 			struct netdev_queue *txq)
2213 {
2214 	const struct net_device_ops *ops = dev->netdev_ops;
2215 	int rc = NETDEV_TX_OK;
2216 	unsigned int skb_len;
2217 
2218 	if (likely(!skb->next)) {
2219 		netdev_features_t features;
2220 
2221 		/*
2222 		 * If device doesn't need skb->dst, release it right now while
2223 		 * it's hot in this cpu cache
2224 		 */
2225 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2226 			skb_dst_drop(skb);
2227 
2228 		features = netif_skb_features(skb);
2229 
2230 		if (vlan_tx_tag_present(skb) &&
2231 		    !(features & NETIF_F_HW_VLAN_TX)) {
2232 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2233 			if (unlikely(!skb))
2234 				goto out;
2235 
2236 			skb->vlan_tci = 0;
2237 		}
2238 
2239 		if (netif_needs_gso(skb, features)) {
2240 			if (unlikely(dev_gso_segment(skb, features)))
2241 				goto out_kfree_skb;
2242 			if (skb->next)
2243 				goto gso;
2244 		} else {
2245 			if (skb_needs_linearize(skb, features) &&
2246 			    __skb_linearize(skb))
2247 				goto out_kfree_skb;
2248 
2249 			/* If packet is not checksummed and device does not
2250 			 * support checksumming for this protocol, complete
2251 			 * checksumming here.
2252 			 */
2253 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
2254 				skb_set_transport_header(skb,
2255 					skb_checksum_start_offset(skb));
2256 				if (!(features & NETIF_F_ALL_CSUM) &&
2257 				     skb_checksum_help(skb))
2258 					goto out_kfree_skb;
2259 			}
2260 		}
2261 
2262 		if (!list_empty(&ptype_all))
2263 			dev_queue_xmit_nit(skb, dev);
2264 
2265 		skb_len = skb->len;
2266 		rc = ops->ndo_start_xmit(skb, dev);
2267 		trace_net_dev_xmit(skb, rc, dev, skb_len);
2268 		if (rc == NETDEV_TX_OK)
2269 			txq_trans_update(txq);
2270 		return rc;
2271 	}
2272 
2273 gso:
2274 	do {
2275 		struct sk_buff *nskb = skb->next;
2276 
2277 		skb->next = nskb->next;
2278 		nskb->next = NULL;
2279 
2280 		/*
2281 		 * If device doesn't need nskb->dst, release it right now while
2282 		 * it's hot in this cpu cache
2283 		 */
2284 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2285 			skb_dst_drop(nskb);
2286 
2287 		if (!list_empty(&ptype_all))
2288 			dev_queue_xmit_nit(nskb, dev);
2289 
2290 		skb_len = nskb->len;
2291 		rc = ops->ndo_start_xmit(nskb, dev);
2292 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
2293 		if (unlikely(rc != NETDEV_TX_OK)) {
2294 			if (rc & ~NETDEV_TX_MASK)
2295 				goto out_kfree_gso_skb;
2296 			nskb->next = skb->next;
2297 			skb->next = nskb;
2298 			return rc;
2299 		}
2300 		txq_trans_update(txq);
2301 		if (unlikely(netif_xmit_stopped(txq) && skb->next))
2302 			return NETDEV_TX_BUSY;
2303 	} while (skb->next);
2304 
2305 out_kfree_gso_skb:
2306 	if (likely(skb->next == NULL))
2307 		skb->destructor = DEV_GSO_CB(skb)->destructor;
2308 out_kfree_skb:
2309 	kfree_skb(skb);
2310 out:
2311 	return rc;
2312 }
2313 
2314 static u32 hashrnd __read_mostly;
2315 
2316 /*
2317  * Returns a Tx hash based on the given packet descriptor and the number of
2318  * Tx queues to be used as a distribution range.
2319  */
2320 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2321 		  unsigned int num_tx_queues)
2322 {
2323 	u32 hash;
2324 	u16 qoffset = 0;
2325 	u16 qcount = num_tx_queues;
2326 
2327 	if (skb_rx_queue_recorded(skb)) {
2328 		hash = skb_get_rx_queue(skb);
2329 		while (unlikely(hash >= num_tx_queues))
2330 			hash -= num_tx_queues;
2331 		return hash;
2332 	}
2333 
2334 	if (dev->num_tc) {
2335 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2336 		qoffset = dev->tc_to_txq[tc].offset;
2337 		qcount = dev->tc_to_txq[tc].count;
2338 	}
2339 
2340 	if (skb->sk && skb->sk->sk_hash)
2341 		hash = skb->sk->sk_hash;
2342 	else
2343 		hash = (__force u16) skb->protocol;
2344 	hash = jhash_1word(hash, hashrnd);
2345 
2346 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2347 }
2348 EXPORT_SYMBOL(__skb_tx_hash);
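
/* The return statement in __skb_tx_hash() above maps a 32-bit hash uniformly
 * onto [qoffset, qoffset + qcount): it computes (hash * qcount) / 2^32 with a
 * single 64-bit multiply instead of a modulo.  A standalone sketch of the
 * same scaling idiom (my_pick_bucket() is hypothetical):
 */
#if 0
static u16 my_pick_bucket(u32 hash, u16 nbuckets)
{
	/* one 64-bit multiply and a shift instead of a division */
	return (u16)(((u64)hash * nbuckets) >> 32);
}
#endif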
2349 
2350 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2351 {
2352 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2353 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2354 				     dev->name, queue_index,
2355 				     dev->real_num_tx_queues);
2356 		return 0;
2357 	}
2358 	return queue_index;
2359 }
2360 
2361 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2362 {
2363 #ifdef CONFIG_XPS
2364 	struct xps_dev_maps *dev_maps;
2365 	struct xps_map *map;
2366 	int queue_index = -1;
2367 
2368 	rcu_read_lock();
2369 	dev_maps = rcu_dereference(dev->xps_maps);
2370 	if (dev_maps) {
2371 		map = rcu_dereference(
2372 		    dev_maps->cpu_map[raw_smp_processor_id()]);
2373 		if (map) {
2374 			if (map->len == 1)
2375 				queue_index = map->queues[0];
2376 			else {
2377 				u32 hash;
2378 				if (skb->sk && skb->sk->sk_hash)
2379 					hash = skb->sk->sk_hash;
2380 				else
2381 					hash = (__force u16) skb->protocol ^
2382 					    skb->rxhash;
2383 				hash = jhash_1word(hash, hashrnd);
2384 				queue_index = map->queues[
2385 				    ((u64)hash * map->len) >> 32];
2386 			}
2387 			if (unlikely(queue_index >= dev->real_num_tx_queues))
2388 				queue_index = -1;
2389 		}
2390 	}
2391 	rcu_read_unlock();
2392 
2393 	return queue_index;
2394 #else
2395 	return -1;
2396 #endif
2397 }
2398 
2399 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2400 				    struct sk_buff *skb)
2401 {
2402 	int queue_index;
2403 	const struct net_device_ops *ops = dev->netdev_ops;
2404 
2405 	if (dev->real_num_tx_queues == 1)
2406 		queue_index = 0;
2407 	else if (ops->ndo_select_queue) {
2408 		queue_index = ops->ndo_select_queue(dev, skb);
2409 		queue_index = dev_cap_txqueue(dev, queue_index);
2410 	} else {
2411 		struct sock *sk = skb->sk;
2412 		queue_index = sk_tx_queue_get(sk);
2413 
2414 		if (queue_index < 0 || skb->ooo_okay ||
2415 		    queue_index >= dev->real_num_tx_queues) {
2416 			int old_index = queue_index;
2417 
2418 			queue_index = get_xps_queue(dev, skb);
2419 			if (queue_index < 0)
2420 				queue_index = skb_tx_hash(dev, skb);
2421 
2422 			if (queue_index != old_index && sk) {
2423 				struct dst_entry *dst =
2424 				    rcu_dereference_check(sk->sk_dst_cache, 1);
2425 
2426 				if (dst && skb_dst(skb) == dst)
2427 					sk_tx_queue_set(sk, queue_index);
2428 			}
2429 		}
2430 	}
2431 
2432 	skb_set_queue_mapping(skb, queue_index);
2433 	return netdev_get_tx_queue(dev, queue_index);
2434 }
2435 
2436 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2437 				 struct net_device *dev,
2438 				 struct netdev_queue *txq)
2439 {
2440 	spinlock_t *root_lock = qdisc_lock(q);
2441 	bool contended;
2442 	int rc;
2443 
2444 	qdisc_skb_cb(skb)->pkt_len = skb->len;
2445 	qdisc_calculate_pkt_len(skb, q);
2446 	/*
2447 	 * Heuristic to force contended enqueues to serialize on a
2448 	 * separate lock before trying to get qdisc main lock.
2449 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2450 	 * and dequeue packets faster.
2451 	 */
2452 	contended = qdisc_is_running(q);
2453 	if (unlikely(contended))
2454 		spin_lock(&q->busylock);
2455 
2456 	spin_lock(root_lock);
2457 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2458 		kfree_skb(skb);
2459 		rc = NET_XMIT_DROP;
2460 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2461 		   qdisc_run_begin(q)) {
2462 		/*
2463 		 * This is a work-conserving queue; there are no old skbs
2464 		 * waiting to be sent out; and the qdisc is not running -
2465 		 * xmit the skb directly.
2466 		 */
2467 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2468 			skb_dst_force(skb);
2469 
2470 		qdisc_bstats_update(q, skb);
2471 
2472 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2473 			if (unlikely(contended)) {
2474 				spin_unlock(&q->busylock);
2475 				contended = false;
2476 			}
2477 			__qdisc_run(q);
2478 		} else
2479 			qdisc_run_end(q);
2480 
2481 		rc = NET_XMIT_SUCCESS;
2482 	} else {
2483 		skb_dst_force(skb);
2484 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2485 		if (qdisc_run_begin(q)) {
2486 			if (unlikely(contended)) {
2487 				spin_unlock(&q->busylock);
2488 				contended = false;
2489 			}
2490 			__qdisc_run(q);
2491 		}
2492 	}
2493 	spin_unlock(root_lock);
2494 	if (unlikely(contended))
2495 		spin_unlock(&q->busylock);
2496 	return rc;
2497 }
2498 
2499 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2500 static void skb_update_prio(struct sk_buff *skb)
2501 {
2502 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2503 
2504 	if (!skb->priority && skb->sk && map) {
2505 		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2506 
2507 		if (prioidx < map->priomap_len)
2508 			skb->priority = map->priomap[prioidx];
2509 	}
2510 }
2511 #else
2512 #define skb_update_prio(skb)
2513 #endif
2514 
2515 static DEFINE_PER_CPU(int, xmit_recursion);
2516 #define RECURSION_LIMIT 10
2517 
2518 /**
2519  *	dev_loopback_xmit - loop back @skb
2520  *	@skb: buffer to transmit
2521  */
2522 int dev_loopback_xmit(struct sk_buff *skb)
2523 {
2524 	skb_reset_mac_header(skb);
2525 	__skb_pull(skb, skb_network_offset(skb));
2526 	skb->pkt_type = PACKET_LOOPBACK;
2527 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2528 	WARN_ON(!skb_dst(skb));
2529 	skb_dst_force(skb);
2530 	netif_rx_ni(skb);
2531 	return 0;
2532 }
2533 EXPORT_SYMBOL(dev_loopback_xmit);
2534 
2535 /**
2536  *	dev_queue_xmit - transmit a buffer
2537  *	@skb: buffer to transmit
2538  *
2539  *	Queue a buffer for transmission to a network device. The caller must
2540  *	have set the device and priority and built the buffer before calling
2541  *	this function. The function can be called from an interrupt.
2542  *
2543  *	A negative errno code is returned on a failure. A success does not
2544  *	guarantee the frame will be transmitted as it may be dropped due
2545  *	to congestion or traffic shaping.
2546  *
2547  * -----------------------------------------------------------------------------------
2548  *      I notice this method can also return errors from the queue disciplines,
2549  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2550  *      be positive.
2551  *
2552  *      Regardless of the return value, the skb is consumed, so it is currently
2553  *      difficult to retry a send to this method.  (You can bump the ref count
2554  *      before sending to hold a reference for retry if you are careful.)
2555  *
2556  *      When calling this method, interrupts MUST be enabled.  This is because
2557  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2558  *          --BLG
2559  */
2560 int dev_queue_xmit(struct sk_buff *skb)
2561 {
2562 	struct net_device *dev = skb->dev;
2563 	struct netdev_queue *txq;
2564 	struct Qdisc *q;
2565 	int rc = -ENOMEM;
2566 
2567 	/* Disable soft irqs for various locks below. Also
2568 	 * stops preemption for RCU.
2569 	 */
2570 	rcu_read_lock_bh();
2571 
2572 	skb_update_prio(skb);
2573 
2574 	txq = netdev_pick_tx(dev, skb);
2575 	q = rcu_dereference_bh(txq->qdisc);
2576 
2577 #ifdef CONFIG_NET_CLS_ACT
2578 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2579 #endif
2580 	trace_net_dev_queue(skb);
2581 	if (q->enqueue) {
2582 		rc = __dev_xmit_skb(skb, q, dev, txq);
2583 		goto out;
2584 	}
2585 
2586 	/* The device has no queue. Common case for software devices:
2587 	   loopback, all sorts of tunnels...
2588 
2589 	   Really, it is unlikely that netif_tx_lock protection is necessary
2590 	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2591 	   counters.)
2592 	   However, it is possible that they rely on the protection
2593 	   made by us here.
2594 
2595 	   Check this and take the lock. It is not prone to deadlocks.
2596 	   Or just use the noqueue qdisc; it is even simpler 8)
2597 	 */
2598 	if (dev->flags & IFF_UP) {
2599 		int cpu = smp_processor_id(); /* ok because BHs are off */
2600 
2601 		if (txq->xmit_lock_owner != cpu) {
2602 
2603 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2604 				goto recursion_alert;
2605 
2606 			HARD_TX_LOCK(dev, txq, cpu);
2607 
2608 			if (!netif_xmit_stopped(txq)) {
2609 				__this_cpu_inc(xmit_recursion);
2610 				rc = dev_hard_start_xmit(skb, dev, txq);
2611 				__this_cpu_dec(xmit_recursion);
2612 				if (dev_xmit_complete(rc)) {
2613 					HARD_TX_UNLOCK(dev, txq);
2614 					goto out;
2615 				}
2616 			}
2617 			HARD_TX_UNLOCK(dev, txq);
2618 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2619 					     dev->name);
2620 		} else {
2621 			/* Recursion is detected! It is possible,
2622 			 * unfortunately
2623 			 */
2624 recursion_alert:
2625 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2626 					     dev->name);
2627 		}
2628 	}
2629 
2630 	rc = -ENETDOWN;
2631 	rcu_read_unlock_bh();
2632 
2633 	kfree_skb(skb);
2634 	return rc;
2635 out:
2636 	rcu_read_unlock_bh();
2637 	return rc;
2638 }
2639 EXPORT_SYMBOL(dev_queue_xmit);
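
/* Example: a minimal sketch of a caller that builds a frame and hands it to
 * dev_queue_xmit(); MY_PROTO, my_send_frame() and the simplified error
 * handling are hypothetical.
 */
#if 0
static int my_send_frame(struct net_device *dev, const unsigned char *dst_mac,
			 const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	memcpy(skb_put(skb, len), data, len);

	skb->dev = dev;
	skb->protocol = htons(MY_PROTO);

	if (dev_hard_header(skb, dev, MY_PROTO, dst_mac, dev->dev_addr,
			    skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Consumes the skb; may return negative errnos or positive NET_XMIT_*
	 * codes as described in the comment above.
	 */
	return dev_queue_xmit(skb);
}
#endif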
2640 
2641 
2642 /*=======================================================================
2643 			Receiver routines
2644   =======================================================================*/
2645 
2646 int netdev_max_backlog __read_mostly = 1000;
2647 int netdev_tstamp_prequeue __read_mostly = 1;
2648 int netdev_budget __read_mostly = 300;
2649 int weight_p __read_mostly = 64;            /* old backlog weight */
2650 
2651 /* Called with irq disabled */
2652 static inline void ____napi_schedule(struct softnet_data *sd,
2653 				     struct napi_struct *napi)
2654 {
2655 	list_add_tail(&napi->poll_list, &sd->poll_list);
2656 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2657 }
2658 
2659 /*
2660  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2661  * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
2662  * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
2663  * if hash is a canonical 4-tuple hash over transport ports.
2664  */
2665 void __skb_get_rxhash(struct sk_buff *skb)
2666 {
2667 	struct flow_keys keys;
2668 	u32 hash;
2669 
2670 	if (!skb_flow_dissect(skb, &keys))
2671 		return;
2672 
2673 	if (keys.ports)
2674 		skb->l4_rxhash = 1;
2675 
2676 	/* get a consistent hash (same value on both flow directions) */
2677 	if (((__force u32)keys.dst < (__force u32)keys.src) ||
2678 	    (((__force u32)keys.dst == (__force u32)keys.src) &&
2679 	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2680 		swap(keys.dst, keys.src);
2681 		swap(keys.port16[0], keys.port16[1]);
2682 	}
2683 
2684 	hash = jhash_3words((__force u32)keys.dst,
2685 			    (__force u32)keys.src,
2686 			    (__force u32)keys.ports, hashrnd);
2687 	if (!hash)
2688 		hash = 1;
2689 
2690 	skb->rxhash = hash;
2691 }
2692 EXPORT_SYMBOL(__skb_get_rxhash);
2693 
2694 #ifdef CONFIG_RPS
2695 
2696 /* One global table that all flow-based protocols share. */
2697 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2698 EXPORT_SYMBOL(rps_sock_flow_table);
2699 
2700 struct static_key rps_needed __read_mostly;
2701 
2702 static struct rps_dev_flow *
2703 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2704 	    struct rps_dev_flow *rflow, u16 next_cpu)
2705 {
2706 	if (next_cpu != RPS_NO_CPU) {
2707 #ifdef CONFIG_RFS_ACCEL
2708 		struct netdev_rx_queue *rxqueue;
2709 		struct rps_dev_flow_table *flow_table;
2710 		struct rps_dev_flow *old_rflow;
2711 		u32 flow_id;
2712 		u16 rxq_index;
2713 		int rc;
2714 
2715 		/* Should we steer this flow to a different hardware queue? */
2716 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2717 		    !(dev->features & NETIF_F_NTUPLE))
2718 			goto out;
2719 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2720 		if (rxq_index == skb_get_rx_queue(skb))
2721 			goto out;
2722 
2723 		rxqueue = dev->_rx + rxq_index;
2724 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
2725 		if (!flow_table)
2726 			goto out;
2727 		flow_id = skb->rxhash & flow_table->mask;
2728 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2729 							rxq_index, flow_id);
2730 		if (rc < 0)
2731 			goto out;
2732 		old_rflow = rflow;
2733 		rflow = &flow_table->flows[flow_id];
2734 		rflow->filter = rc;
2735 		if (old_rflow->filter == rflow->filter)
2736 			old_rflow->filter = RPS_NO_FILTER;
2737 	out:
2738 #endif
2739 		rflow->last_qtail =
2740 			per_cpu(softnet_data, next_cpu).input_queue_head;
2741 	}
2742 
2743 	rflow->cpu = next_cpu;
2744 	return rflow;
2745 }
2746 
2747 /*
2748  * get_rps_cpu is called from netif_receive_skb and returns the target
2749  * CPU from the RPS map of the receiving queue for a given skb.
2750  * rcu_read_lock must be held on entry.
2751  */
2752 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2753 		       struct rps_dev_flow **rflowp)
2754 {
2755 	struct netdev_rx_queue *rxqueue;
2756 	struct rps_map *map;
2757 	struct rps_dev_flow_table *flow_table;
2758 	struct rps_sock_flow_table *sock_flow_table;
2759 	int cpu = -1;
2760 	u16 tcpu;
2761 
2762 	if (skb_rx_queue_recorded(skb)) {
2763 		u16 index = skb_get_rx_queue(skb);
2764 		if (unlikely(index >= dev->real_num_rx_queues)) {
2765 			WARN_ONCE(dev->real_num_rx_queues > 1,
2766 				  "%s received packet on queue %u, but number "
2767 				  "of RX queues is %u\n",
2768 				  dev->name, index, dev->real_num_rx_queues);
2769 			goto done;
2770 		}
2771 		rxqueue = dev->_rx + index;
2772 	} else
2773 		rxqueue = dev->_rx;
2774 
2775 	map = rcu_dereference(rxqueue->rps_map);
2776 	if (map) {
2777 		if (map->len == 1 &&
2778 		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2779 			tcpu = map->cpus[0];
2780 			if (cpu_online(tcpu))
2781 				cpu = tcpu;
2782 			goto done;
2783 		}
2784 	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2785 		goto done;
2786 	}
2787 
2788 	skb_reset_network_header(skb);
2789 	if (!skb_get_rxhash(skb))
2790 		goto done;
2791 
2792 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2793 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
2794 	if (flow_table && sock_flow_table) {
2795 		u16 next_cpu;
2796 		struct rps_dev_flow *rflow;
2797 
2798 		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2799 		tcpu = rflow->cpu;
2800 
2801 		next_cpu = sock_flow_table->ents[skb->rxhash &
2802 		    sock_flow_table->mask];
2803 
2804 		/*
2805 		 * If the desired CPU (where last recvmsg was done) is
2806 		 * different from current CPU (one in the rx-queue flow
2807 		 * table entry), switch if one of the following holds:
2808 		 *   - Current CPU is unset (equal to RPS_NO_CPU).
2809 		 *   - Current CPU is offline.
2810 		 *   - The current CPU's queue tail has advanced beyond the
2811 		 *     last packet that was enqueued using this table entry.
2812 		 *     This guarantees that all previous packets for the flow
2813 		 *     have been dequeued, thus preserving in order delivery.
2814 		 */
2815 		if (unlikely(tcpu != next_cpu) &&
2816 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2817 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2818 		      rflow->last_qtail)) >= 0))
2819 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2820 
2821 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2822 			*rflowp = rflow;
2823 			cpu = tcpu;
2824 			goto done;
2825 		}
2826 	}
2827 
2828 	if (map) {
2829 		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2830 
2831 		if (cpu_online(tcpu)) {
2832 			cpu = tcpu;
2833 			goto done;
2834 		}
2835 	}
2836 
2837 done:
2838 	return cpu;
2839 }
2840 
2841 #ifdef CONFIG_RFS_ACCEL
2842 
2843 /**
2844  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2845  * @dev: Device on which the filter was set
2846  * @rxq_index: RX queue index
2847  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2848  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2849  *
2850  * Drivers that implement ndo_rx_flow_steer() should periodically call
2851  * this function for each installed filter and remove the filters for
2852  * which it returns %true.
2853  */
2854 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2855 			 u32 flow_id, u16 filter_id)
2856 {
2857 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2858 	struct rps_dev_flow_table *flow_table;
2859 	struct rps_dev_flow *rflow;
2860 	bool expire = true;
2861 	int cpu;
2862 
2863 	rcu_read_lock();
2864 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2865 	if (flow_table && flow_id <= flow_table->mask) {
2866 		rflow = &flow_table->flows[flow_id];
2867 		cpu = ACCESS_ONCE(rflow->cpu);
2868 		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2869 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2870 			   rflow->last_qtail) <
2871 		     (int)(10 * flow_table->mask)))
2872 			expire = false;
2873 	}
2874 	rcu_read_unlock();
2875 	return expire;
2876 }
2877 EXPORT_SYMBOL(rps_may_expire_flow);
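
/* Example: a minimal sketch of the periodic scan described above for an
 * aRFS-capable driver; struct my_adapter, its my_filters[] table and
 * my_hw_remove_filter() are hypothetical driver state.
 */
#if 0
static void my_expire_rfs_filters(struct my_adapter *adapter)
{
	unsigned int i;

	for (i = 0; i < adapter->my_num_filters; i++) {
		struct my_filter *f = &adapter->my_filters[i];

		if (!f->installed)
			continue;
		if (rps_may_expire_flow(adapter->netdev, f->rxq_index,
					f->flow_id, f->filter_id)) {
			my_hw_remove_filter(adapter, f);
			f->installed = false;
		}
	}
}
#endif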
2878 
2879 #endif /* CONFIG_RFS_ACCEL */
2880 
2881 /* Called from hardirq (IPI) context */
2882 static void rps_trigger_softirq(void *data)
2883 {
2884 	struct softnet_data *sd = data;
2885 
2886 	____napi_schedule(sd, &sd->backlog);
2887 	sd->received_rps++;
2888 }
2889 
2890 #endif /* CONFIG_RPS */
2891 
2892 /*
2893  * Check if this softnet_data structure belongs to another cpu.
2894  * If yes, queue it to our IPI list and return 1;
2895  * if no, return 0.
2896  */
2897 static int rps_ipi_queued(struct softnet_data *sd)
2898 {
2899 #ifdef CONFIG_RPS
2900 	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2901 
2902 	if (sd != mysd) {
2903 		sd->rps_ipi_next = mysd->rps_ipi_list;
2904 		mysd->rps_ipi_list = sd;
2905 
2906 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2907 		return 1;
2908 	}
2909 #endif /* CONFIG_RPS */
2910 	return 0;
2911 }
2912 
2913 /*
2914  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2915  * queue (may be a remote CPU queue).
2916  */
2917 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2918 			      unsigned int *qtail)
2919 {
2920 	struct softnet_data *sd;
2921 	unsigned long flags;
2922 
2923 	sd = &per_cpu(softnet_data, cpu);
2924 
2925 	local_irq_save(flags);
2926 
2927 	rps_lock(sd);
2928 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2929 		if (skb_queue_len(&sd->input_pkt_queue)) {
2930 enqueue:
2931 			__skb_queue_tail(&sd->input_pkt_queue, skb);
2932 			input_queue_tail_incr_save(sd, qtail);
2933 			rps_unlock(sd);
2934 			local_irq_restore(flags);
2935 			return NET_RX_SUCCESS;
2936 		}
2937 
2938 		/* Schedule NAPI for backlog device
2939 		 * We can use a non-atomic operation since we own the queue lock
2940 		 */
2941 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2942 			if (!rps_ipi_queued(sd))
2943 				____napi_schedule(sd, &sd->backlog);
2944 		}
2945 		goto enqueue;
2946 	}
2947 
2948 	sd->dropped++;
2949 	rps_unlock(sd);
2950 
2951 	local_irq_restore(flags);
2952 
2953 	atomic_long_inc(&skb->dev->rx_dropped);
2954 	kfree_skb(skb);
2955 	return NET_RX_DROP;
2956 }
2957 
2958 /**
2959  *	netif_rx	-	post buffer to the network code
2960  *	@skb: buffer to post
2961  *
2962  *	This function receives a packet from a device driver and queues it for
2963  *	the upper (protocol) levels to process.  It always succeeds. The buffer
2964  *	may be dropped during processing for congestion control or by the
2965  *	protocol layers.
2966  *
2967  *	return values:
2968  *	NET_RX_SUCCESS	(no congestion)
2969  *	NET_RX_DROP     (packet was dropped)
2970  *
2971  */
2972 
2973 int netif_rx(struct sk_buff *skb)
2974 {
2975 	int ret;
2976 
2977 	/* if netpoll wants it, pretend we never saw it */
2978 	if (netpoll_rx(skb))
2979 		return NET_RX_DROP;
2980 
2981 	net_timestamp_check(netdev_tstamp_prequeue, skb);
2982 
2983 	trace_netif_rx(skb);
2984 #ifdef CONFIG_RPS
2985 	if (static_key_false(&rps_needed)) {
2986 		struct rps_dev_flow voidflow, *rflow = &voidflow;
2987 		int cpu;
2988 
2989 		preempt_disable();
2990 		rcu_read_lock();
2991 
2992 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2993 		if (cpu < 0)
2994 			cpu = smp_processor_id();
2995 
2996 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2997 
2998 		rcu_read_unlock();
2999 		preempt_enable();
3000 	} else
3001 #endif
3002 	{
3003 		unsigned int qtail;
3004 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3005 		put_cpu();
3006 	}
3007 	return ret;
3008 }
3009 EXPORT_SYMBOL(netif_rx);
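
/* Example: a minimal sketch of the classic non-NAPI receive path that feeds
 * netif_rx() from an interrupt handler; my_rx_frame_len() and my_copy_frame()
 * stand in for hypothetical hardware access.
 */
#if 0
static void my_rx_interrupt(struct net_device *dev)
{
	unsigned int len = my_rx_frame_len(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	my_copy_frame(dev, skb_put(skb, len));
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);	/* queues to the per-cpu backlog, never sleeps */
}
#endif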
3010 
3011 int netif_rx_ni(struct sk_buff *skb)
3012 {
3013 	int err;
3014 
3015 	preempt_disable();
3016 	err = netif_rx(skb);
3017 	if (local_softirq_pending())
3018 		do_softirq();
3019 	preempt_enable();
3020 
3021 	return err;
3022 }
3023 EXPORT_SYMBOL(netif_rx_ni);
3024 
3025 static void net_tx_action(struct softirq_action *h)
3026 {
3027 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3028 
3029 	if (sd->completion_queue) {
3030 		struct sk_buff *clist;
3031 
3032 		local_irq_disable();
3033 		clist = sd->completion_queue;
3034 		sd->completion_queue = NULL;
3035 		local_irq_enable();
3036 
3037 		while (clist) {
3038 			struct sk_buff *skb = clist;
3039 			clist = clist->next;
3040 
3041 			WARN_ON(atomic_read(&skb->users));
3042 			trace_kfree_skb(skb, net_tx_action);
3043 			__kfree_skb(skb);
3044 		}
3045 	}
3046 
3047 	if (sd->output_queue) {
3048 		struct Qdisc *head;
3049 
3050 		local_irq_disable();
3051 		head = sd->output_queue;
3052 		sd->output_queue = NULL;
3053 		sd->output_queue_tailp = &sd->output_queue;
3054 		local_irq_enable();
3055 
3056 		while (head) {
3057 			struct Qdisc *q = head;
3058 			spinlock_t *root_lock;
3059 
3060 			head = head->next_sched;
3061 
3062 			root_lock = qdisc_lock(q);
3063 			if (spin_trylock(root_lock)) {
3064 				smp_mb__before_clear_bit();
3065 				clear_bit(__QDISC_STATE_SCHED,
3066 					  &q->state);
3067 				qdisc_run(q);
3068 				spin_unlock(root_lock);
3069 			} else {
3070 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3071 					      &q->state)) {
3072 					__netif_reschedule(q);
3073 				} else {
3074 					smp_mb__before_clear_bit();
3075 					clear_bit(__QDISC_STATE_SCHED,
3076 						  &q->state);
3077 				}
3078 			}
3079 		}
3080 	}
3081 }
3082 
3083 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3084     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3085 /* This hook is defined here for ATM LANE */
3086 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3087 			     unsigned char *addr) __read_mostly;
3088 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3089 #endif
3090 
3091 #ifdef CONFIG_NET_CLS_ACT
3092 /* TODO: Maybe we should just force sch_ingress to be compiled in
3093  * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3094  * instructions (a compare and 2 extra stores) right now if we don't have
3095  * it on but do have CONFIG_NET_CLS_ACT.
3096  * NOTE: This doesn't stop any functionality; if you don't have
3097  * the ingress scheduler, you just can't add policies on ingress.
3098  *
3099  */
3100 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3101 {
3102 	struct net_device *dev = skb->dev;
3103 	u32 ttl = G_TC_RTTL(skb->tc_verd);
3104 	int result = TC_ACT_OK;
3105 	struct Qdisc *q;
3106 
3107 	if (unlikely(MAX_RED_LOOP < ttl++)) {
3108 		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3109 				     skb->skb_iif, dev->ifindex);
3110 		return TC_ACT_SHOT;
3111 	}
3112 
3113 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3114 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3115 
3116 	q = rxq->qdisc;
3117 	if (q != &noop_qdisc) {
3118 		spin_lock(qdisc_lock(q));
3119 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3120 			result = qdisc_enqueue_root(skb, q);
3121 		spin_unlock(qdisc_lock(q));
3122 	}
3123 
3124 	return result;
3125 }
3126 
3127 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3128 					 struct packet_type **pt_prev,
3129 					 int *ret, struct net_device *orig_dev)
3130 {
3131 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3132 
3133 	if (!rxq || rxq->qdisc == &noop_qdisc)
3134 		goto out;
3135 
3136 	if (*pt_prev) {
3137 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
3138 		*pt_prev = NULL;
3139 	}
3140 
3141 	switch (ing_filter(skb, rxq)) {
3142 	case TC_ACT_SHOT:
3143 	case TC_ACT_STOLEN:
3144 		kfree_skb(skb);
3145 		return NULL;
3146 	}
3147 
3148 out:
3149 	skb->tc_verd = 0;
3150 	return skb;
3151 }
3152 #endif
3153 
3154 /**
3155  *	netdev_rx_handler_register - register receive handler
3156  *	@dev: device to register a handler for
3157  *	@rx_handler: receive handler to register
3158  *	@rx_handler_data: data pointer that is used by rx handler
3159  *
3160  *	Register a receive handler for a device. This handler will then be
3161  *	called from __netif_receive_skb. A negative errno code is returned
3162  *	on a failure.
3163  *
3164  *	The caller must hold the rtnl_mutex.
3165  *
3166  *	For a general description of rx_handler, see enum rx_handler_result.
3167  */
3168 int netdev_rx_handler_register(struct net_device *dev,
3169 			       rx_handler_func_t *rx_handler,
3170 			       void *rx_handler_data)
3171 {
3172 	ASSERT_RTNL();
3173 
3174 	if (dev->rx_handler)
3175 		return -EBUSY;
3176 
3177 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3178 	rcu_assign_pointer(dev->rx_handler, rx_handler);
3179 
3180 	return 0;
3181 }
3182 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
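
/* Example: a minimal sketch of attaching an rx_handler to a port device, in
 * the style of bridge/bonding/team; my_rx_handler(), my_enslave() and
 * struct my_port are hypothetical.
 */
#if 0
static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;
	/* ... steer the skb into the aggregate device, possibly changing
	 * skb->dev and returning RX_HANDLER_ANOTHER ...
	 */
	return RX_HANDLER_PASS;
}

static int my_enslave(struct net_device *port_dev, struct my_port *port)
{
	ASSERT_RTNL();
	/* Fails with -EBUSY if another handler already owns the device */
	return netdev_rx_handler_register(port_dev, my_rx_handler, port);
}
#endif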
3183 
3184 /**
3185  *	netdev_rx_handler_unregister - unregister receive handler
3186  *	@dev: device to unregister a handler from
3187  *
3188  *	Unregister a receive handler from a device.
3189  *
3190  *	The caller must hold the rtnl_mutex.
3191  */
3192 void netdev_rx_handler_unregister(struct net_device *dev)
3193 {
3194 
3195 	ASSERT_RTNL();
3196 	RCU_INIT_POINTER(dev->rx_handler, NULL);
3197 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3198 }
3199 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3200 
3201 /*
3202  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3203  * the special handling of PFMEMALLOC skbs.
3204  */
3205 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3206 {
3207 	switch (skb->protocol) {
3208 	case __constant_htons(ETH_P_ARP):
3209 	case __constant_htons(ETH_P_IP):
3210 	case __constant_htons(ETH_P_IPV6):
3211 	case __constant_htons(ETH_P_8021Q):
3212 		return true;
3213 	default:
3214 		return false;
3215 	}
3216 }
3217 
3218 static int __netif_receive_skb(struct sk_buff *skb)
3219 {
3220 	struct packet_type *ptype, *pt_prev;
3221 	rx_handler_func_t *rx_handler;
3222 	struct net_device *orig_dev;
3223 	struct net_device *null_or_dev;
3224 	bool deliver_exact = false;
3225 	int ret = NET_RX_DROP;
3226 	__be16 type;
3227 	unsigned long pflags = current->flags;
3228 
3229 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
3230 
3231 	trace_netif_receive_skb(skb);
3232 
3233 	/*
3234 	 * PFMEMALLOC skbs are special, they should
3235 	 * - be delivered to SOCK_MEMALLOC sockets only
3236 	 * - stay away from userspace
3237 	 * - have bounded memory usage
3238 	 *
3239 	 * Use PF_MEMALLOC as this saves us from propagating the allocation
3240 	 * context down to all allocation sites.
3241 	 */
3242 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3243 		current->flags |= PF_MEMALLOC;
3244 
3245 	/* if we've gotten here through NAPI, check netpoll */
3246 	if (netpoll_receive_skb(skb))
3247 		goto out;
3248 
3249 	orig_dev = skb->dev;
3250 
3251 	skb_reset_network_header(skb);
3252 	skb_reset_transport_header(skb);
3253 	skb_reset_mac_len(skb);
3254 
3255 	pt_prev = NULL;
3256 
3257 	rcu_read_lock();
3258 
3259 another_round:
3260 	skb->skb_iif = skb->dev->ifindex;
3261 
3262 	__this_cpu_inc(softnet_data.processed);
3263 
3264 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3265 		skb = vlan_untag(skb);
3266 		if (unlikely(!skb))
3267 			goto unlock;
3268 	}
3269 
3270 #ifdef CONFIG_NET_CLS_ACT
3271 	if (skb->tc_verd & TC_NCLS) {
3272 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3273 		goto ncls;
3274 	}
3275 #endif
3276 
3277 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3278 		goto skip_taps;
3279 
3280 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
3281 		if (!ptype->dev || ptype->dev == skb->dev) {
3282 			if (pt_prev)
3283 				ret = deliver_skb(skb, pt_prev, orig_dev);
3284 			pt_prev = ptype;
3285 		}
3286 	}
3287 
3288 skip_taps:
3289 #ifdef CONFIG_NET_CLS_ACT
3290 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3291 	if (!skb)
3292 		goto unlock;
3293 ncls:
3294 #endif
3295 
3296 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3297 				&& !skb_pfmemalloc_protocol(skb))
3298 		goto drop;
3299 
3300 	rx_handler = rcu_dereference(skb->dev->rx_handler);
3301 	if (vlan_tx_tag_present(skb)) {
3302 		if (pt_prev) {
3303 			ret = deliver_skb(skb, pt_prev, orig_dev);
3304 			pt_prev = NULL;
3305 		}
3306 		if (vlan_do_receive(&skb, !rx_handler))
3307 			goto another_round;
3308 		else if (unlikely(!skb))
3309 			goto unlock;
3310 	}
3311 
3312 	if (rx_handler) {
3313 		if (pt_prev) {
3314 			ret = deliver_skb(skb, pt_prev, orig_dev);
3315 			pt_prev = NULL;
3316 		}
3317 		switch (rx_handler(&skb)) {
3318 		case RX_HANDLER_CONSUMED:
3319 			goto unlock;
3320 		case RX_HANDLER_ANOTHER:
3321 			goto another_round;
3322 		case RX_HANDLER_EXACT:
3323 			deliver_exact = true;
3324 		case RX_HANDLER_PASS:
3325 			break;
3326 		default:
3327 			BUG();
3328 		}
3329 	}
3330 
3331 	/* deliver only exact match when indicated */
3332 	null_or_dev = deliver_exact ? skb->dev : NULL;
3333 
3334 	type = skb->protocol;
3335 	list_for_each_entry_rcu(ptype,
3336 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3337 		if (ptype->type == type &&
3338 		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3339 		     ptype->dev == orig_dev)) {
3340 			if (pt_prev)
3341 				ret = deliver_skb(skb, pt_prev, orig_dev);
3342 			pt_prev = ptype;
3343 		}
3344 	}
3345 
3346 	if (pt_prev) {
3347 		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3348 			ret = -ENOMEM;
3349 		else
3350 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3351 	} else {
3352 drop:
3353 		atomic_long_inc(&skb->dev->rx_dropped);
3354 		kfree_skb(skb);
3355 		/* Jamal, now you will not be able to escape explaining
3356 		 * to me how you were going to use this. :-)
3357 		 */
3358 		ret = NET_RX_DROP;
3359 	}
3360 
3361 unlock:
3362 	rcu_read_unlock();
3363 out:
3364 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
3365 	return ret;
3366 }
3367 
3368 /**
3369  *	netif_receive_skb - process receive buffer from network
3370  *	@skb: buffer to process
3371  *
3372  *	netif_receive_skb() is the main receive data processing function.
3373  *	It always succeeds. The buffer may be dropped during processing
3374  *	for congestion control or by the protocol layers.
3375  *
3376  *	This function may only be called from softirq context and interrupts
3377  *	should be enabled.
3378  *
3379  *	Return values (usually ignored):
3380  *	NET_RX_SUCCESS: no congestion
3381  *	NET_RX_DROP: packet was dropped
3382  */
3383 int netif_receive_skb(struct sk_buff *skb)
3384 {
3385 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3386 
3387 	if (skb_defer_rx_timestamp(skb))
3388 		return NET_RX_SUCCESS;
3389 
3390 #ifdef CONFIG_RPS
3391 	if (static_key_false(&rps_needed)) {
3392 		struct rps_dev_flow voidflow, *rflow = &voidflow;
3393 		int cpu, ret;
3394 
3395 		rcu_read_lock();
3396 
3397 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
3398 
3399 		if (cpu >= 0) {
3400 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3401 			rcu_read_unlock();
3402 			return ret;
3403 		}
3404 		rcu_read_unlock();
3405 	}
3406 #endif
3407 	return __netif_receive_skb(skb);
3408 }
3409 EXPORT_SYMBOL(netif_receive_skb);
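
/* Example: a minimal sketch of a NAPI poll routine delivering frames through
 * netif_receive_skb() from softirq context; my_fetch_frame() is hypothetical.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_fetch_frame(dev);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
		work++;
	}

	if (work < budget)
		napi_complete(napi);	/* re-enable device interrupts after this */
	return work;
}
#endif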
3410 
3411 /* Network device is going away; flush any packets still pending.
3412  * Called with irqs disabled.
3413  */
3414 static void flush_backlog(void *arg)
3415 {
3416 	struct net_device *dev = arg;
3417 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3418 	struct sk_buff *skb, *tmp;
3419 
3420 	rps_lock(sd);
3421 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3422 		if (skb->dev == dev) {
3423 			__skb_unlink(skb, &sd->input_pkt_queue);
3424 			kfree_skb(skb);
3425 			input_queue_head_incr(sd);
3426 		}
3427 	}
3428 	rps_unlock(sd);
3429 
3430 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3431 		if (skb->dev == dev) {
3432 			__skb_unlink(skb, &sd->process_queue);
3433 			kfree_skb(skb);
3434 			input_queue_head_incr(sd);
3435 		}
3436 	}
3437 }
3438 
3439 static int napi_gro_complete(struct sk_buff *skb)
3440 {
3441 	struct packet_type *ptype;
3442 	__be16 type = skb->protocol;
3443 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3444 	int err = -ENOENT;
3445 
3446 	if (NAPI_GRO_CB(skb)->count == 1) {
3447 		skb_shinfo(skb)->gso_size = 0;
3448 		goto out;
3449 	}
3450 
3451 	rcu_read_lock();
3452 	list_for_each_entry_rcu(ptype, head, list) {
3453 		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3454 			continue;
3455 
3456 		err = ptype->gro_complete(skb);
3457 		break;
3458 	}
3459 	rcu_read_unlock();
3460 
3461 	if (err) {
3462 		WARN_ON(&ptype->list == head);
3463 		kfree_skb(skb);
3464 		return NET_RX_SUCCESS;
3465 	}
3466 
3467 out:
3468 	return netif_receive_skb(skb);
3469 }
3470 
3471 inline void napi_gro_flush(struct napi_struct *napi)
3472 {
3473 	struct sk_buff *skb, *next;
3474 
3475 	for (skb = napi->gro_list; skb; skb = next) {
3476 		next = skb->next;
3477 		skb->next = NULL;
3478 		napi_gro_complete(skb);
3479 	}
3480 
3481 	napi->gro_count = 0;
3482 	napi->gro_list = NULL;
3483 }
3484 EXPORT_SYMBOL(napi_gro_flush);
3485 
3486 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3487 {
3488 	struct sk_buff **pp = NULL;
3489 	struct packet_type *ptype;
3490 	__be16 type = skb->protocol;
3491 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3492 	int same_flow;
3493 	int mac_len;
3494 	enum gro_result ret;
3495 
3496 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3497 		goto normal;
3498 
3499 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
3500 		goto normal;
3501 
3502 	rcu_read_lock();
3503 	list_for_each_entry_rcu(ptype, head, list) {
3504 		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3505 			continue;
3506 
3507 		skb_set_network_header(skb, skb_gro_offset(skb));
3508 		mac_len = skb->network_header - skb->mac_header;
3509 		skb->mac_len = mac_len;
3510 		NAPI_GRO_CB(skb)->same_flow = 0;
3511 		NAPI_GRO_CB(skb)->flush = 0;
3512 		NAPI_GRO_CB(skb)->free = 0;
3513 
3514 		pp = ptype->gro_receive(&napi->gro_list, skb);
3515 		break;
3516 	}
3517 	rcu_read_unlock();
3518 
3519 	if (&ptype->list == head)
3520 		goto normal;
3521 
3522 	same_flow = NAPI_GRO_CB(skb)->same_flow;
3523 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3524 
3525 	if (pp) {
3526 		struct sk_buff *nskb = *pp;
3527 
3528 		*pp = nskb->next;
3529 		nskb->next = NULL;
3530 		napi_gro_complete(nskb);
3531 		napi->gro_count--;
3532 	}
3533 
3534 	if (same_flow)
3535 		goto ok;
3536 
3537 	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3538 		goto normal;
3539 
3540 	napi->gro_count++;
3541 	NAPI_GRO_CB(skb)->count = 1;
3542 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3543 	skb->next = napi->gro_list;
3544 	napi->gro_list = skb;
3545 	ret = GRO_HELD;
3546 
3547 pull:
3548 	if (skb_headlen(skb) < skb_gro_offset(skb)) {
3549 		int grow = skb_gro_offset(skb) - skb_headlen(skb);
3550 
3551 		BUG_ON(skb->end - skb->tail < grow);
3552 
3553 		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3554 
3555 		skb->tail += grow;
3556 		skb->data_len -= grow;
3557 
3558 		skb_shinfo(skb)->frags[0].page_offset += grow;
3559 		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3560 
3561 		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3562 			skb_frag_unref(skb, 0);
3563 			memmove(skb_shinfo(skb)->frags,
3564 				skb_shinfo(skb)->frags + 1,
3565 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3566 		}
3567 	}
3568 
3569 ok:
3570 	return ret;
3571 
3572 normal:
3573 	ret = GRO_NORMAL;
3574 	goto pull;
3575 }
3576 EXPORT_SYMBOL(dev_gro_receive);
3577 
3578 static inline gro_result_t
3579 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3580 {
3581 	struct sk_buff *p;
3582 	unsigned int maclen = skb->dev->hard_header_len;
3583 
3584 	for (p = napi->gro_list; p; p = p->next) {
3585 		unsigned long diffs;
3586 
3587 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3588 		diffs |= p->vlan_tci ^ skb->vlan_tci;
3589 		if (maclen == ETH_HLEN)
3590 			diffs |= compare_ether_header(skb_mac_header(p),
3591 						      skb_gro_mac_header(skb));
3592 		else if (!diffs)
3593 			diffs = memcmp(skb_mac_header(p),
3594 				       skb_gro_mac_header(skb),
3595 				       maclen);
3596 		NAPI_GRO_CB(p)->same_flow = !diffs;
3597 		NAPI_GRO_CB(p)->flush = 0;
3598 	}
3599 
3600 	return dev_gro_receive(napi, skb);
3601 }
3602 
3603 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3604 {
3605 	switch (ret) {
3606 	case GRO_NORMAL:
3607 		if (netif_receive_skb(skb))
3608 			ret = GRO_DROP;
3609 		break;
3610 
3611 	case GRO_DROP:
3612 		kfree_skb(skb);
3613 		break;
3614 
3615 	case GRO_MERGED_FREE:
3616 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3617 			kmem_cache_free(skbuff_head_cache, skb);
3618 		else
3619 			__kfree_skb(skb);
3620 		break;
3621 
3622 	case GRO_HELD:
3623 	case GRO_MERGED:
3624 		break;
3625 	}
3626 
3627 	return ret;
3628 }
3629 EXPORT_SYMBOL(napi_skb_finish);
3630 
3631 void skb_gro_reset_offset(struct sk_buff *skb)
3632 {
3633 	NAPI_GRO_CB(skb)->data_offset = 0;
3634 	NAPI_GRO_CB(skb)->frag0 = NULL;
3635 	NAPI_GRO_CB(skb)->frag0_len = 0;
3636 
3637 	if (skb->mac_header == skb->tail &&
3638 	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3639 		NAPI_GRO_CB(skb)->frag0 =
3640 			skb_frag_address(&skb_shinfo(skb)->frags[0]);
3641 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3642 	}
3643 }
3644 EXPORT_SYMBOL(skb_gro_reset_offset);
3645 
3646 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3647 {
3648 	skb_gro_reset_offset(skb);
3649 
3650 	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3651 }
3652 EXPORT_SYMBOL(napi_gro_receive);
3653 
3654 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3655 {
3656 	__skb_pull(skb, skb_headlen(skb));
3657 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
3658 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3659 	skb->vlan_tci = 0;
3660 	skb->dev = napi->dev;
3661 	skb->skb_iif = 0;
3662 
3663 	napi->skb = skb;
3664 }
3665 
3666 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3667 {
3668 	struct sk_buff *skb = napi->skb;
3669 
3670 	if (!skb) {
3671 		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3672 		if (skb)
3673 			napi->skb = skb;
3674 	}
3675 	return skb;
3676 }
3677 EXPORT_SYMBOL(napi_get_frags);
3678 
3679 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3680 			       gro_result_t ret)
3681 {
3682 	switch (ret) {
3683 	case GRO_NORMAL:
3684 	case GRO_HELD:
3685 		skb->protocol = eth_type_trans(skb, skb->dev);
3686 
3687 		if (ret == GRO_HELD)
3688 			skb_gro_pull(skb, -ETH_HLEN);
3689 		else if (netif_receive_skb(skb))
3690 			ret = GRO_DROP;
3691 		break;
3692 
3693 	case GRO_DROP:
3694 	case GRO_MERGED_FREE:
3695 		napi_reuse_skb(napi, skb);
3696 		break;
3697 
3698 	case GRO_MERGED:
3699 		break;
3700 	}
3701 
3702 	return ret;
3703 }
3704 EXPORT_SYMBOL(napi_frags_finish);
3705 
3706 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3707 {
3708 	struct sk_buff *skb = napi->skb;
3709 	struct ethhdr *eth;
3710 	unsigned int hlen;
3711 	unsigned int off;
3712 
3713 	napi->skb = NULL;
3714 
3715 	skb_reset_mac_header(skb);
3716 	skb_gro_reset_offset(skb);
3717 
3718 	off = skb_gro_offset(skb);
3719 	hlen = off + sizeof(*eth);
3720 	eth = skb_gro_header_fast(skb, off);
3721 	if (skb_gro_header_hard(skb, hlen)) {
3722 		eth = skb_gro_header_slow(skb, hlen, off);
3723 		if (unlikely(!eth)) {
3724 			napi_reuse_skb(napi, skb);
3725 			skb = NULL;
3726 			goto out;
3727 		}
3728 	}
3729 
3730 	skb_gro_pull(skb, sizeof(*eth));
3731 
3732 	/*
3733 	 * This works because the only protocols we care about don't require
3734 	 * special handling.  We'll fix it up properly at the end.
3735 	 */
3736 	skb->protocol = eth->h_proto;
3737 
3738 out:
3739 	return skb;
3740 }
3741 
3742 gro_result_t napi_gro_frags(struct napi_struct *napi)
3743 {
3744 	struct sk_buff *skb = napi_frags_skb(napi);
3745 
3746 	if (!skb)
3747 		return GRO_DROP;
3748 
3749 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3750 }
3751 EXPORT_SYMBOL(napi_gro_frags);
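
/* Example: a minimal sketch of the page-based receive path that pairs
 * napi_get_frags() with napi_gro_frags(); struct my_rx_desc and its my_*
 * fields are hypothetical.
 */
#if 0
static void my_receive_frags(struct napi_struct *napi, struct my_rx_desc *desc)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;

	/* Attach the received page as a fragment; napi_frags_skb() will pull
	 * and map the Ethernet header later.
	 */
	skb_fill_page_desc(skb, 0, desc->my_page, desc->my_offset, desc->my_len);
	skb->len += desc->my_len;
	skb->data_len += desc->my_len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);
}
#endif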
3752 
3753 /*
3754  * net_rps_action sends any pending IPIs for RPS.
3755  * Note: called with local irq disabled, but exits with local irq enabled.
3756  */
3757 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3758 {
3759 #ifdef CONFIG_RPS
3760 	struct softnet_data *remsd = sd->rps_ipi_list;
3761 
3762 	if (remsd) {
3763 		sd->rps_ipi_list = NULL;
3764 
3765 		local_irq_enable();
3766 
3767 		/* Send pending IPIs to kick RPS processing on remote cpus. */
3768 		while (remsd) {
3769 			struct softnet_data *next = remsd->rps_ipi_next;
3770 
3771 			if (cpu_online(remsd->cpu))
3772 				__smp_call_function_single(remsd->cpu,
3773 							   &remsd->csd, 0);
3774 			remsd = next;
3775 		}
3776 	} else
3777 #endif
3778 		local_irq_enable();
3779 }
3780 
3781 static int process_backlog(struct napi_struct *napi, int quota)
3782 {
3783 	int work = 0;
3784 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3785 
3786 #ifdef CONFIG_RPS
3787 	/* Check if we have pending IPIs; it's better to send them now
3788 	 * rather than waiting for net_rx_action() to end.
3789 	 */
3790 	if (sd->rps_ipi_list) {
3791 		local_irq_disable();
3792 		net_rps_action_and_irq_enable(sd);
3793 	}
3794 #endif
3795 	napi->weight = weight_p;
3796 	local_irq_disable();
3797 	while (work < quota) {
3798 		struct sk_buff *skb;
3799 		unsigned int qlen;
3800 
3801 		while ((skb = __skb_dequeue(&sd->process_queue))) {
3802 			local_irq_enable();
3803 			__netif_receive_skb(skb);
3804 			local_irq_disable();
3805 			input_queue_head_incr(sd);
3806 			if (++work >= quota) {
3807 				local_irq_enable();
3808 				return work;
3809 			}
3810 		}
3811 
3812 		rps_lock(sd);
3813 		qlen = skb_queue_len(&sd->input_pkt_queue);
3814 		if (qlen)
3815 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
3816 						   &sd->process_queue);
3817 
3818 		if (qlen < quota - work) {
3819 			/*
3820 			 * Inline a custom version of __napi_complete().
3821 			 * Only the current CPU owns and manipulates this napi,
3822 			 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3823 			 * so we can use a plain write instead of clear_bit()
3824 			 * and we don't need an smp_mb() memory barrier.
3825 			 */
3826 			list_del(&napi->poll_list);
3827 			napi->state = 0;
3828 
3829 			quota = work + qlen;
3830 		}
3831 		rps_unlock(sd);
3832 	}
3833 	local_irq_enable();
3834 
3835 	return work;
3836 }
3837 
3838 /**
3839  * __napi_schedule - schedule for receive
3840  * @n: entry to schedule
3841  *
3842  * The entry's receive function will be scheduled to run
3843  */
3844 void __napi_schedule(struct napi_struct *n)
3845 {
3846 	unsigned long flags;
3847 
3848 	local_irq_save(flags);
3849 	____napi_schedule(&__get_cpu_var(softnet_data), n);
3850 	local_irq_restore(flags);
3851 }
3852 EXPORT_SYMBOL(__napi_schedule);
3853 
3854 void __napi_complete(struct napi_struct *n)
3855 {
3856 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3857 	BUG_ON(n->gro_list);
3858 
3859 	list_del(&n->poll_list);
3860 	smp_mb__before_clear_bit();
3861 	clear_bit(NAPI_STATE_SCHED, &n->state);
3862 }
3863 EXPORT_SYMBOL(__napi_complete);
3864 
3865 void napi_complete(struct napi_struct *n)
3866 {
3867 	unsigned long flags;
3868 
3869 	/*
3870 	 * Don't let napi dequeue from the CPU poll list,
3871 	 * just in case it's running on a different CPU.
3872 	 */
3873 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3874 		return;
3875 
3876 	napi_gro_flush(n);
3877 	local_irq_save(flags);
3878 	__napi_complete(n);
3879 	local_irq_restore(flags);
3880 }
3881 EXPORT_SYMBOL(napi_complete);
3882 
3883 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3884 		    int (*poll)(struct napi_struct *, int), int weight)
3885 {
3886 	INIT_LIST_HEAD(&napi->poll_list);
3887 	napi->gro_count = 0;
3888 	napi->gro_list = NULL;
3889 	napi->skb = NULL;
3890 	napi->poll = poll;
3891 	napi->weight = weight;
3892 	list_add(&napi->dev_list, &dev->napi_list);
3893 	napi->dev = dev;
3894 #ifdef CONFIG_NETPOLL
3895 	spin_lock_init(&napi->poll_lock);
3896 	napi->poll_owner = -1;
3897 #endif
3898 	set_bit(NAPI_STATE_SCHED, &napi->state);
3899 }
3900 EXPORT_SYMBOL(netif_napi_add);
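
/*
 * Example (editorial sketch, not part of the original source): a typical
 * driver wires NAPI up roughly as below.  The names foo_priv, foo_poll,
 * foo_interrupt, foo_clean_rx_ring and foo_{enable,disable}_irq are
 * hypothetical; netif_napi_add(), napi_enable(), napi_schedule() and
 * napi_complete() are the real kernel APIs used by this file.
 *
 *	struct foo_priv {
 *		struct napi_struct napi;
 *		struct net_device *dev;
 *	};
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx_ring(priv, budget);	// feed skbs to the stack
 *
 *		if (work < budget) {
 *			napi_complete(napi);			// done: allow rescheduling
 *			foo_enable_irq(priv);			// re-enable device interrupts
 *		}
 *		return work;
 *	}
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_irq(priv);				// mask RX irq, defer to softirq
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	In the probe path:
 *		netif_napi_add(netdev, &priv->napi, foo_poll, 64);
 *		napi_enable(&priv->napi);
 */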
3901 
3902 void netif_napi_del(struct napi_struct *napi)
3903 {
3904 	struct sk_buff *skb, *next;
3905 
3906 	list_del_init(&napi->dev_list);
3907 	napi_free_frags(napi);
3908 
3909 	for (skb = napi->gro_list; skb; skb = next) {
3910 		next = skb->next;
3911 		skb->next = NULL;
3912 		kfree_skb(skb);
3913 	}
3914 
3915 	napi->gro_list = NULL;
3916 	napi->gro_count = 0;
3917 }
3918 EXPORT_SYMBOL(netif_napi_del);
3919 
3920 static void net_rx_action(struct softirq_action *h)
3921 {
3922 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3923 	unsigned long time_limit = jiffies + 2;
3924 	int budget = netdev_budget;
3925 	void *have;
3926 
3927 	local_irq_disable();
3928 
3929 	while (!list_empty(&sd->poll_list)) {
3930 		struct napi_struct *n;
3931 		int work, weight;
3932 
3933 		/* If the softirq window is exhausted then punt.
3934 		 * Allow this to run for 2 jiffies, which allows
3935 		 * an average latency of 1.5/HZ.
3936 		 */
3937 		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3938 			goto softnet_break;
3939 
3940 		local_irq_enable();
3941 
3942 		/* Even though interrupts have been re-enabled, this
3943 		 * access is safe because interrupts can only add new
3944 		 * entries to the tail of this list, and only ->poll()
3945 		 * calls can remove this head entry from the list.
3946 		 */
3947 		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3948 
3949 		have = netpoll_poll_lock(n);
3950 
3951 		weight = n->weight;
3952 
3953 		/* This NAPI_STATE_SCHED test is for avoiding a race
3954 		 * with netpoll's poll_napi().  Only the entity which
3955 		 * obtains the lock and sees NAPI_STATE_SCHED set will
3956 		 * actually make the ->poll() call.  Therefore we avoid
3957 		 * accidentally calling ->poll() when NAPI is not scheduled.
3958 		 */
3959 		work = 0;
3960 		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3961 			work = n->poll(n, weight);
3962 			trace_napi_poll(n);
3963 		}
3964 
3965 		WARN_ON_ONCE(work > weight);
3966 
3967 		budget -= work;
3968 
3969 		local_irq_disable();
3970 
3971 		/* Drivers must not modify the NAPI state if they
3972 		 * consume the entire weight.  In such cases this code
3973 		 * still "owns" the NAPI instance and therefore can
3974 		 * move the instance around on the list at-will.
3975 		 */
3976 		if (unlikely(work == weight)) {
3977 			if (unlikely(napi_disable_pending(n))) {
3978 				local_irq_enable();
3979 				napi_complete(n);
3980 				local_irq_disable();
3981 			} else
3982 				list_move_tail(&n->poll_list, &sd->poll_list);
3983 		}
3984 
3985 		netpoll_poll_unlock(have);
3986 	}
3987 out:
3988 	net_rps_action_and_irq_enable(sd);
3989 
3990 #ifdef CONFIG_NET_DMA
3991 	/*
3992 	 * There may not be any more sk_buffs coming right now, so push
3993 	 * any pending DMA copies to hardware
3994 	 */
3995 	dma_issue_pending_all();
3996 #endif
3997 
3998 	return;
3999 
4000 softnet_break:
4001 	sd->time_squeeze++;
4002 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4003 	goto out;
4004 }
4005 
4006 static gifconf_func_t *gifconf_list[NPROTO];
4007 
4008 /**
4009  *	register_gifconf	-	register a SIOCGIF handler
4010  *	@family: Address family
4011  *	@gifconf: Function handler
4012  *
4013  *	Register protocol dependent address dumping routines. The handler
4014  *	that is passed must not be freed or reused until it has been replaced
4015  *	by another handler.
4016  */
4017 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4018 {
4019 	if (family >= NPROTO)
4020 		return -EINVAL;
4021 	gifconf_list[family] = gifconf;
4022 	return 0;
4023 }
4024 EXPORT_SYMBOL(register_gifconf);
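
/*
 * Example (editorial sketch): an address family registers its SIOCGIFCONF
 * handler during initialization roughly like this (inet_gifconf dumps a
 * device's IPv4 addresses into the user buffer):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * dev_ifconf() below then invokes the registered handler once per device.
 */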
4025 
4026 
4027 /*
4028  *	Map an interface index to its name (SIOCGIFNAME)
4029  */
4030 
4031 /*
4032  *	We need this ioctl for efficient implementation of the
4033  *	if_indextoname() function required by the IPv6 API.  Without
4034  *	it, we would have to search all the interfaces to find a
4035  *	match.  --pb
4036  */
4037 
4038 static int dev_ifname(struct net *net, struct ifreq __user *arg)
4039 {
4040 	struct net_device *dev;
4041 	struct ifreq ifr;
4042 
4043 	/*
4044 	 *	Fetch the caller's info block.
4045 	 */
4046 
4047 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4048 		return -EFAULT;
4049 
4050 	rcu_read_lock();
4051 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4052 	if (!dev) {
4053 		rcu_read_unlock();
4054 		return -ENODEV;
4055 	}
4056 
4057 	strcpy(ifr.ifr_name, dev->name);
4058 	rcu_read_unlock();
4059 
4060 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4061 		return -EFAULT;
4062 	return 0;
4063 }
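
/*
 * Example (editorial sketch): the matching user-space call, as made by an
 * if_indextoname()-style helper.  Error handling is abbreviated.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int index_to_name(int ifindex, char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_ifindex = ifindex;
 *		if (ioctl(fd, SIOCGIFNAME, &ifr) < 0) {	// handled by dev_ifname() above
 *			close(fd);
 *			return -1;
 *		}
 *		strncpy(name, ifr.ifr_name, IFNAMSIZ);
 *		close(fd);
 *		return 0;
 *	}
 */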
4064 
4065 /*
4066  *	Perform a SIOCGIFCONF call. This structure will change
4067  *	size eventually, and there is nothing I can do about it.
4068  *	Thus we will need a 'compatibility mode'.
4069  */
4070 
4071 static int dev_ifconf(struct net *net, char __user *arg)
4072 {
4073 	struct ifconf ifc;
4074 	struct net_device *dev;
4075 	char __user *pos;
4076 	int len;
4077 	int total;
4078 	int i;
4079 
4080 	/*
4081 	 *	Fetch the caller's info block.
4082 	 */
4083 
4084 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4085 		return -EFAULT;
4086 
4087 	pos = ifc.ifc_buf;
4088 	len = ifc.ifc_len;
4089 
4090 	/*
4091 	 *	Loop over the interfaces, and write an info block for each.
4092 	 */
4093 
4094 	total = 0;
4095 	for_each_netdev(net, dev) {
4096 		for (i = 0; i < NPROTO; i++) {
4097 			if (gifconf_list[i]) {
4098 				int done;
4099 				if (!pos)
4100 					done = gifconf_list[i](dev, NULL, 0);
4101 				else
4102 					done = gifconf_list[i](dev, pos + total,
4103 							       len - total);
4104 				if (done < 0)
4105 					return -EFAULT;
4106 				total += done;
4107 			}
4108 		}
4109 	}
4110 
4111 	/*
4112 	 *	All done.  Write the updated control block back to the caller.
4113 	 */
4114 	ifc.ifc_len = total;
4115 
4116 	/*
4117 	 * 	Both BSD and Solaris return 0 here, so we do too.
4118 	 */
4119 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4120 }
4121 
4122 #ifdef CONFIG_PROC_FS
4123 
4124 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4125 
4126 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4127 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4128 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4129 
4130 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4131 {
4132 	struct net *net = seq_file_net(seq);
4133 	struct net_device *dev;
4134 	struct hlist_node *p;
4135 	struct hlist_head *h;
4136 	unsigned int count = 0, offset = get_offset(*pos);
4137 
4138 	h = &net->dev_name_head[get_bucket(*pos)];
4139 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4140 		if (++count == offset)
4141 			return dev;
4142 	}
4143 
4144 	return NULL;
4145 }
4146 
4147 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4148 {
4149 	struct net_device *dev;
4150 	unsigned int bucket;
4151 
4152 	do {
4153 		dev = dev_from_same_bucket(seq, pos);
4154 		if (dev)
4155 			return dev;
4156 
4157 		bucket = get_bucket(*pos) + 1;
4158 		*pos = set_bucket_offset(bucket, 1);
4159 	} while (bucket < NETDEV_HASHENTRIES);
4160 
4161 	return NULL;
4162 }
4163 
4164 /*
4165  *	This is invoked by the /proc filesystem handler to display a device
4166  *	in detail.
4167  */
4168 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4169 	__acquires(RCU)
4170 {
4171 	rcu_read_lock();
4172 	if (!*pos)
4173 		return SEQ_START_TOKEN;
4174 
4175 	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4176 		return NULL;
4177 
4178 	return dev_from_bucket(seq, pos);
4179 }
4180 
4181 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4182 {
4183 	++*pos;
4184 	return dev_from_bucket(seq, pos);
4185 }
4186 
4187 void dev_seq_stop(struct seq_file *seq, void *v)
4188 	__releases(RCU)
4189 {
4190 	rcu_read_unlock();
4191 }
4192 
4193 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4194 {
4195 	struct rtnl_link_stats64 temp;
4196 	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4197 
4198 	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4199 		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4200 		   dev->name, stats->rx_bytes, stats->rx_packets,
4201 		   stats->rx_errors,
4202 		   stats->rx_dropped + stats->rx_missed_errors,
4203 		   stats->rx_fifo_errors,
4204 		   stats->rx_length_errors + stats->rx_over_errors +
4205 		    stats->rx_crc_errors + stats->rx_frame_errors,
4206 		   stats->rx_compressed, stats->multicast,
4207 		   stats->tx_bytes, stats->tx_packets,
4208 		   stats->tx_errors, stats->tx_dropped,
4209 		   stats->tx_fifo_errors, stats->collisions,
4210 		   stats->tx_carrier_errors +
4211 		    stats->tx_aborted_errors +
4212 		    stats->tx_window_errors +
4213 		    stats->tx_heartbeat_errors,
4214 		   stats->tx_compressed);
4215 }
4216 
4217 /*
4218  *	Called from the PROCfs module. This now uses the new arbitrary-sized
4219  *	/proc/net interface to create /proc/net/dev.
4220  */
4221 static int dev_seq_show(struct seq_file *seq, void *v)
4222 {
4223 	if (v == SEQ_START_TOKEN)
4224 		seq_puts(seq, "Inter-|   Receive                            "
4225 			      "                    |  Transmit\n"
4226 			      " face |bytes    packets errs drop fifo frame "
4227 			      "compressed multicast|bytes    packets errs "
4228 			      "drop fifo colls carrier compressed\n");
4229 	else
4230 		dev_seq_printf_stats(seq, v);
4231 	return 0;
4232 }
4233 
4234 static struct softnet_data *softnet_get_online(loff_t *pos)
4235 {
4236 	struct softnet_data *sd = NULL;
4237 
4238 	while (*pos < nr_cpu_ids)
4239 		if (cpu_online(*pos)) {
4240 			sd = &per_cpu(softnet_data, *pos);
4241 			break;
4242 		} else
4243 			++*pos;
4244 	return sd;
4245 }
4246 
4247 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4248 {
4249 	return softnet_get_online(pos);
4250 }
4251 
4252 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4253 {
4254 	++*pos;
4255 	return softnet_get_online(pos);
4256 }
4257 
4258 static void softnet_seq_stop(struct seq_file *seq, void *v)
4259 {
4260 }
4261 
4262 static int softnet_seq_show(struct seq_file *seq, void *v)
4263 {
4264 	struct softnet_data *sd = v;
4265 
4266 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4267 		   sd->processed, sd->dropped, sd->time_squeeze, 0,
4268 		   0, 0, 0, 0, /* was fastroute */
4269 		   sd->cpu_collision, sd->received_rps);
4270 	return 0;
4271 }
4272 
4273 static const struct seq_operations dev_seq_ops = {
4274 	.start = dev_seq_start,
4275 	.next  = dev_seq_next,
4276 	.stop  = dev_seq_stop,
4277 	.show  = dev_seq_show,
4278 };
4279 
4280 static int dev_seq_open(struct inode *inode, struct file *file)
4281 {
4282 	return seq_open_net(inode, file, &dev_seq_ops,
4283 			    sizeof(struct seq_net_private));
4284 }
4285 
4286 static const struct file_operations dev_seq_fops = {
4287 	.owner	 = THIS_MODULE,
4288 	.open    = dev_seq_open,
4289 	.read    = seq_read,
4290 	.llseek  = seq_lseek,
4291 	.release = seq_release_net,
4292 };
4293 
4294 static const struct seq_operations softnet_seq_ops = {
4295 	.start = softnet_seq_start,
4296 	.next  = softnet_seq_next,
4297 	.stop  = softnet_seq_stop,
4298 	.show  = softnet_seq_show,
4299 };
4300 
4301 static int softnet_seq_open(struct inode *inode, struct file *file)
4302 {
4303 	return seq_open(file, &softnet_seq_ops);
4304 }
4305 
4306 static const struct file_operations softnet_seq_fops = {
4307 	.owner	 = THIS_MODULE,
4308 	.open    = softnet_seq_open,
4309 	.read    = seq_read,
4310 	.llseek  = seq_lseek,
4311 	.release = seq_release,
4312 };
4313 
4314 static void *ptype_get_idx(loff_t pos)
4315 {
4316 	struct packet_type *pt = NULL;
4317 	loff_t i = 0;
4318 	int t;
4319 
4320 	list_for_each_entry_rcu(pt, &ptype_all, list) {
4321 		if (i == pos)
4322 			return pt;
4323 		++i;
4324 	}
4325 
4326 	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4327 		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4328 			if (i == pos)
4329 				return pt;
4330 			++i;
4331 		}
4332 	}
4333 	return NULL;
4334 }
4335 
4336 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4337 	__acquires(RCU)
4338 {
4339 	rcu_read_lock();
4340 	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4341 }
4342 
4343 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4344 {
4345 	struct packet_type *pt;
4346 	struct list_head *nxt;
4347 	int hash;
4348 
4349 	++*pos;
4350 	if (v == SEQ_START_TOKEN)
4351 		return ptype_get_idx(0);
4352 
4353 	pt = v;
4354 	nxt = pt->list.next;
4355 	if (pt->type == htons(ETH_P_ALL)) {
4356 		if (nxt != &ptype_all)
4357 			goto found;
4358 		hash = 0;
4359 		nxt = ptype_base[0].next;
4360 	} else
4361 		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4362 
4363 	while (nxt == &ptype_base[hash]) {
4364 		if (++hash >= PTYPE_HASH_SIZE)
4365 			return NULL;
4366 		nxt = ptype_base[hash].next;
4367 	}
4368 found:
4369 	return list_entry(nxt, struct packet_type, list);
4370 }
4371 
4372 static void ptype_seq_stop(struct seq_file *seq, void *v)
4373 	__releases(RCU)
4374 {
4375 	rcu_read_unlock();
4376 }
4377 
4378 static int ptype_seq_show(struct seq_file *seq, void *v)
4379 {
4380 	struct packet_type *pt = v;
4381 
4382 	if (v == SEQ_START_TOKEN)
4383 		seq_puts(seq, "Type Device      Function\n");
4384 	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4385 		if (pt->type == htons(ETH_P_ALL))
4386 			seq_puts(seq, "ALL ");
4387 		else
4388 			seq_printf(seq, "%04x", ntohs(pt->type));
4389 
4390 		seq_printf(seq, " %-8s %pF\n",
4391 			   pt->dev ? pt->dev->name : "", pt->func);
4392 	}
4393 
4394 	return 0;
4395 }
4396 
4397 static const struct seq_operations ptype_seq_ops = {
4398 	.start = ptype_seq_start,
4399 	.next  = ptype_seq_next,
4400 	.stop  = ptype_seq_stop,
4401 	.show  = ptype_seq_show,
4402 };
4403 
4404 static int ptype_seq_open(struct inode *inode, struct file *file)
4405 {
4406 	return seq_open_net(inode, file, &ptype_seq_ops,
4407 			sizeof(struct seq_net_private));
4408 }
4409 
4410 static const struct file_operations ptype_seq_fops = {
4411 	.owner	 = THIS_MODULE,
4412 	.open    = ptype_seq_open,
4413 	.read    = seq_read,
4414 	.llseek  = seq_lseek,
4415 	.release = seq_release_net,
4416 };
4417 
4418 
4419 static int __net_init dev_proc_net_init(struct net *net)
4420 {
4421 	int rc = -ENOMEM;
4422 
4423 	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4424 		goto out;
4425 	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4426 		goto out_dev;
4427 	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4428 		goto out_softnet;
4429 
4430 	if (wext_proc_init(net))
4431 		goto out_ptype;
4432 	rc = 0;
4433 out:
4434 	return rc;
4435 out_ptype:
4436 	proc_net_remove(net, "ptype");
4437 out_softnet:
4438 	proc_net_remove(net, "softnet_stat");
4439 out_dev:
4440 	proc_net_remove(net, "dev");
4441 	goto out;
4442 }
4443 
4444 static void __net_exit dev_proc_net_exit(struct net *net)
4445 {
4446 	wext_proc_exit(net);
4447 
4448 	proc_net_remove(net, "ptype");
4449 	proc_net_remove(net, "softnet_stat");
4450 	proc_net_remove(net, "dev");
4451 }
4452 
4453 static struct pernet_operations __net_initdata dev_proc_ops = {
4454 	.init = dev_proc_net_init,
4455 	.exit = dev_proc_net_exit,
4456 };
4457 
4458 static int __init dev_proc_init(void)
4459 {
4460 	return register_pernet_subsys(&dev_proc_ops);
4461 }
4462 #else
4463 #define dev_proc_init() 0
4464 #endif	/* CONFIG_PROC_FS */
4465 
4466 
4467 /**
4468  *	netdev_set_master	-	set up master pointer
4469  *	@slave: slave device
4470  *	@master: new master device
4471  *
4472  *	Changes the master device of the slave. Pass %NULL to break the
4473  *	bonding. The caller must hold the RTNL semaphore. On a failure
4474  *	a negative errno code is returned. On success the reference counts
4475  *	are adjusted and the function returns zero.
4476  */
4477 int netdev_set_master(struct net_device *slave, struct net_device *master)
4478 {
4479 	struct net_device *old = slave->master;
4480 
4481 	ASSERT_RTNL();
4482 
4483 	if (master) {
4484 		if (old)
4485 			return -EBUSY;
4486 		dev_hold(master);
4487 	}
4488 
4489 	slave->master = master;
4490 
4491 	if (old)
4492 		dev_put(old);
4493 	return 0;
4494 }
4495 EXPORT_SYMBOL(netdev_set_master);
4496 
4497 /**
4498  *	netdev_set_bond_master	-	set up bonding master/slave pair
4499  *	@slave: slave device
4500  *	@master: new master device
4501  *
4502  *	Changes the master device of the slave. Pass %NULL to break the
4503  *	bonding. The caller must hold the RTNL semaphore. On a failure
4504  *	a negative errno code is returned. On success %RTM_NEWLINK is sent
4505  *	to the routing socket and the function returns zero.
4506  */
4507 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4508 {
4509 	int err;
4510 
4511 	ASSERT_RTNL();
4512 
4513 	err = netdev_set_master(slave, master);
4514 	if (err)
4515 		return err;
4516 	if (master)
4517 		slave->flags |= IFF_SLAVE;
4518 	else
4519 		slave->flags &= ~IFF_SLAVE;
4520 
4521 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4522 	return 0;
4523 }
4524 EXPORT_SYMBOL(netdev_set_bond_master);
4525 
4526 static void dev_change_rx_flags(struct net_device *dev, int flags)
4527 {
4528 	const struct net_device_ops *ops = dev->netdev_ops;
4529 
4530 	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4531 		ops->ndo_change_rx_flags(dev, flags);
4532 }
4533 
4534 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4535 {
4536 	unsigned int old_flags = dev->flags;
4537 	kuid_t uid;
4538 	kgid_t gid;
4539 
4540 	ASSERT_RTNL();
4541 
4542 	dev->flags |= IFF_PROMISC;
4543 	dev->promiscuity += inc;
4544 	if (dev->promiscuity == 0) {
4545 		/*
4546 		 * Avoid overflow.
4547 		 * If inc causes overflow, leave promiscuity untouched and return an error.
4548 		 */
4549 		if (inc < 0)
4550 			dev->flags &= ~IFF_PROMISC;
4551 		else {
4552 			dev->promiscuity -= inc;
4553 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4554 				dev->name);
4555 			return -EOVERFLOW;
4556 		}
4557 	}
4558 	if (dev->flags != old_flags) {
4559 		pr_info("device %s %s promiscuous mode\n",
4560 			dev->name,
4561 			dev->flags & IFF_PROMISC ? "entered" : "left");
4562 		if (audit_enabled) {
4563 			current_uid_gid(&uid, &gid);
4564 			audit_log(current->audit_context, GFP_ATOMIC,
4565 				AUDIT_ANOM_PROMISCUOUS,
4566 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4567 				dev->name, (dev->flags & IFF_PROMISC),
4568 				(old_flags & IFF_PROMISC),
4569 				audit_get_loginuid(current),
4570 				from_kuid(&init_user_ns, uid),
4571 				from_kgid(&init_user_ns, gid),
4572 				audit_get_sessionid(current));
4573 		}
4574 
4575 		dev_change_rx_flags(dev, IFF_PROMISC);
4576 	}
4577 	return 0;
4578 }
4579 
4580 /**
4581  *	dev_set_promiscuity	- update promiscuity count on a device
4582  *	@dev: device
4583  *	@inc: modifier
4584  *
4585  *	Add or remove promiscuity from a device. While the count in the device
4586  *	remains above zero the interface remains promiscuous. Once it hits zero
4587  *	the device reverts back to normal filtering operation. A negative inc
4588  *	value is used to drop promiscuity on the device.
4589  *	Return 0 if successful or a negative errno code on error.
4590  */
4591 int dev_set_promiscuity(struct net_device *dev, int inc)
4592 {
4593 	unsigned int old_flags = dev->flags;
4594 	int err;
4595 
4596 	err = __dev_set_promiscuity(dev, inc);
4597 	if (err < 0)
4598 		return err;
4599 	if (dev->flags != old_flags)
4600 		dev_set_rx_mode(dev);
4601 	return err;
4602 }
4603 EXPORT_SYMBOL(dev_set_promiscuity);
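
/*
 * Example (editorial sketch): an in-kernel consumer such as a packet-capture
 * hook bumps and drops the promiscuity count symmetrically, under RTNL,
 * instead of toggling IFF_PROMISC directly:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// enter promiscuous mode (refcounted)
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// drop our reference again
 *	rtnl_unlock();
 */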
4604 
4605 /**
4606  *	dev_set_allmulti	- update allmulti count on a device
4607  *	@dev: device
4608  *	@inc: modifier
4609  *
4610  *	Add or remove reception of all multicast frames to a device. While the
4611  *	count in the device remains above zero the interface keeps receiving
4612  *	all multicast frames. Once it hits zero the device reverts back to normal
4613  *	filtering operation. A negative @inc value is used to drop the counter
4614  *	when releasing a resource needing all multicasts.
4615  *	Return 0 if successful or a negative errno code on error.
4616  */
4617 
4618 int dev_set_allmulti(struct net_device *dev, int inc)
4619 {
4620 	unsigned int old_flags = dev->flags;
4621 
4622 	ASSERT_RTNL();
4623 
4624 	dev->flags |= IFF_ALLMULTI;
4625 	dev->allmulti += inc;
4626 	if (dev->allmulti == 0) {
4627 		/*
4628 		 * Avoid overflow.
4629 		 * If inc causes overflow, leave allmulti untouched and return an error.
4630 		 */
4631 		if (inc < 0)
4632 			dev->flags &= ~IFF_ALLMULTI;
4633 		else {
4634 			dev->allmulti -= inc;
4635 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4636 				dev->name);
4637 			return -EOVERFLOW;
4638 		}
4639 	}
4640 	if (dev->flags ^ old_flags) {
4641 		dev_change_rx_flags(dev, IFF_ALLMULTI);
4642 		dev_set_rx_mode(dev);
4643 	}
4644 	return 0;
4645 }
4646 EXPORT_SYMBOL(dev_set_allmulti);
4647 
4648 /*
4649  *	Upload unicast and multicast address lists to device and
4650  *	configure RX filtering. When the device doesn't support unicast
4651  *	filtering it is put in promiscuous mode while unicast addresses
4652  *	are present.
4653  */
4654 void __dev_set_rx_mode(struct net_device *dev)
4655 {
4656 	const struct net_device_ops *ops = dev->netdev_ops;
4657 
4658 	/* dev_open will call this function so the list will stay sane. */
4659 	if (!(dev->flags&IFF_UP))
4660 		return;
4661 
4662 	if (!netif_device_present(dev))
4663 		return;
4664 
4665 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4666 		/* Unicast address changes may only happen under the rtnl,
4667 		 * therefore calling __dev_set_promiscuity here is safe.
4668 		 */
4669 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4670 			__dev_set_promiscuity(dev, 1);
4671 			dev->uc_promisc = true;
4672 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4673 			__dev_set_promiscuity(dev, -1);
4674 			dev->uc_promisc = false;
4675 		}
4676 	}
4677 
4678 	if (ops->ndo_set_rx_mode)
4679 		ops->ndo_set_rx_mode(dev);
4680 }
4681 
4682 void dev_set_rx_mode(struct net_device *dev)
4683 {
4684 	netif_addr_lock_bh(dev);
4685 	__dev_set_rx_mode(dev);
4686 	netif_addr_unlock_bh(dev);
4687 }
4688 
4689 /**
4690  *	dev_get_flags - get flags reported to userspace
4691  *	@dev: device
4692  *
4693  *	Get the combination of flag bits exported through APIs to userspace.
4694  */
4695 unsigned int dev_get_flags(const struct net_device *dev)
4696 {
4697 	unsigned int flags;
4698 
4699 	flags = (dev->flags & ~(IFF_PROMISC |
4700 				IFF_ALLMULTI |
4701 				IFF_RUNNING |
4702 				IFF_LOWER_UP |
4703 				IFF_DORMANT)) |
4704 		(dev->gflags & (IFF_PROMISC |
4705 				IFF_ALLMULTI));
4706 
4707 	if (netif_running(dev)) {
4708 		if (netif_oper_up(dev))
4709 			flags |= IFF_RUNNING;
4710 		if (netif_carrier_ok(dev))
4711 			flags |= IFF_LOWER_UP;
4712 		if (netif_dormant(dev))
4713 			flags |= IFF_DORMANT;
4714 	}
4715 
4716 	return flags;
4717 }
4718 EXPORT_SYMBOL(dev_get_flags);
4719 
4720 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4721 {
4722 	unsigned int old_flags = dev->flags;
4723 	int ret;
4724 
4725 	ASSERT_RTNL();
4726 
4727 	/*
4728 	 *	Set the flags on our device.
4729 	 */
4730 
4731 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4732 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4733 			       IFF_AUTOMEDIA)) |
4734 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4735 				    IFF_ALLMULTI));
4736 
4737 	/*
4738 	 *	Load in the correct multicast list now the flags have changed.
4739 	 */
4740 
4741 	if ((old_flags ^ flags) & IFF_MULTICAST)
4742 		dev_change_rx_flags(dev, IFF_MULTICAST);
4743 
4744 	dev_set_rx_mode(dev);
4745 
4746 	/*
4747 	 *	Have we downed the interface? We handle IFF_UP ourselves
4748 	 *	according to user attempts to set it, rather than blindly
4749 	 *	setting it.
4750 	 */
4751 
4752 	ret = 0;
4753 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4754 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4755 
4756 		if (!ret)
4757 			dev_set_rx_mode(dev);
4758 	}
4759 
4760 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
4761 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
4762 
4763 		dev->gflags ^= IFF_PROMISC;
4764 		dev_set_promiscuity(dev, inc);
4765 	}
4766 
4767 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4768 	   is important. Some (broken) drivers set IFF_PROMISC when
4769 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4770 	 */
4771 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4772 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4773 
4774 		dev->gflags ^= IFF_ALLMULTI;
4775 		dev_set_allmulti(dev, inc);
4776 	}
4777 
4778 	return ret;
4779 }
4780 
4781 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4782 {
4783 	unsigned int changes = dev->flags ^ old_flags;
4784 
4785 	if (changes & IFF_UP) {
4786 		if (dev->flags & IFF_UP)
4787 			call_netdevice_notifiers(NETDEV_UP, dev);
4788 		else
4789 			call_netdevice_notifiers(NETDEV_DOWN, dev);
4790 	}
4791 
4792 	if (dev->flags & IFF_UP &&
4793 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4794 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
4795 }
4796 
4797 /**
4798  *	dev_change_flags - change device settings
4799  *	@dev: device
4800  *	@flags: device state flags
4801  *
4802  *	Change settings on device based state flags. The flags are
4803  *	in the userspace exported format.
4804  */
4805 int dev_change_flags(struct net_device *dev, unsigned int flags)
4806 {
4807 	int ret;
4808 	unsigned int changes, old_flags = dev->flags;
4809 
4810 	ret = __dev_change_flags(dev, flags);
4811 	if (ret < 0)
4812 		return ret;
4813 
4814 	changes = old_flags ^ dev->flags;
4815 	if (changes)
4816 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4817 
4818 	__dev_notify_flags(dev, old_flags);
4819 	return ret;
4820 }
4821 EXPORT_SYMBOL(dev_change_flags);
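
/*
 * Example (editorial sketch): bringing an interface administratively up from
 * kernel code mirrors what SIOCSIFFLAGS does, and must run under RTNL since
 * __dev_change_flags() asserts it:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */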
4822 
4823 /**
4824  *	dev_set_mtu - Change maximum transfer unit
4825  *	@dev: device
4826  *	@new_mtu: new transfer unit
4827  *
4828  *	Change the maximum transfer size of the network device.
4829  */
4830 int dev_set_mtu(struct net_device *dev, int new_mtu)
4831 {
4832 	const struct net_device_ops *ops = dev->netdev_ops;
4833 	int err;
4834 
4835 	if (new_mtu == dev->mtu)
4836 		return 0;
4837 
4838 	/*	MTU must be positive.	 */
4839 	if (new_mtu < 0)
4840 		return -EINVAL;
4841 
4842 	if (!netif_device_present(dev))
4843 		return -ENODEV;
4844 
4845 	err = 0;
4846 	if (ops->ndo_change_mtu)
4847 		err = ops->ndo_change_mtu(dev, new_mtu);
4848 	else
4849 		dev->mtu = new_mtu;
4850 
4851 	if (!err && dev->flags & IFF_UP)
4852 		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4853 	return err;
4854 }
4855 EXPORT_SYMBOL(dev_set_mtu);
4856 
4857 /**
4858  *	dev_set_group - Change group this device belongs to
4859  *	@dev: device
4860  *	@new_group: group this device should belong to
4861  */
4862 void dev_set_group(struct net_device *dev, int new_group)
4863 {
4864 	dev->group = new_group;
4865 }
4866 EXPORT_SYMBOL(dev_set_group);
4867 
4868 /**
4869  *	dev_set_mac_address - Change Media Access Control Address
4870  *	@dev: device
4871  *	@sa: new address
4872  *
4873  *	Change the hardware (MAC) address of the device
4874  */
4875 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4876 {
4877 	const struct net_device_ops *ops = dev->netdev_ops;
4878 	int err;
4879 
4880 	if (!ops->ndo_set_mac_address)
4881 		return -EOPNOTSUPP;
4882 	if (sa->sa_family != dev->type)
4883 		return -EINVAL;
4884 	if (!netif_device_present(dev))
4885 		return -ENODEV;
4886 	err = ops->ndo_set_mac_address(dev, sa);
4887 	if (!err)
4888 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4889 	add_device_randomness(dev->dev_addr, dev->addr_len);
4890 	return err;
4891 }
4892 EXPORT_SYMBOL(dev_set_mac_address);
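
/*
 * Example (editorial sketch): setting a new Ethernet MAC from kernel code.
 * The sockaddr family must match dev->type, and the caller is expected to
 * hold RTNL, just as dev_ifsioc() does for SIOCSIFHWADDR.  The address used
 * here is an arbitrary locally administered one:
 *
 *	struct sockaddr sa;
 *	static const u8 new_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
 *
 *	sa.sa_family = dev->type;		// ARPHRD_ETHER for Ethernet devices
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */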
4893 
4894 /*
4895  *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4896  */
4897 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4898 {
4899 	int err;
4900 	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4901 
4902 	if (!dev)
4903 		return -ENODEV;
4904 
4905 	switch (cmd) {
4906 	case SIOCGIFFLAGS:	/* Get interface flags */
4907 		ifr->ifr_flags = (short) dev_get_flags(dev);
4908 		return 0;
4909 
4910 	case SIOCGIFMETRIC:	/* Get the metric on the interface
4911 				   (currently unused) */
4912 		ifr->ifr_metric = 0;
4913 		return 0;
4914 
4915 	case SIOCGIFMTU:	/* Get the MTU of a device */
4916 		ifr->ifr_mtu = dev->mtu;
4917 		return 0;
4918 
4919 	case SIOCGIFHWADDR:
4920 		if (!dev->addr_len)
4921 			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4922 		else
4923 			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4924 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4925 		ifr->ifr_hwaddr.sa_family = dev->type;
4926 		return 0;
4927 
4928 	case SIOCGIFSLAVE:
4929 		err = -EINVAL;
4930 		break;
4931 
4932 	case SIOCGIFMAP:
4933 		ifr->ifr_map.mem_start = dev->mem_start;
4934 		ifr->ifr_map.mem_end   = dev->mem_end;
4935 		ifr->ifr_map.base_addr = dev->base_addr;
4936 		ifr->ifr_map.irq       = dev->irq;
4937 		ifr->ifr_map.dma       = dev->dma;
4938 		ifr->ifr_map.port      = dev->if_port;
4939 		return 0;
4940 
4941 	case SIOCGIFINDEX:
4942 		ifr->ifr_ifindex = dev->ifindex;
4943 		return 0;
4944 
4945 	case SIOCGIFTXQLEN:
4946 		ifr->ifr_qlen = dev->tx_queue_len;
4947 		return 0;
4948 
4949 	default:
4950 		/* dev_ioctl() should ensure this case
4951 		 * is never reached
4952 		 */
4953 		WARN_ON(1);
4954 		err = -ENOTTY;
4955 		break;
4956 
4957 	}
4958 	return err;
4959 }
4960 
4961 /*
4962  *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
4963  */
4964 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4965 {
4966 	int err;
4967 	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4968 	const struct net_device_ops *ops;
4969 
4970 	if (!dev)
4971 		return -ENODEV;
4972 
4973 	ops = dev->netdev_ops;
4974 
4975 	switch (cmd) {
4976 	case SIOCSIFFLAGS:	/* Set interface flags */
4977 		return dev_change_flags(dev, ifr->ifr_flags);
4978 
4979 	case SIOCSIFMETRIC:	/* Set the metric on the interface
4980 				   (currently unused) */
4981 		return -EOPNOTSUPP;
4982 
4983 	case SIOCSIFMTU:	/* Set the MTU of a device */
4984 		return dev_set_mtu(dev, ifr->ifr_mtu);
4985 
4986 	case SIOCSIFHWADDR:
4987 		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4988 
4989 	case SIOCSIFHWBROADCAST:
4990 		if (ifr->ifr_hwaddr.sa_family != dev->type)
4991 			return -EINVAL;
4992 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4993 		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4994 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4995 		return 0;
4996 
4997 	case SIOCSIFMAP:
4998 		if (ops->ndo_set_config) {
4999 			if (!netif_device_present(dev))
5000 				return -ENODEV;
5001 			return ops->ndo_set_config(dev, &ifr->ifr_map);
5002 		}
5003 		return -EOPNOTSUPP;
5004 
5005 	case SIOCADDMULTI:
5006 		if (!ops->ndo_set_rx_mode ||
5007 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5008 			return -EINVAL;
5009 		if (!netif_device_present(dev))
5010 			return -ENODEV;
5011 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5012 
5013 	case SIOCDELMULTI:
5014 		if (!ops->ndo_set_rx_mode ||
5015 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5016 			return -EINVAL;
5017 		if (!netif_device_present(dev))
5018 			return -ENODEV;
5019 		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5020 
5021 	case SIOCSIFTXQLEN:
5022 		if (ifr->ifr_qlen < 0)
5023 			return -EINVAL;
5024 		dev->tx_queue_len = ifr->ifr_qlen;
5025 		return 0;
5026 
5027 	case SIOCSIFNAME:
5028 		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5029 		return dev_change_name(dev, ifr->ifr_newname);
5030 
5031 	case SIOCSHWTSTAMP:
5032 		err = net_hwtstamp_validate(ifr);
5033 		if (err)
5034 			return err;
5035 		/* fall through */
5036 
5037 	/*
5038 	 *	Unknown or private ioctl
5039 	 */
5040 	default:
5041 		if ((cmd >= SIOCDEVPRIVATE &&
5042 		    cmd <= SIOCDEVPRIVATE + 15) ||
5043 		    cmd == SIOCBONDENSLAVE ||
5044 		    cmd == SIOCBONDRELEASE ||
5045 		    cmd == SIOCBONDSETHWADDR ||
5046 		    cmd == SIOCBONDSLAVEINFOQUERY ||
5047 		    cmd == SIOCBONDINFOQUERY ||
5048 		    cmd == SIOCBONDCHANGEACTIVE ||
5049 		    cmd == SIOCGMIIPHY ||
5050 		    cmd == SIOCGMIIREG ||
5051 		    cmd == SIOCSMIIREG ||
5052 		    cmd == SIOCBRADDIF ||
5053 		    cmd == SIOCBRDELIF ||
5054 		    cmd == SIOCSHWTSTAMP ||
5055 		    cmd == SIOCWANDEV) {
5056 			err = -EOPNOTSUPP;
5057 			if (ops->ndo_do_ioctl) {
5058 				if (netif_device_present(dev))
5059 					err = ops->ndo_do_ioctl(dev, ifr, cmd);
5060 				else
5061 					err = -ENODEV;
5062 			}
5063 		} else
5064 			err = -EINVAL;
5065 
5066 	}
5067 	return err;
5068 }
5069 
5070 /*
5071  *	This function handles all "interface"-type I/O control requests. The actual
5072  *	'doing' part of this is dev_ifsioc above.
5073  */
5074 
5075 /**
5076  *	dev_ioctl	-	network device ioctl
5077  *	@net: the applicable net namespace
5078  *	@cmd: command to issue
5079  *	@arg: pointer to a struct ifreq in user space
5080  *
5081  *	Issue ioctl functions to devices. This is normally called by the
5082  *	user space syscall interfaces but can sometimes be useful for
5083  *	other purposes. The return value is the return from the syscall if
5084  *	positive or a negative errno code on error.
5085  */
5086 
5087 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5088 {
5089 	struct ifreq ifr;
5090 	int ret;
5091 	char *colon;
5092 
5093 	/* One special case: SIOCGIFCONF takes an ifconf argument
5094 	   and requires a shared lock, because it sleeps while writing
5095 	   to user space.
5096 	 */
5097 
5098 	if (cmd == SIOCGIFCONF) {
5099 		rtnl_lock();
5100 		ret = dev_ifconf(net, (char __user *) arg);
5101 		rtnl_unlock();
5102 		return ret;
5103 	}
5104 	if (cmd == SIOCGIFNAME)
5105 		return dev_ifname(net, (struct ifreq __user *)arg);
5106 
5107 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5108 		return -EFAULT;
5109 
5110 	ifr.ifr_name[IFNAMSIZ-1] = 0;
5111 
5112 	colon = strchr(ifr.ifr_name, ':');
5113 	if (colon)
5114 		*colon = 0;
5115 
5116 	/*
5117 	 *	See which interface the caller is talking about.
5118 	 */
5119 
5120 	switch (cmd) {
5121 	/*
5122 	 *	These ioctl calls:
5123 	 *	- can be done by all.
5124 	 *	- atomic and do not require locking.
5125 	 *	- return a value
5126 	 */
5127 	case SIOCGIFFLAGS:
5128 	case SIOCGIFMETRIC:
5129 	case SIOCGIFMTU:
5130 	case SIOCGIFHWADDR:
5131 	case SIOCGIFSLAVE:
5132 	case SIOCGIFMAP:
5133 	case SIOCGIFINDEX:
5134 	case SIOCGIFTXQLEN:
5135 		dev_load(net, ifr.ifr_name);
5136 		rcu_read_lock();
5137 		ret = dev_ifsioc_locked(net, &ifr, cmd);
5138 		rcu_read_unlock();
5139 		if (!ret) {
5140 			if (colon)
5141 				*colon = ':';
5142 			if (copy_to_user(arg, &ifr,
5143 					 sizeof(struct ifreq)))
5144 				ret = -EFAULT;
5145 		}
5146 		return ret;
5147 
5148 	case SIOCETHTOOL:
5149 		dev_load(net, ifr.ifr_name);
5150 		rtnl_lock();
5151 		ret = dev_ethtool(net, &ifr);
5152 		rtnl_unlock();
5153 		if (!ret) {
5154 			if (colon)
5155 				*colon = ':';
5156 			if (copy_to_user(arg, &ifr,
5157 					 sizeof(struct ifreq)))
5158 				ret = -EFAULT;
5159 		}
5160 		return ret;
5161 
5162 	/*
5163 	 *	These ioctl calls:
5164 	 *	- require superuser power.
5165 	 *	- require strict serialization.
5166 	 *	- return a value
5167 	 */
5168 	case SIOCGMIIPHY:
5169 	case SIOCGMIIREG:
5170 	case SIOCSIFNAME:
5171 		if (!capable(CAP_NET_ADMIN))
5172 			return -EPERM;
5173 		dev_load(net, ifr.ifr_name);
5174 		rtnl_lock();
5175 		ret = dev_ifsioc(net, &ifr, cmd);
5176 		rtnl_unlock();
5177 		if (!ret) {
5178 			if (colon)
5179 				*colon = ':';
5180 			if (copy_to_user(arg, &ifr,
5181 					 sizeof(struct ifreq)))
5182 				ret = -EFAULT;
5183 		}
5184 		return ret;
5185 
5186 	/*
5187 	 *	These ioctl calls:
5188 	 *	- require superuser power.
5189 	 *	- require strict serialization.
5190 	 *	- do not return a value
5191 	 */
5192 	case SIOCSIFFLAGS:
5193 	case SIOCSIFMETRIC:
5194 	case SIOCSIFMTU:
5195 	case SIOCSIFMAP:
5196 	case SIOCSIFHWADDR:
5197 	case SIOCSIFSLAVE:
5198 	case SIOCADDMULTI:
5199 	case SIOCDELMULTI:
5200 	case SIOCSIFHWBROADCAST:
5201 	case SIOCSIFTXQLEN:
5202 	case SIOCSMIIREG:
5203 	case SIOCBONDENSLAVE:
5204 	case SIOCBONDRELEASE:
5205 	case SIOCBONDSETHWADDR:
5206 	case SIOCBONDCHANGEACTIVE:
5207 	case SIOCBRADDIF:
5208 	case SIOCBRDELIF:
5209 	case SIOCSHWTSTAMP:
5210 		if (!capable(CAP_NET_ADMIN))
5211 			return -EPERM;
5212 		/* fall through */
5213 	case SIOCBONDSLAVEINFOQUERY:
5214 	case SIOCBONDINFOQUERY:
5215 		dev_load(net, ifr.ifr_name);
5216 		rtnl_lock();
5217 		ret = dev_ifsioc(net, &ifr, cmd);
5218 		rtnl_unlock();
5219 		return ret;
5220 
5221 	case SIOCGIFMEM:
5222 		/* Get the per device memory space. We can add this but
5223 		 * currently do not support it */
5224 	case SIOCSIFMEM:
5225 		/* Set the per device memory buffer space.
5226 		 * Not applicable in our case */
5227 	case SIOCSIFLINK:
5228 		return -ENOTTY;
5229 
5230 	/*
5231 	 *	Unknown or private ioctl.
5232 	 */
5233 	default:
5234 		if (cmd == SIOCWANDEV ||
5235 		    (cmd >= SIOCDEVPRIVATE &&
5236 		     cmd <= SIOCDEVPRIVATE + 15)) {
5237 			dev_load(net, ifr.ifr_name);
5238 			rtnl_lock();
5239 			ret = dev_ifsioc(net, &ifr, cmd);
5240 			rtnl_unlock();
5241 			if (!ret && copy_to_user(arg, &ifr,
5242 						 sizeof(struct ifreq)))
5243 				ret = -EFAULT;
5244 			return ret;
5245 		}
5246 		/* Take care of Wireless Extensions */
5247 		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5248 			return wext_handle_ioctl(net, &ifr, cmd, arg);
5249 		return -ENOTTY;
5250 	}
5251 }
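
/*
 * Example (editorial sketch): the user-space side of one of the "atomic, no
 * locking" requests handled above.  SIOCGIFMTU ends up in dev_ifsioc_locked()
 * under rcu_read_lock().  The usual <sys/ioctl.h>, <sys/socket.h>, <net/if.h>,
 * <string.h>, <stdio.h> and <unistd.h> includes are assumed:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu = %d\n", ifr.ifr_mtu);
 *	close(fd);
 */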
5252 
5253 
5254 /**
5255  *	dev_new_index	-	allocate an ifindex
5256  *	@net: the applicable net namespace
5257  *
5258  *	Returns a suitable unique value for a new device interface
5259  *	number.  The caller must hold the rtnl semaphore or the
5260  *	dev_base_lock to be sure it remains unique.
5261  */
5262 static int dev_new_index(struct net *net)
5263 {
5264 	int ifindex = net->ifindex;
5265 	for (;;) {
5266 		if (++ifindex <= 0)
5267 			ifindex = 1;
5268 		if (!__dev_get_by_index(net, ifindex))
5269 			return net->ifindex = ifindex;
5270 	}
5271 }
5272 
5273 /* Delayed registration/unregisteration */
5274 static LIST_HEAD(net_todo_list);
5275 
5276 static void net_set_todo(struct net_device *dev)
5277 {
5278 	list_add_tail(&dev->todo_list, &net_todo_list);
5279 }
5280 
5281 static void rollback_registered_many(struct list_head *head)
5282 {
5283 	struct net_device *dev, *tmp;
5284 
5285 	BUG_ON(dev_boot_phase);
5286 	ASSERT_RTNL();
5287 
5288 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5289 		/* Some devices call this without ever having been
5290 		 * registered, as an initialization unwind. Remove those
5291 		 * devices and proceed with the remaining ones.
5292 		 */
5293 		if (dev->reg_state == NETREG_UNINITIALIZED) {
5294 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5295 				 dev->name, dev);
5296 
5297 			WARN_ON(1);
5298 			list_del(&dev->unreg_list);
5299 			continue;
5300 		}
5301 		dev->dismantle = true;
5302 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
5303 	}
5304 
5305 	/* If device is running, close it first. */
5306 	dev_close_many(head);
5307 
5308 	list_for_each_entry(dev, head, unreg_list) {
5309 		/* And unlink it from device chain. */
5310 		unlist_netdevice(dev);
5311 
5312 		dev->reg_state = NETREG_UNREGISTERING;
5313 	}
5314 
5315 	synchronize_net();
5316 
5317 	list_for_each_entry(dev, head, unreg_list) {
5318 		/* Shutdown queueing discipline. */
5319 		dev_shutdown(dev);
5320 
5321 
5322 		/* Notify protocols that we are about to destroy
5323 		   this device. They should clean up all of their state.
5324 		*/
5325 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5326 
5327 		if (!dev->rtnl_link_ops ||
5328 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5329 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5330 
5331 		/*
5332 		 *	Flush the unicast and multicast chains
5333 		 */
5334 		dev_uc_flush(dev);
5335 		dev_mc_flush(dev);
5336 
5337 		if (dev->netdev_ops->ndo_uninit)
5338 			dev->netdev_ops->ndo_uninit(dev);
5339 
5340 		/* Notifier chain MUST detach us from master device. */
5341 		WARN_ON(dev->master);
5342 
5343 		/* Remove entries from kobject tree */
5344 		netdev_unregister_kobject(dev);
5345 	}
5346 
5347 	synchronize_net();
5348 
5349 	list_for_each_entry(dev, head, unreg_list)
5350 		dev_put(dev);
5351 }
5352 
5353 static void rollback_registered(struct net_device *dev)
5354 {
5355 	LIST_HEAD(single);
5356 
5357 	list_add(&dev->unreg_list, &single);
5358 	rollback_registered_many(&single);
5359 	list_del(&single);
5360 }
5361 
5362 static netdev_features_t netdev_fix_features(struct net_device *dev,
5363 	netdev_features_t features)
5364 {
5365 	/* Fix illegal checksum combinations */
5366 	if ((features & NETIF_F_HW_CSUM) &&
5367 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5368 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5369 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5370 	}
5371 
5372 	/* Fix illegal SG+CSUM combinations. */
5373 	if ((features & NETIF_F_SG) &&
5374 	    !(features & NETIF_F_ALL_CSUM)) {
5375 		netdev_dbg(dev,
5376 			"Dropping NETIF_F_SG since no checksum feature.\n");
5377 		features &= ~NETIF_F_SG;
5378 	}
5379 
5380 	/* TSO requires that SG is present as well. */
5381 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5382 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5383 		features &= ~NETIF_F_ALL_TSO;
5384 	}
5385 
5386 	/* TSO ECN requires that TSO is present as well. */
5387 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5388 		features &= ~NETIF_F_TSO_ECN;
5389 
5390 	/* Software GSO depends on SG. */
5391 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5392 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5393 		features &= ~NETIF_F_GSO;
5394 	}
5395 
5396 	/* UFO needs SG and checksumming */
5397 	if (features & NETIF_F_UFO) {
5398 		/* maybe split UFO into V4 and V6? */
5399 		if (!((features & NETIF_F_GEN_CSUM) ||
5400 		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5401 			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5402 			netdev_dbg(dev,
5403 				"Dropping NETIF_F_UFO since no checksum offload features.\n");
5404 			features &= ~NETIF_F_UFO;
5405 		}
5406 
5407 		if (!(features & NETIF_F_SG)) {
5408 			netdev_dbg(dev,
5409 				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5410 			features &= ~NETIF_F_UFO;
5411 		}
5412 	}
5413 
5414 	return features;
5415 }
5416 
5417 int __netdev_update_features(struct net_device *dev)
5418 {
5419 	netdev_features_t features;
5420 	int err = 0;
5421 
5422 	ASSERT_RTNL();
5423 
5424 	features = netdev_get_wanted_features(dev);
5425 
5426 	if (dev->netdev_ops->ndo_fix_features)
5427 		features = dev->netdev_ops->ndo_fix_features(dev, features);
5428 
5429 	/* driver might be less strict about feature dependencies */
5430 	features = netdev_fix_features(dev, features);
5431 
5432 	if (dev->features == features)
5433 		return 0;
5434 
5435 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5436 		&dev->features, &features);
5437 
5438 	if (dev->netdev_ops->ndo_set_features)
5439 		err = dev->netdev_ops->ndo_set_features(dev, features);
5440 
5441 	if (unlikely(err < 0)) {
5442 		netdev_err(dev,
5443 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
5444 			err, &features, &dev->features);
5445 		return -1;
5446 	}
5447 
5448 	if (!err)
5449 		dev->features = features;
5450 
5451 	return 1;
5452 }
5453 
5454 /**
5455  *	netdev_update_features - recalculate device features
5456  *	@dev: the device to check
5457  *
5458  *	Recalculate dev->features set and send notifications if it
5459  *	has changed. Should be called after driver or hardware dependent
5460  *	conditions might have changed that influence the features.
5461  */
5462 void netdev_update_features(struct net_device *dev)
5463 {
5464 	if (__netdev_update_features(dev))
5465 		netdev_features_change(dev);
5466 }
5467 EXPORT_SYMBOL(netdev_update_features);
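
/*
 * Example (editorial sketch): a driver whose hardware checksum offload only
 * works up to a certain frame size can re-evaluate its features after an MTU
 * change.  RTNL is already held on this path, as netdev_update_features()
 * requires.  foo_change_mtu is a hypothetical ndo_change_mtu implementation:
 *
 *	static int foo_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);	// re-run ndo_fix_features + netdev_fix_features
 *		return 0;
 *	}
 */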
5468 
5469 /**
5470  *	netdev_change_features - recalculate device features
5471  *	@dev: the device to check
5472  *
5473  *	Recalculate dev->features set and send notifications even
5474  *	if they have not changed. Should be called instead of
5475  *	netdev_update_features() if also dev->vlan_features might
5476  *	have changed to allow the changes to be propagated to stacked
5477  *	VLAN devices.
5478  */
5479 void netdev_change_features(struct net_device *dev)
5480 {
5481 	__netdev_update_features(dev);
5482 	netdev_features_change(dev);
5483 }
5484 EXPORT_SYMBOL(netdev_change_features);
5485 
5486 /**
5487  *	netif_stacked_transfer_operstate -	transfer operstate
5488  *	@rootdev: the root or lower level device to transfer state from
5489  *	@dev: the device to transfer operstate to
5490  *
5491  *	Transfer operational state from root to device. This is normally
5492  *	called when a stacking relationship exists between the root
5493  *	device and the device(a leaf device).
5494  */
5495 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5496 					struct net_device *dev)
5497 {
5498 	if (rootdev->operstate == IF_OPER_DORMANT)
5499 		netif_dormant_on(dev);
5500 	else
5501 		netif_dormant_off(dev);
5502 
5503 	if (netif_carrier_ok(rootdev)) {
5504 		if (!netif_carrier_ok(dev))
5505 			netif_carrier_on(dev);
5506 	} else {
5507 		if (netif_carrier_ok(dev))
5508 			netif_carrier_off(dev);
5509 	}
5510 }
5511 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5512 
5513 #ifdef CONFIG_RPS
5514 static int netif_alloc_rx_queues(struct net_device *dev)
5515 {
5516 	unsigned int i, count = dev->num_rx_queues;
5517 	struct netdev_rx_queue *rx;
5518 
5519 	BUG_ON(count < 1);
5520 
5521 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5522 	if (!rx) {
5523 		pr_err("netdev: Unable to allocate %u rx queues\n", count);
5524 		return -ENOMEM;
5525 	}
5526 	dev->_rx = rx;
5527 
5528 	for (i = 0; i < count; i++)
5529 		rx[i].dev = dev;
5530 	return 0;
5531 }
5532 #endif
5533 
5534 static void netdev_init_one_queue(struct net_device *dev,
5535 				  struct netdev_queue *queue, void *_unused)
5536 {
5537 	/* Initialize queue lock */
5538 	spin_lock_init(&queue->_xmit_lock);
5539 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5540 	queue->xmit_lock_owner = -1;
5541 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5542 	queue->dev = dev;
5543 #ifdef CONFIG_BQL
5544 	dql_init(&queue->dql, HZ);
5545 #endif
5546 }
5547 
5548 static int netif_alloc_netdev_queues(struct net_device *dev)
5549 {
5550 	unsigned int count = dev->num_tx_queues;
5551 	struct netdev_queue *tx;
5552 
5553 	BUG_ON(count < 1);
5554 
5555 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5556 	if (!tx) {
5557 		pr_err("netdev: Unable to allocate %u tx queues\n", count);
5558 		return -ENOMEM;
5559 	}
5560 	dev->_tx = tx;
5561 
5562 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5563 	spin_lock_init(&dev->tx_global_lock);
5564 
5565 	return 0;
5566 }
5567 
5568 /**
5569  *	register_netdevice	- register a network device
5570  *	@dev: device to register
5571  *
5572  *	Take a completed network device structure and add it to the kernel
5573  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5574  *	chain. 0 is returned on success. A negative errno code is returned
5575  *	on a failure to set up the device, or if the name is a duplicate.
5576  *
5577  *	Callers must hold the rtnl semaphore. You may want
5578  *	register_netdev() instead of this.
5579  *
5580  *	BUGS:
5581  *	The locking appears insufficient to guarantee two parallel registers
5582  *	will not get the same name.
5583  */
5584 
5585 int register_netdevice(struct net_device *dev)
5586 {
5587 	int ret;
5588 	struct net *net = dev_net(dev);
5589 
5590 	BUG_ON(dev_boot_phase);
5591 	ASSERT_RTNL();
5592 
5593 	might_sleep();
5594 
5595 	/* When net_device's are persistent, this will be fatal. */
5596 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5597 	BUG_ON(!net);
5598 
5599 	spin_lock_init(&dev->addr_list_lock);
5600 	netdev_set_addr_lockdep_class(dev);
5601 
5602 	dev->iflink = -1;
5603 
5604 	ret = dev_get_valid_name(net, dev, dev->name);
5605 	if (ret < 0)
5606 		goto out;
5607 
5608 	/* Init, if this function is available */
5609 	if (dev->netdev_ops->ndo_init) {
5610 		ret = dev->netdev_ops->ndo_init(dev);
5611 		if (ret) {
5612 			if (ret > 0)
5613 				ret = -EIO;
5614 			goto out;
5615 		}
5616 	}
5617 
5618 	ret = -EBUSY;
5619 	if (!dev->ifindex)
5620 		dev->ifindex = dev_new_index(net);
5621 	else if (__dev_get_by_index(net, dev->ifindex))
5622 		goto err_uninit;
5623 
5624 	if (dev->iflink == -1)
5625 		dev->iflink = dev->ifindex;
5626 
5627 	/* Transfer changeable features to wanted_features and enable
5628 	 * software offloads (GSO and GRO).
5629 	 */
5630 	dev->hw_features |= NETIF_F_SOFT_FEATURES;
5631 	dev->features |= NETIF_F_SOFT_FEATURES;
5632 	dev->wanted_features = dev->features & dev->hw_features;
5633 
5634 	/* Turn on no cache copy if HW is doing checksum */
5635 	if (!(dev->flags & IFF_LOOPBACK)) {
5636 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
5637 		if (dev->features & NETIF_F_ALL_CSUM) {
5638 			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5639 			dev->features |= NETIF_F_NOCACHE_COPY;
5640 		}
5641 	}
5642 
5643 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5644 	 */
5645 	dev->vlan_features |= NETIF_F_HIGHDMA;
5646 
5647 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5648 	ret = notifier_to_errno(ret);
5649 	if (ret)
5650 		goto err_uninit;
5651 
5652 	ret = netdev_register_kobject(dev);
5653 	if (ret)
5654 		goto err_uninit;
5655 	dev->reg_state = NETREG_REGISTERED;
5656 
5657 	__netdev_update_features(dev);
5658 
5659 	/*
5660 	 *	Default initial state at registry is that the
5661 	 *	device is present.
5662 	 */
5663 
5664 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5665 
5666 	linkwatch_init_dev(dev);
5667 
5668 	dev_init_scheduler(dev);
5669 	dev_hold(dev);
5670 	list_netdevice(dev);
5671 	add_device_randomness(dev->dev_addr, dev->addr_len);
5672 
5673 	/* Notify protocols that a new device appeared. */
5674 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5675 	ret = notifier_to_errno(ret);
5676 	if (ret) {
5677 		rollback_registered(dev);
5678 		dev->reg_state = NETREG_UNREGISTERED;
5679 	}
5680 	/*
5681 	 *	Prevent userspace races by waiting until the network
5682 	 *	device is fully setup before sending notifications.
5683 	 */
5684 	if (!dev->rtnl_link_ops ||
5685 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5686 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5687 
5688 out:
5689 	return ret;
5690 
5691 err_uninit:
5692 	if (dev->netdev_ops->ndo_uninit)
5693 		dev->netdev_ops->ndo_uninit(dev);
5694 	goto out;
5695 }
5696 EXPORT_SYMBOL(register_netdevice);
5697 
5698 /**
5699  *	init_dummy_netdev	- init a dummy network device for NAPI
5700  *	@dev: device to init
5701  *
5702  *	This takes a network device structure and initializes the minimum
5703  *	number of fields so it can be used to schedule NAPI polls without
5704  *	registering a full blown interface. This is to be used by drivers
5705  *	that need to tie several hardware interfaces to a single NAPI
5706  *	poll scheduler due to HW limitations.
5707  */
5708 int init_dummy_netdev(struct net_device *dev)
5709 {
5710 	/* Clear everything. Note we don't initialize spinlocks
5711 	 * as they aren't supposed to be taken by any of the
5712 	 * NAPI code and this dummy netdev is supposed to be
5713 	 * only ever used for NAPI polls.
5714 	 */
5715 	memset(dev, 0, sizeof(struct net_device));
5716 
5717 	/* make sure we BUG if trying to hit standard
5718 	 * register/unregister code path
5719 	 */
5720 	dev->reg_state = NETREG_DUMMY;
5721 
5722 	/* NAPI wants this */
5723 	INIT_LIST_HEAD(&dev->napi_list);
5724 
5725 	/* a dummy interface is started by default */
5726 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5727 	set_bit(__LINK_STATE_START, &dev->state);
5728 
5729 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
5730 	 * because users of this 'device' don't need to change
5731 	 * its refcount.
5732 	 */
5733 
5734 	return 0;
5735 }
5736 EXPORT_SYMBOL_GPL(init_dummy_netdev);
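
/*
 * Example (editorial sketch): a driver with a single PCI function but several
 * independent hardware channels can hang all of its NAPI contexts off one
 * dummy netdev.  priv, chan and foo_chan_poll are hypothetical:
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &chan->napi, foo_chan_poll, 64);
 *	napi_enable(&chan->napi);
 */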
5737 
5738 
5739 /**
5740  *	register_netdev	- register a network device
5741  *	@dev: device to register
5742  *
5743  *	Take a completed network device structure and add it to the kernel
5744  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5745  *	chain. 0 is returned on success. A negative errno code is returned
5746  *	on a failure to set up the device, or if the name is a duplicate.
5747  *
5748  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
5749  *	and expands the device name if you passed a format string to
5750  *	alloc_netdev.
5751  */
5752 int register_netdev(struct net_device *dev)
5753 {
5754 	int err;
5755 
5756 	rtnl_lock();
5757 	err = register_netdevice(dev);
5758 	rtnl_unlock();
5759 	return err;
5760 }
5761 EXPORT_SYMBOL(register_netdev);
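
/*
 * Illustrative sketch (not part of this file): typical probe-time use
 * of the allocation/registration pair; "example_setup" and
 * "struct example_priv" are hypothetical driver names.
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "ex%d", example_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */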
5762 
5763 int netdev_refcnt_read(const struct net_device *dev)
5764 {
5765 	int i, refcnt = 0;
5766 
5767 	for_each_possible_cpu(i)
5768 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5769 	return refcnt;
5770 }
5771 EXPORT_SYMBOL(netdev_refcnt_read);
5772 
5773 /**
5774  * netdev_wait_allrefs - wait until all references are gone.
5775  * @dev: target net_device
5776  *
5777  * This is called when unregistering network devices.
5778  *
5779  * Any protocol or device that holds a reference should register
5780  * for netdevice notification, and cleanup and put back the
5781  * reference if they receive an UNREGISTER event.
5782  * We can get stuck here if buggy protocols don't correctly
5783  * call dev_put.
5784  */
5785 static void netdev_wait_allrefs(struct net_device *dev)
5786 {
5787 	unsigned long rebroadcast_time, warning_time;
5788 	int refcnt;
5789 
5790 	linkwatch_forget_dev(dev);
5791 
5792 	rebroadcast_time = warning_time = jiffies;
5793 	refcnt = netdev_refcnt_read(dev);
5794 
5795 	while (refcnt != 0) {
5796 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5797 			rtnl_lock();
5798 
5799 			/* Rebroadcast unregister notification */
5800 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5801 
5802 			__rtnl_unlock();
5803 			rcu_barrier();
5804 			rtnl_lock();
5805 
5806 			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5807 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5808 				     &dev->state)) {
5809 				/* We must not have linkwatch events
5810 				 * pending on unregister. If this
5811 				 * happens, we simply run the queue
5812 				 * unscheduled, resulting in a noop
5813 				 * for this device.
5814 				 */
5815 				linkwatch_run_queue();
5816 			}
5817 
5818 			__rtnl_unlock();
5819 
5820 			rebroadcast_time = jiffies;
5821 		}
5822 
5823 		msleep(250);
5824 
5825 		refcnt = netdev_refcnt_read(dev);
5826 
5827 		if (time_after(jiffies, warning_time + 10 * HZ)) {
5828 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5829 				 dev->name, refcnt);
5830 			warning_time = jiffies;
5831 		}
5832 	}
5833 }
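
/*
 * Illustrative sketch (not part of this file): a subsystem that holds a
 * long-lived device reference would register a netdevice notifier (via
 * register_netdevice_notifier()) and release the reference on
 * NETDEV_UNREGISTER; "my_dev" and "my_netdev_event" are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == my_dev) {
 *			dev_put(my_dev);
 *			my_dev = NULL;
 *		}
 *		return NOTIFY_DONE;
 *	}
 */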
5834 
5835 /* The sequence is:
5836  *
5837  *	rtnl_lock();
5838  *	...
5839  *	register_netdevice(x1);
5840  *	register_netdevice(x2);
5841  *	...
5842  *	unregister_netdevice(y1);
5843  *	unregister_netdevice(y2);
5844  *      ...
5845  *	rtnl_unlock();
5846  *	free_netdev(y1);
5847  *	free_netdev(y2);
5848  *
5849  * We are invoked by rtnl_unlock().
5850  * This allows us to deal with problems:
5851  * 1) We can delete sysfs objects which invoke hotplug
5852  *    without deadlocking with linkwatch via keventd.
5853  * 2) Since we run with the RTNL semaphore not held, we can sleep
5854  *    safely in order to wait for the netdev refcnt to drop to zero.
5855  *
5856  * We must not return until all unregister events added during
5857  * the interval the lock was held have been completed.
5858  */
5859 void netdev_run_todo(void)
5860 {
5861 	struct list_head list;
5862 
5863 	/* Snapshot list, allow later requests */
5864 	list_replace_init(&net_todo_list, &list);
5865 
5866 	__rtnl_unlock();
5867 
5868 
5869 	/* Wait for rcu callbacks to finish before next phase */
5870 	if (!list_empty(&list))
5871 		rcu_barrier();
5872 
5873 	while (!list_empty(&list)) {
5874 		struct net_device *dev
5875 			= list_first_entry(&list, struct net_device, todo_list);
5876 		list_del(&dev->todo_list);
5877 
5878 		rtnl_lock();
5879 		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5880 		__rtnl_unlock();
5881 
5882 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5883 			pr_err("network todo '%s' but state %d\n",
5884 			       dev->name, dev->reg_state);
5885 			dump_stack();
5886 			continue;
5887 		}
5888 
5889 		dev->reg_state = NETREG_UNREGISTERED;
5890 
5891 		on_each_cpu(flush_backlog, dev, 1);
5892 
5893 		netdev_wait_allrefs(dev);
5894 
5895 		/* paranoia */
5896 		BUG_ON(netdev_refcnt_read(dev));
5897 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
5898 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5899 		WARN_ON(dev->dn_ptr);
5900 
5901 		if (dev->destructor)
5902 			dev->destructor(dev);
5903 
5904 		/* Free network device */
5905 		kobject_put(&dev->dev.kobj);
5906 	}
5907 }
5908 
5909 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
5910  * fields in the same order, with only the type differing.
5911  */
5912 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5913 			     const struct net_device_stats *netdev_stats)
5914 {
5915 #if BITS_PER_LONG == 64
5916 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5917 	memcpy(stats64, netdev_stats, sizeof(*stats64));
5918 #else
5919 	size_t i, n = sizeof(*stats64) / sizeof(u64);
5920 	const unsigned long *src = (const unsigned long *)netdev_stats;
5921 	u64 *dst = (u64 *)stats64;
5922 
5923 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5924 		     sizeof(*stats64) / sizeof(u64));
5925 	for (i = 0; i < n; i++)
5926 		dst[i] = src[i];
5927 #endif
5928 }
5929 EXPORT_SYMBOL(netdev_stats_to_stats64);
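
/*
 * Illustrative sketch (not part of this file): a driver that keeps its
 * counters in dev->stats can implement ndo_get_stats64 by converting
 * them in place; "my_get_stats64" is a hypothetical name.
 *
 *	static struct rtnl_link_stats64 *
 *	my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 *	{
 *		netdev_stats_to_stats64(stats, &dev->stats);
 *		return stats;
 *	}
 */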
5930 
5931 /**
5932  *	dev_get_stats	- get network device statistics
5933  *	@dev: device to get statistics from
5934  *	@storage: place to store stats
5935  *
5936  *	Get network statistics from device. Return @storage.
5937  *	The device driver may provide its own method by setting
5938  *	dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5939  *	otherwise the internal statistics structure is used.
5940  */
5941 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5942 					struct rtnl_link_stats64 *storage)
5943 {
5944 	const struct net_device_ops *ops = dev->netdev_ops;
5945 
5946 	if (ops->ndo_get_stats64) {
5947 		memset(storage, 0, sizeof(*storage));
5948 		ops->ndo_get_stats64(dev, storage);
5949 	} else if (ops->ndo_get_stats) {
5950 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5951 	} else {
5952 		netdev_stats_to_stats64(storage, &dev->stats);
5953 	}
5954 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5955 	return storage;
5956 }
5957 EXPORT_SYMBOL(dev_get_stats);
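
/*
 * Illustrative sketch (not part of this file): callers pass their own
 * storage and use the returned pointer; "temp" is a hypothetical local.
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 *
 *	pr_info("%s: %llu packets received\n", dev->name,
 *		(unsigned long long)stats->rx_packets);
 */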
5958 
5959 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5960 {
5961 	struct netdev_queue *queue = dev_ingress_queue(dev);
5962 
5963 #ifdef CONFIG_NET_CLS_ACT
5964 	if (queue)
5965 		return queue;
5966 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5967 	if (!queue)
5968 		return NULL;
5969 	netdev_init_one_queue(dev, queue, NULL);
5970 	queue->qdisc = &noop_qdisc;
5971 	queue->qdisc_sleeping = &noop_qdisc;
5972 	rcu_assign_pointer(dev->ingress_queue, queue);
5973 #endif
5974 	return queue;
5975 }
5976 
5977 static const struct ethtool_ops default_ethtool_ops;
5978 
5979 /**
5980  *	alloc_netdev_mqs - allocate network device
5981  *	@sizeof_priv:	size of private data to allocate space for
5982  *	@name:		device name format string
5983  *	@setup:		callback to initialize device
5984  *	@txqs:		the number of TX subqueues to allocate
5985  *	@rxqs:		the number of RX subqueues to allocate
5986  *
5987  *	Allocates a struct net_device with private data area for driver use
5988  *	and performs basic initialization.  Also allocates subqueue structs
5989  *	for each queue on the device.
5990  */
5991 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5992 		void (*setup)(struct net_device *),
5993 		unsigned int txqs, unsigned int rxqs)
5994 {
5995 	struct net_device *dev;
5996 	size_t alloc_size;
5997 	struct net_device *p;
5998 
5999 	BUG_ON(strlen(name) >= sizeof(dev->name));
6000 
6001 	if (txqs < 1) {
6002 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6003 		return NULL;
6004 	}
6005 
6006 #ifdef CONFIG_RPS
6007 	if (rxqs < 1) {
6008 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6009 		return NULL;
6010 	}
6011 #endif
6012 
6013 	alloc_size = sizeof(struct net_device);
6014 	if (sizeof_priv) {
6015 		/* ensure 32-byte alignment of private area */
6016 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6017 		alloc_size += sizeof_priv;
6018 	}
6019 	/* ensure 32-byte alignment of whole construct */
6020 	alloc_size += NETDEV_ALIGN - 1;
6021 
6022 	p = kzalloc(alloc_size, GFP_KERNEL);
6023 	if (!p) {
6024 		pr_err("alloc_netdev: Unable to allocate device\n");
6025 		return NULL;
6026 	}
6027 
6028 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
6029 	dev->padded = (char *)dev - (char *)p;
6030 
6031 	dev->pcpu_refcnt = alloc_percpu(int);
6032 	if (!dev->pcpu_refcnt)
6033 		goto free_p;
6034 
6035 	if (dev_addr_init(dev))
6036 		goto free_pcpu;
6037 
6038 	dev_mc_init(dev);
6039 	dev_uc_init(dev);
6040 
6041 	dev_net_set(dev, &init_net);
6042 
6043 	dev->gso_max_size = GSO_MAX_SIZE;
6044 	dev->gso_max_segs = GSO_MAX_SEGS;
6045 
6046 	INIT_LIST_HEAD(&dev->napi_list);
6047 	INIT_LIST_HEAD(&dev->unreg_list);
6048 	INIT_LIST_HEAD(&dev->link_watch_list);
6049 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
6050 	setup(dev);
6051 
6052 	dev->num_tx_queues = txqs;
6053 	dev->real_num_tx_queues = txqs;
6054 	if (netif_alloc_netdev_queues(dev))
6055 		goto free_all;
6056 
6057 #ifdef CONFIG_RPS
6058 	dev->num_rx_queues = rxqs;
6059 	dev->real_num_rx_queues = rxqs;
6060 	if (netif_alloc_rx_queues(dev))
6061 		goto free_all;
6062 #endif
6063 
6064 	strcpy(dev->name, name);
6065 	dev->group = INIT_NETDEV_GROUP;
6066 	if (!dev->ethtool_ops)
6067 		dev->ethtool_ops = &default_ethtool_ops;
6068 	return dev;
6069 
6070 free_all:
6071 	free_netdev(dev);
6072 	return NULL;
6073 
6074 free_pcpu:
6075 	free_percpu(dev->pcpu_refcnt);
6076 	kfree(dev->_tx);
6077 #ifdef CONFIG_RPS
6078 	kfree(dev->_rx);
6079 #endif
6080 
6081 free_p:
6082 	kfree(p);
6083 	return NULL;
6084 }
6085 EXPORT_SYMBOL(alloc_netdev_mqs);
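
/*
 * Illustrative sketch (not part of this file): multiqueue Ethernet
 * drivers usually reach this through the ether_setup() based wrapper;
 * the queue counts and "struct example_priv" are hypothetical.
 *
 *	dev = alloc_etherdev_mqs(sizeof(struct example_priv), 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */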
6086 
6087 /**
6088  *	free_netdev - free network device
6089  *	@dev: device
6090  *
6091  *	This function does the last stage of destroying an allocated device
6092  * 	interface. The reference to the device object is released.
6093  *	If this is the last reference then it will be freed.
6094  */
6095 void free_netdev(struct net_device *dev)
6096 {
6097 	struct napi_struct *p, *n;
6098 
6099 	release_net(dev_net(dev));
6100 
6101 	kfree(dev->_tx);
6102 #ifdef CONFIG_RPS
6103 	kfree(dev->_rx);
6104 #endif
6105 
6106 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6107 
6108 	/* Flush device addresses */
6109 	dev_addr_flush(dev);
6110 
6111 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6112 		netif_napi_del(p);
6113 
6114 	free_percpu(dev->pcpu_refcnt);
6115 	dev->pcpu_refcnt = NULL;
6116 
6117 	/*  Compatibility with error handling in drivers */
6118 	if (dev->reg_state == NETREG_UNINITIALIZED) {
6119 		kfree((char *)dev - dev->padded);
6120 		return;
6121 	}
6122 
6123 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6124 	dev->reg_state = NETREG_RELEASED;
6125 
6126 	/* will free via device release */
6127 	put_device(&dev->dev);
6128 }
6129 EXPORT_SYMBOL(free_netdev);
6130 
6131 /**
6132  *	synchronize_net -  Synchronize with packet receive processing
6133  *
6134  *	Wait for packets currently being received to be done.
6135  *	Does not block later packets from starting.
6136  */
6137 void synchronize_net(void)
6138 {
6139 	might_sleep();
6140 	if (rtnl_is_locked())
6141 		synchronize_rcu_expedited();
6142 	else
6143 		synchronize_rcu();
6144 }
6145 EXPORT_SYMBOL(synchronize_net);
6146 
6147 /**
6148  *	unregister_netdevice_queue - remove device from the kernel
6149  *	@dev: device
6150  *	@head: list
6151  *
6152  *	This function shuts down a device interface and removes it
6153  *	from the kernel tables.
6154  *	If head is not NULL, the device is queued to be unregistered later.
6155  *
6156  *	Callers must hold the rtnl semaphore.  You may want
6157  *	unregister_netdev() instead of this.
6158  */
6159 
6160 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6161 {
6162 	ASSERT_RTNL();
6163 
6164 	if (head) {
6165 		list_move_tail(&dev->unreg_list, head);
6166 	} else {
6167 		rollback_registered(dev);
6168 		/* Finish processing unregister after unlock */
6169 		net_set_todo(dev);
6170 	}
6171 }
6172 EXPORT_SYMBOL(unregister_netdevice_queue);
6173 
6174 /**
6175  *	unregister_netdevice_many - unregister many devices
6176  *	@head: list of devices
6177  */
6178 void unregister_netdevice_many(struct list_head *head)
6179 {
6180 	struct net_device *dev;
6181 
6182 	if (!list_empty(head)) {
6183 		rollback_registered_many(head);
6184 		list_for_each_entry(dev, head, unreg_list)
6185 			net_set_todo(dev);
6186 	}
6187 }
6188 EXPORT_SYMBOL(unregister_netdevice_many);
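
/*
 * Illustrative sketch (not part of this file): queueing several devices
 * and unregistering them in one batch under a single rtnl_lock() cycle;
 * "dev1" and "dev2" are hypothetical.
 *
 *	LIST_HEAD(list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);
 *	rtnl_unlock();
 */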
6189 
6190 /**
6191  *	unregister_netdev - remove device from the kernel
6192  *	@dev: device
6193  *
6194  *	This function shuts down a device interface and removes it
6195  *	from the kernel tables.
6196  *
6197  *	This is just a wrapper for unregister_netdevice that takes
6198  *	the rtnl semaphore.  In general you want to use this and not
6199  *	unregister_netdevice.
6200  */
6201 void unregister_netdev(struct net_device *dev)
6202 {
6203 	rtnl_lock();
6204 	unregister_netdevice(dev);
6205 	rtnl_unlock();
6206 }
6207 EXPORT_SYMBOL(unregister_netdev);
6208 
6209 /**
6210  *	dev_change_net_namespace - move device to a different network namespace
6211  *	@dev: device
6212  *	@net: network namespace
6213  *	@pat: If not NULL, name pattern to try if the current device name
6214  *	      is already taken in the destination network namespace.
6215  *
6216  *	This function shuts down a device interface and moves it
6217  *	to a new network namespace. On success 0 is returned, on
6218  *	a failure a negative errno code is returned.
6219  *
6220  *	Callers must hold the rtnl semaphore.
6221  */
6222 
6223 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6224 {
6225 	int err;
6226 
6227 	ASSERT_RTNL();
6228 
6229 	/* Don't allow namespace local devices to be moved. */
6230 	err = -EINVAL;
6231 	if (dev->features & NETIF_F_NETNS_LOCAL)
6232 		goto out;
6233 
6234 	/* Ensure the device has been registered */
6235 	err = -EINVAL;
6236 	if (dev->reg_state != NETREG_REGISTERED)
6237 		goto out;
6238 
6239 	/* Get out if there is nothing to do */
6240 	err = 0;
6241 	if (net_eq(dev_net(dev), net))
6242 		goto out;
6243 
6244 	/* Pick the destination device name, and ensure
6245 	 * we can use it in the destination network namespace.
6246 	 */
6247 	err = -EEXIST;
6248 	if (__dev_get_by_name(net, dev->name)) {
6249 		/* We get here if we can't use the current device name */
6250 		if (!pat)
6251 			goto out;
6252 		if (dev_get_valid_name(net, dev, pat) < 0)
6253 			goto out;
6254 	}
6255 
6256 	/*
6257 	 * And now a mini version of register_netdevice and unregister_netdevice.
6258 	 */
6259 
6260 	/* If device is running close it first. */
6261 	dev_close(dev);
6262 
6263 	/* And unlink it from device chain */
6264 	err = -ENODEV;
6265 	unlist_netdevice(dev);
6266 
6267 	synchronize_net();
6268 
6269 	/* Shutdown queueing discipline. */
6270 	dev_shutdown(dev);
6271 
6272 	/* Notify protocols that we are about to destroy
6273 	   this device. They should clean up all of their state.
6274 
6275 	   Note that dev->reg_state stays at NETREG_REGISTERED.
6276 	   This is intentional: this way 8021q and macvlan know
6277 	   the device is just moving and can keep their slaves up.
6278 	*/
6279 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6280 	rcu_barrier();
6281 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6282 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6283 
6284 	/*
6285 	 *	Flush the unicast and multicast chains
6286 	 */
6287 	dev_uc_flush(dev);
6288 	dev_mc_flush(dev);
6289 
6290 	/* Actually switch the network namespace */
6291 	dev_net_set(dev, net);
6292 
6293 	/* If there is an ifindex conflict assign a new one */
6294 	if (__dev_get_by_index(net, dev->ifindex)) {
6295 		int iflink = (dev->iflink == dev->ifindex);
6296 		dev->ifindex = dev_new_index(net);
6297 		if (iflink)
6298 			dev->iflink = dev->ifindex;
6299 	}
6300 
6301 	/* Fixup kobjects */
6302 	err = device_rename(&dev->dev, dev->name);
6303 	WARN_ON(err);
6304 
6305 	/* Add the device back in the hashes */
6306 	list_netdevice(dev);
6307 
6308 	/* Notify protocols, that a new device appeared. */
6309 	/* Notify protocols that a new device has appeared. */
6310 
6311 	/*
6312 	 *	Prevent userspace races by waiting until the network
6313 	 *	device is fully setup before sending notifications.
6314 	 */
6315 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6316 
6317 	synchronize_net();
6318 	err = 0;
6319 out:
6320 	return err;
6321 }
6322 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
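
/*
 * Illustrative sketch (not part of this file): a caller (such as the
 * rtnetlink code) moves a device roughly like this, with "net" obtained
 * elsewhere (e.g. via get_net_ns_by_fd()) and error handling elided.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, NULL);
 *	rtnl_unlock();
 *	put_net(net);
 */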
6323 
6324 static int dev_cpu_callback(struct notifier_block *nfb,
6325 			    unsigned long action,
6326 			    void *ocpu)
6327 {
6328 	struct sk_buff **list_skb;
6329 	struct sk_buff *skb;
6330 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
6331 	struct softnet_data *sd, *oldsd;
6332 
6333 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6334 		return NOTIFY_OK;
6335 
6336 	local_irq_disable();
6337 	cpu = smp_processor_id();
6338 	sd = &per_cpu(softnet_data, cpu);
6339 	oldsd = &per_cpu(softnet_data, oldcpu);
6340 
6341 	/* Find end of our completion_queue. */
6342 	list_skb = &sd->completion_queue;
6343 	while (*list_skb)
6344 		list_skb = &(*list_skb)->next;
6345 	/* Append completion queue from offline CPU. */
6346 	*list_skb = oldsd->completion_queue;
6347 	oldsd->completion_queue = NULL;
6348 
6349 	/* Append output queue from offline CPU. */
6350 	if (oldsd->output_queue) {
6351 		*sd->output_queue_tailp = oldsd->output_queue;
6352 		sd->output_queue_tailp = oldsd->output_queue_tailp;
6353 		oldsd->output_queue = NULL;
6354 		oldsd->output_queue_tailp = &oldsd->output_queue;
6355 	}
6356 	/* Append NAPI poll list from offline CPU. */
6357 	if (!list_empty(&oldsd->poll_list)) {
6358 		list_splice_init(&oldsd->poll_list, &sd->poll_list);
6359 		raise_softirq_irqoff(NET_RX_SOFTIRQ);
6360 	}
6361 
6362 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
6363 	local_irq_enable();
6364 
6365 	/* Process offline CPU's input_pkt_queue */
6366 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6367 		netif_rx(skb);
6368 		input_queue_head_incr(oldsd);
6369 	}
6370 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6371 		netif_rx(skb);
6372 		input_queue_head_incr(oldsd);
6373 	}
6374 
6375 	return NOTIFY_OK;
6376 }
6377 
6378 
6379 /**
6380  *	netdev_increment_features - increment feature set by one
6381  *	@all: current feature set
6382  *	@one: new feature set
6383  *	@mask: mask feature set
6384  *
6385  *	Computes a new feature set after adding a device with feature set
6386  *	@one to the master device with current feature set @all.  Will not
6387  *	enable anything that is off in @mask. Returns the new feature set.
6388  */
6389 netdev_features_t netdev_increment_features(netdev_features_t all,
6390 	netdev_features_t one, netdev_features_t mask)
6391 {
6392 	if (mask & NETIF_F_GEN_CSUM)
6393 		mask |= NETIF_F_ALL_CSUM;
6394 	mask |= NETIF_F_VLAN_CHALLENGED;
6395 
6396 	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6397 	all &= one | ~NETIF_F_ALL_FOR_ALL;
6398 
6399 	/* If one device supports hw checksumming, set for all. */
6400 	if (all & NETIF_F_GEN_CSUM)
6401 		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6402 
6403 	return all;
6404 }
6405 EXPORT_SYMBOL(netdev_increment_features);
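
/*
 * Illustrative sketch (not part of this file): a master device (bridge,
 * bond) typically folds in each slave's features like this; "slave",
 * "slave_list" and "mask" are hypothetical.
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */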
6406 
6407 static struct hlist_head *netdev_create_hash(void)
6408 {
6409 	int i;
6410 	struct hlist_head *hash;
6411 
6412 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6413 	if (hash != NULL)
6414 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
6415 			INIT_HLIST_HEAD(&hash[i]);
6416 
6417 	return hash;
6418 }
6419 
6420 /* Initialize per network namespace state */
6421 static int __net_init netdev_init(struct net *net)
6422 {
6423 	if (net != &init_net)
6424 		INIT_LIST_HEAD(&net->dev_base_head);
6425 
6426 	net->dev_name_head = netdev_create_hash();
6427 	if (net->dev_name_head == NULL)
6428 		goto err_name;
6429 
6430 	net->dev_index_head = netdev_create_hash();
6431 	if (net->dev_index_head == NULL)
6432 		goto err_idx;
6433 
6434 	return 0;
6435 
6436 err_idx:
6437 	kfree(net->dev_name_head);
6438 err_name:
6439 	return -ENOMEM;
6440 }
6441 
6442 /**
6443  *	netdev_drivername - network driver for the device
6444  *	@dev: network device
6445  *
6446  *	Determine network driver for device.
6447  */
6448 const char *netdev_drivername(const struct net_device *dev)
6449 {
6450 	const struct device_driver *driver;
6451 	const struct device *parent;
6452 	const char *empty = "";
6453 
6454 	parent = dev->dev.parent;
6455 	if (!parent)
6456 		return empty;
6457 
6458 	driver = parent->driver;
6459 	if (driver && driver->name)
6460 		return driver->name;
6461 	return empty;
6462 }
6463 
6464 int __netdev_printk(const char *level, const struct net_device *dev,
6465 			   struct va_format *vaf)
6466 {
6467 	int r;
6468 
6469 	if (dev && dev->dev.parent)
6470 		r = dev_printk(level, dev->dev.parent, "%s: %pV",
6471 			       netdev_name(dev), vaf);
6472 	else if (dev)
6473 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6474 	else
6475 		r = printk("%s(NULL net_device): %pV", level, vaf);
6476 
6477 	return r;
6478 }
6479 EXPORT_SYMBOL(__netdev_printk);
6480 
6481 int netdev_printk(const char *level, const struct net_device *dev,
6482 		  const char *format, ...)
6483 {
6484 	struct va_format vaf;
6485 	va_list args;
6486 	int r;
6487 
6488 	va_start(args, format);
6489 
6490 	vaf.fmt = format;
6491 	vaf.va = &args;
6492 
6493 	r = __netdev_printk(level, dev, &vaf);
6494 	va_end(args);
6495 
6496 	return r;
6497 }
6498 EXPORT_SYMBOL(netdev_printk);
6499 
6500 #define define_netdev_printk_level(func, level)			\
6501 int func(const struct net_device *dev, const char *fmt, ...)	\
6502 {								\
6503 	int r;							\
6504 	struct va_format vaf;					\
6505 	va_list args;						\
6506 								\
6507 	va_start(args, fmt);					\
6508 								\
6509 	vaf.fmt = fmt;						\
6510 	vaf.va = &args;						\
6511 								\
6512 	r = __netdev_printk(level, dev, &vaf);			\
6513 	va_end(args);						\
6514 								\
6515 	return r;						\
6516 }								\
6517 EXPORT_SYMBOL(func);
6518 
6519 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6520 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6521 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6522 define_netdev_printk_level(netdev_err, KERN_ERR);
6523 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6524 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6525 define_netdev_printk_level(netdev_info, KERN_INFO);
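
/*
 * Illustrative sketch (not part of this file): drivers use these
 * helpers instead of raw printk() so each message is prefixed with the
 * device (and bus parent) name; "speed" is a hypothetical variable.
 *
 *	netdev_err(dev, "failed to map DMA buffer\n");
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */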
6526 
6527 static void __net_exit netdev_exit(struct net *net)
6528 {
6529 	kfree(net->dev_name_head);
6530 	kfree(net->dev_index_head);
6531 }
6532 
6533 static struct pernet_operations __net_initdata netdev_net_ops = {
6534 	.init = netdev_init,
6535 	.exit = netdev_exit,
6536 };
6537 
6538 static void __net_exit default_device_exit(struct net *net)
6539 {
6540 	struct net_device *dev, *aux;
6541 	/*
6542 	 * Push all migratable network devices back to the
6543 	 * initial network namespace
6544 	 */
6545 	rtnl_lock();
6546 	for_each_netdev_safe(net, dev, aux) {
6547 		int err;
6548 		char fb_name[IFNAMSIZ];
6549 
6550 		/* Ignore unmovable devices (e.g. loopback) */
6551 		if (dev->features & NETIF_F_NETNS_LOCAL)
6552 			continue;
6553 
6554 		/* Leave virtual devices for the generic cleanup */
6555 		if (dev->rtnl_link_ops)
6556 			continue;
6557 
6558 		/* Push remaining network devices to init_net */
6559 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6560 		err = dev_change_net_namespace(dev, &init_net, fb_name);
6561 		if (err) {
6562 			pr_emerg("%s: failed to move %s to init_net: %d\n",
6563 				 __func__, dev->name, err);
6564 			BUG();
6565 		}
6566 	}
6567 	rtnl_unlock();
6568 }
6569 
6570 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6571 {
6572 	/* At exit all network devices must be removed from a network
6573 	 * namespace.  Do this in the reverse order of registration.
6574 	 * Do this across as many network namespaces as possible to
6575 	 * improve batching efficiency.
6576 	 */
6577 	struct net_device *dev;
6578 	struct net *net;
6579 	LIST_HEAD(dev_kill_list);
6580 
6581 	rtnl_lock();
6582 	list_for_each_entry(net, net_list, exit_list) {
6583 		for_each_netdev_reverse(net, dev) {
6584 			if (dev->rtnl_link_ops)
6585 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6586 			else
6587 				unregister_netdevice_queue(dev, &dev_kill_list);
6588 		}
6589 	}
6590 	unregister_netdevice_many(&dev_kill_list);
6591 	list_del(&dev_kill_list);
6592 	rtnl_unlock();
6593 }
6594 
6595 static struct pernet_operations __net_initdata default_device_ops = {
6596 	.exit = default_device_exit,
6597 	.exit_batch = default_device_exit_batch,
6598 };
6599 
6600 /*
6601  *	Initialize the DEV module. At boot time this walks the device list and
6602  *	unhooks any devices that fail to initialise (normally hardware not
6603  *	present) and leaves us with a valid list of present and active devices.
6604  *
6605  */
6606 
6607 /*
6608  *       This is called single threaded during boot, so no need
6609  *       to take the rtnl semaphore.
6610  */
6611 static int __init net_dev_init(void)
6612 {
6613 	int i, rc = -ENOMEM;
6614 
6615 	BUG_ON(!dev_boot_phase);
6616 
6617 	if (dev_proc_init())
6618 		goto out;
6619 
6620 	if (netdev_kobject_init())
6621 		goto out;
6622 
6623 	INIT_LIST_HEAD(&ptype_all);
6624 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
6625 		INIT_LIST_HEAD(&ptype_base[i]);
6626 
6627 	if (register_pernet_subsys(&netdev_net_ops))
6628 		goto out;
6629 
6630 	/*
6631 	 *	Initialise the packet receive queues.
6632 	 */
6633 
6634 	for_each_possible_cpu(i) {
6635 		struct softnet_data *sd = &per_cpu(softnet_data, i);
6636 
6637 		memset(sd, 0, sizeof(*sd));
6638 		skb_queue_head_init(&sd->input_pkt_queue);
6639 		skb_queue_head_init(&sd->process_queue);
6640 		sd->completion_queue = NULL;
6641 		INIT_LIST_HEAD(&sd->poll_list);
6642 		sd->output_queue = NULL;
6643 		sd->output_queue_tailp = &sd->output_queue;
6644 #ifdef CONFIG_RPS
6645 		sd->csd.func = rps_trigger_softirq;
6646 		sd->csd.info = sd;
6647 		sd->csd.flags = 0;
6648 		sd->cpu = i;
6649 #endif
6650 
6651 		sd->backlog.poll = process_backlog;
6652 		sd->backlog.weight = weight_p;
6653 		sd->backlog.gro_list = NULL;
6654 		sd->backlog.gro_count = 0;
6655 	}
6656 
6657 	dev_boot_phase = 0;
6658 
6659 	/* The loopback device is special: if any other network device
6660 	 * is present in a network namespace, the loopback device must
6661 	 * be present too. Since we now dynamically allocate and free
6662 	 * the loopback device, ensure this invariant is maintained by
6663 	 * keeping the loopback device as the first device on the
6664 	 * list of network devices, so that it is the first device
6665 	 * that appears and the last network device that
6666 	 * disappears.
6667 	 */
6668 	if (register_pernet_device(&loopback_net_ops))
6669 		goto out;
6670 
6671 	if (register_pernet_device(&default_device_ops))
6672 		goto out;
6673 
6674 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6675 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6676 
6677 	hotcpu_notifier(dev_cpu_callback, 0);
6678 	dst_init();
6679 	dev_mcast_init();
6680 	rc = 0;
6681 out:
6682 	return rc;
6683 }
6684 
6685 subsys_initcall(net_dev_init);
6686 
6687 static int __init initialize_hashrnd(void)
6688 {
6689 	get_random_bytes(&hashrnd, sizeof(hashrnd));
6690 	return 0;
6691 }
6692 
6693 late_initcall_sync(initialize_hashrnd);
6694 
6695