xref: /linux/net/core/dev.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/hash.h>
83 #include <linux/slab.h>
84 #include <linux/sched.h>
85 #include <linux/mutex.h>
86 #include <linux/string.h>
87 #include <linux/mm.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/errno.h>
91 #include <linux/interrupt.h>
92 #include <linux/if_ether.h>
93 #include <linux/netdevice.h>
94 #include <linux/etherdevice.h>
95 #include <linux/ethtool.h>
96 #include <linux/notifier.h>
97 #include <linux/skbuff.h>
98 #include <net/net_namespace.h>
99 #include <net/sock.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <linux/stat.h>
104 #include <net/dst.h>
105 #include <net/pkt_sched.h>
106 #include <net/checksum.h>
107 #include <net/xfrm.h>
108 #include <linux/highmem.h>
109 #include <linux/init.h>
110 #include <linux/kmod.h>
111 #include <linux/module.h>
112 #include <linux/netpoll.h>
113 #include <linux/rcupdate.h>
114 #include <linux/delay.h>
115 #include <net/wext.h>
116 #include <net/iw_handler.h>
117 #include <asm/current.h>
118 #include <linux/audit.h>
119 #include <linux/dmaengine.h>
120 #include <linux/err.h>
121 #include <linux/ctype.h>
122 #include <linux/if_arp.h>
123 #include <linux/if_vlan.h>
124 #include <linux/ip.h>
125 #include <net/ip.h>
126 #include <linux/ipv6.h>
127 #include <linux/in.h>
128 #include <linux/jhash.h>
129 #include <linux/random.h>
130 #include <trace/events/napi.h>
131 #include <trace/events/net.h>
132 #include <trace/events/skb.h>
133 #include <linux/pci.h>
134 
135 #include "net-sysfs.h"
136 
137 /* Instead of increasing this, you should create a hash table. */
138 #define MAX_GRO_SKBS 8
139 
140 /* This should be increased if a protocol with a bigger head is added. */
141 #define GRO_MAX_HEAD (MAX_HEADER + 128)
142 
143 /*
144  *	The list of packet types we will receive (as opposed to discard)
145  *	and the routines to invoke.
146  *
147  *	Why 16. Because with 16 the only overlap we get on a hash of the
148  *	low nibble of the protocol value is RARP/SNAP/X.25.
149  *
150  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
151  *             sure which should go first, but I bet it won't make much
152  *             difference if we are running VLANs.  The good news is that
153  *             this protocol won't be in the list unless compiled in, so
154  *             the average user (w/out VLANs) will not be adversely affected.
155  *             --BLG
156  *
157  *		0800	IP
158  *		8100    802.1Q VLAN
159  *		0001	802.3
160  *		0002	AX.25
161  *		0004	802.2
162  *		8035	RARP
163  *		0005	SNAP
164  *		0805	X.25
165  *		0806	ARP
166  *		8137	IPX
167  *		0009	Localtalk
168  *		86DD	IPv6
169  */
170 
171 #define PTYPE_HASH_SIZE	(16)
172 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
173 
174 static DEFINE_SPINLOCK(ptype_lock);
175 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
176 static struct list_head ptype_all __read_mostly;	/* Taps */
177 
178 /*
179  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
180  * semaphore.
181  *
182  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
183  *
184  * Writers must hold the rtnl semaphore while they loop through the
185  * dev_base_head list, and hold dev_base_lock for writing when they do the
186  * actual updates.  This allows pure readers to access the list even
187  * while a writer is preparing to update it.
188  *
189  * To put it another way, dev_base_lock is held for writing only to
190  * protect against pure readers; the rtnl semaphore provides the
191  * protection against other writers.
192  *
193  * See, for example usages, register_netdevice() and
194  * unregister_netdevice(), which must be called with the rtnl
195  * semaphore held.
196  */
197 DEFINE_RWLOCK(dev_base_lock);
198 EXPORT_SYMBOL(dev_base_lock);
199 
200 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
201 {
202 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
203 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
204 }
205 
206 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
207 {
208 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
209 }
210 
211 static inline void rps_lock(struct softnet_data *sd)
212 {
213 #ifdef CONFIG_RPS
214 	spin_lock(&sd->input_pkt_queue.lock);
215 #endif
216 }
217 
218 static inline void rps_unlock(struct softnet_data *sd)
219 {
220 #ifdef CONFIG_RPS
221 	spin_unlock(&sd->input_pkt_queue.lock);
222 #endif
223 }
224 
225 /* Device list insertion */
226 static int list_netdevice(struct net_device *dev)
227 {
228 	struct net *net = dev_net(dev);
229 
230 	ASSERT_RTNL();
231 
232 	write_lock_bh(&dev_base_lock);
233 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
234 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
235 	hlist_add_head_rcu(&dev->index_hlist,
236 			   dev_index_hash(net, dev->ifindex));
237 	write_unlock_bh(&dev_base_lock);
238 	return 0;
239 }
240 
241 /* Device list removal
242  * caller must respect a RCU grace period before freeing/reusing dev
243  */
244 static void unlist_netdevice(struct net_device *dev)
245 {
246 	ASSERT_RTNL();
247 
248 	/* Unlink dev from the device chain */
249 	write_lock_bh(&dev_base_lock);
250 	list_del_rcu(&dev->dev_list);
251 	hlist_del_rcu(&dev->name_hlist);
252 	hlist_del_rcu(&dev->index_hlist);
253 	write_unlock_bh(&dev_base_lock);
254 }
255 
256 /*
257  *	Our notifier list
258  */
259 
260 static RAW_NOTIFIER_HEAD(netdev_chain);
261 
262 /*
263  *	Device drivers call our routines to queue packets here. We empty the
264  *	queue in the local softnet handler.
265  */
266 
267 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
268 EXPORT_PER_CPU_SYMBOL(softnet_data);
269 
270 #ifdef CONFIG_LOCKDEP
271 /*
272  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
273  * according to dev->type
274  */
275 static const unsigned short netdev_lock_type[] =
276 	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
277 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
278 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
279 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
280 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
281 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
282 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
283 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
284 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
285 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
286 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
287 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
288 	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
289 	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
290 	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
291 	 ARPHRD_VOID, ARPHRD_NONE};
292 
293 static const char *const netdev_lock_name[] =
294 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
295 	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
296 	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
297 	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
298 	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
299 	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
300 	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
301 	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
302 	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
303 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
304 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
305 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
306 	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
307 	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
308 	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
309 	 "_xmit_VOID", "_xmit_NONE"};
310 
311 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
312 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
313 
314 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
315 {
316 	int i;
317 
318 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
319 		if (netdev_lock_type[i] == dev_type)
320 			return i;
321 	/* the last key is used by default */
322 	return ARRAY_SIZE(netdev_lock_type) - 1;
323 }
324 
325 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
326 						 unsigned short dev_type)
327 {
328 	int i;
329 
330 	i = netdev_lock_pos(dev_type);
331 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
332 				   netdev_lock_name[i]);
333 }
334 
335 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
336 {
337 	int i;
338 
339 	i = netdev_lock_pos(dev->type);
340 	lockdep_set_class_and_name(&dev->addr_list_lock,
341 				   &netdev_addr_lock_key[i],
342 				   netdev_lock_name[i]);
343 }
344 #else
345 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
346 						 unsigned short dev_type)
347 {
348 }
349 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
350 {
351 }
352 #endif
353 
354 /*******************************************************************************
355 
356 		Protocol management and registration routines
357 
358 *******************************************************************************/
359 
360 /*
361  *	Add a protocol ID to the list. Now that the input handler is
362  *	smarter we can dispense with all the messy stuff that used to be
363  *	here.
364  *
365  *	BEWARE!!! Protocol handlers, mangling input packets,
366  *	MUST BE last in hash buckets and checking protocol handlers
367  *	MUST start from promiscuous ptype_all chain in net_bh.
368  *	It is true now, do not change it.
369  *	Explanation follows: if protocol handler, mangling packet, will
370  *	be the first on list, it is not able to sense, that packet
371  *	is cloned and should be copied-on-write, so that it will
372  *	change it and subsequent readers will get broken packet.
373  *							--ANK (980803)
374  */
375 
376 /**
377  *	dev_add_pack - add packet handler
378  *	@pt: packet type declaration
379  *
380  *	Add a protocol handler to the networking stack. The passed &packet_type
381  *	is linked into kernel lists and may not be freed until it has been
382  *	removed from the kernel lists.
383  *
384  *	This call does not sleep therefore it can not
385  *	guarantee all CPU's that are in middle of receiving packets
386  *	will see the new packet type (until the next received packet).
387  */
388 
389 void dev_add_pack(struct packet_type *pt)
390 {
391 	int hash;
392 
393 	spin_lock_bh(&ptype_lock);
394 	if (pt->type == htons(ETH_P_ALL))
395 		list_add_rcu(&pt->list, &ptype_all);
396 	else {
397 		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
398 		list_add_rcu(&pt->list, &ptype_base[hash]);
399 	}
400 	spin_unlock_bh(&ptype_lock);
401 }
402 EXPORT_SYMBOL(dev_add_pack);
403 
404 /**
405  *	__dev_remove_pack	 - remove packet handler
406  *	@pt: packet type declaration
407  *
408  *	Remove a protocol handler that was previously added to the kernel
409  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
410  *	from the kernel lists and can be freed or reused once this function
411  *	returns.
412  *
413  *      The packet type might still be in use by receivers
414  *	and must not be freed until after all the CPU's have gone
415  *	through a quiescent state.
416  */
417 void __dev_remove_pack(struct packet_type *pt)
418 {
419 	struct list_head *head;
420 	struct packet_type *pt1;
421 
422 	spin_lock_bh(&ptype_lock);
423 
424 	if (pt->type == htons(ETH_P_ALL))
425 		head = &ptype_all;
426 	else
427 		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
428 
429 	list_for_each_entry(pt1, head, list) {
430 		if (pt == pt1) {
431 			list_del_rcu(&pt->list);
432 			goto out;
433 		}
434 	}
435 
436 	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
437 out:
438 	spin_unlock_bh(&ptype_lock);
439 }
440 EXPORT_SYMBOL(__dev_remove_pack);
441 
442 /**
443  *	dev_remove_pack	 - remove packet handler
444  *	@pt: packet type declaration
445  *
446  *	Remove a protocol handler that was previously added to the kernel
447  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
448  *	from the kernel lists and can be freed or reused once this function
449  *	returns.
450  *
451  *	This call sleeps to guarantee that no CPU is looking at the packet
452  *	type after return.
453  */
454 void dev_remove_pack(struct packet_type *pt)
455 {
456 	__dev_remove_pack(pt);
457 
458 	synchronize_net();
459 }
460 EXPORT_SYMBOL(dev_remove_pack);
461 
462 /******************************************************************************
463 
464 		      Device Boot-time Settings Routines
465 
466 *******************************************************************************/
467 
468 /* Boot time configuration table */
469 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
470 
471 /**
472  *	netdev_boot_setup_add	- add new setup entry
473  *	@name: name of the device
474  *	@map: configured settings for the device
475  *
476  *	Adds new setup entry to the dev_boot_setup list.  The function
477  *	returns 0 on error and 1 on success.  This is a generic routine to
478  *	all netdevices.
479  */
480 static int netdev_boot_setup_add(char *name, struct ifmap *map)
481 {
482 	struct netdev_boot_setup *s;
483 	int i;
484 
485 	s = dev_boot_setup;
486 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
487 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
488 			memset(s[i].name, 0, sizeof(s[i].name));
489 			strlcpy(s[i].name, name, IFNAMSIZ);
490 			memcpy(&s[i].map, map, sizeof(s[i].map));
491 			break;
492 		}
493 	}
494 
495 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
496 }
497 
498 /**
499  *	netdev_boot_setup_check	- check boot time settings
500  *	@dev: the netdevice
501  *
502  * 	Check boot time settings for the device.
503  *	The found settings are set for the device to be used
504  *	later in the device probing.
505  *	Returns 0 if no settings found, 1 if they are.
506  */
507 int netdev_boot_setup_check(struct net_device *dev)
508 {
509 	struct netdev_boot_setup *s = dev_boot_setup;
510 	int i;
511 
512 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
513 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
514 		    !strcmp(dev->name, s[i].name)) {
515 			dev->irq 	= s[i].map.irq;
516 			dev->base_addr 	= s[i].map.base_addr;
517 			dev->mem_start 	= s[i].map.mem_start;
518 			dev->mem_end 	= s[i].map.mem_end;
519 			return 1;
520 		}
521 	}
522 	return 0;
523 }
524 EXPORT_SYMBOL(netdev_boot_setup_check);
525 
526 
527 /**
528  *	netdev_boot_base	- get address from boot time settings
529  *	@prefix: prefix for network device
530  *	@unit: id for network device
531  *
532  * 	Check boot time settings for the base address of device.
533  *	The found settings are set for the device to be used
534  *	later in the device probing.
535  *	Returns 0 if no settings found.
536  */
537 unsigned long netdev_boot_base(const char *prefix, int unit)
538 {
539 	const struct netdev_boot_setup *s = dev_boot_setup;
540 	char name[IFNAMSIZ];
541 	int i;
542 
543 	sprintf(name, "%s%d", prefix, unit);
544 
545 	/*
546 	 * If device already registered then return base of 1
547 	 * to indicate not to probe for this interface
548 	 */
549 	if (__dev_get_by_name(&init_net, name))
550 		return 1;
551 
552 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
553 		if (!strcmp(name, s[i].name))
554 			return s[i].map.base_addr;
555 	return 0;
556 }
557 
558 /*
559  * Saves at boot time configured settings for any netdevice.
560  */
561 int __init netdev_boot_setup(char *str)
562 {
563 	int ints[5];
564 	struct ifmap map;
565 
566 	str = get_options(str, ARRAY_SIZE(ints), ints);
567 	if (!str || !*str)
568 		return 0;
569 
570 	/* Save settings */
571 	memset(&map, 0, sizeof(map));
572 	if (ints[0] > 0)
573 		map.irq = ints[1];
574 	if (ints[0] > 1)
575 		map.base_addr = ints[2];
576 	if (ints[0] > 2)
577 		map.mem_start = ints[3];
578 	if (ints[0] > 3)
579 		map.mem_end = ints[4];
580 
581 	/* Add new entry to the list */
582 	return netdev_boot_setup_add(str, &map);
583 }
584 
585 __setup("netdev=", netdev_boot_setup);
586 
587 /*******************************************************************************
588 
589 			    Device Interface Subroutines
590 
591 *******************************************************************************/
592 
593 /**
594  *	__dev_get_by_name	- find a device by its name
595  *	@net: the applicable net namespace
596  *	@name: name to find
597  *
598  *	Find an interface by name. Must be called under RTNL semaphore
599  *	or @dev_base_lock. If the name is found a pointer to the device
600  *	is returned. If the name is not found then %NULL is returned. The
601  *	reference counters are not incremented so the caller must be
602  *	careful with locks.
603  */
604 
605 struct net_device *__dev_get_by_name(struct net *net, const char *name)
606 {
607 	struct hlist_node *p;
608 	struct net_device *dev;
609 	struct hlist_head *head = dev_name_hash(net, name);
610 
611 	hlist_for_each_entry(dev, p, head, name_hlist)
612 		if (!strncmp(dev->name, name, IFNAMSIZ))
613 			return dev;
614 
615 	return NULL;
616 }
617 EXPORT_SYMBOL(__dev_get_by_name);
618 
619 /**
620  *	dev_get_by_name_rcu	- find a device by its name
621  *	@net: the applicable net namespace
622  *	@name: name to find
623  *
624  *	Find an interface by name.
625  *	If the name is found a pointer to the device is returned.
626  * 	If the name is not found then %NULL is returned.
627  *	The reference counters are not incremented so the caller must be
628  *	careful with locks. The caller must hold RCU lock.
629  */
630 
631 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
632 {
633 	struct hlist_node *p;
634 	struct net_device *dev;
635 	struct hlist_head *head = dev_name_hash(net, name);
636 
637 	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
638 		if (!strncmp(dev->name, name, IFNAMSIZ))
639 			return dev;
640 
641 	return NULL;
642 }
643 EXPORT_SYMBOL(dev_get_by_name_rcu);
644 
645 /**
646  *	dev_get_by_name		- find a device by its name
647  *	@net: the applicable net namespace
648  *	@name: name to find
649  *
650  *	Find an interface by name. This can be called from any
651  *	context and does its own locking. The returned handle has
652  *	the usage count incremented and the caller must use dev_put() to
653  *	release it when it is no longer needed. %NULL is returned if no
654  *	matching device is found.
655  */
656 
657 struct net_device *dev_get_by_name(struct net *net, const char *name)
658 {
659 	struct net_device *dev;
660 
661 	rcu_read_lock();
662 	dev = dev_get_by_name_rcu(net, name);
663 	if (dev)
664 		dev_hold(dev);
665 	rcu_read_unlock();
666 	return dev;
667 }
668 EXPORT_SYMBOL(dev_get_by_name);
669 
670 /**
671  *	__dev_get_by_index - find a device by its ifindex
672  *	@net: the applicable net namespace
673  *	@ifindex: index of device
674  *
675  *	Search for an interface by index. Returns %NULL if the device
676  *	is not found or a pointer to the device. The device has not
677  *	had its reference counter increased so the caller must be careful
678  *	about locking. The caller must hold either the RTNL semaphore
679  *	or @dev_base_lock.
680  */
681 
682 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
683 {
684 	struct hlist_node *p;
685 	struct net_device *dev;
686 	struct hlist_head *head = dev_index_hash(net, ifindex);
687 
688 	hlist_for_each_entry(dev, p, head, index_hlist)
689 		if (dev->ifindex == ifindex)
690 			return dev;
691 
692 	return NULL;
693 }
694 EXPORT_SYMBOL(__dev_get_by_index);
695 
696 /**
697  *	dev_get_by_index_rcu - find a device by its ifindex
698  *	@net: the applicable net namespace
699  *	@ifindex: index of device
700  *
701  *	Search for an interface by index. Returns %NULL if the device
702  *	is not found or a pointer to the device. The device has not
703  *	had its reference counter increased so the caller must be careful
704  *	about locking. The caller must hold RCU lock.
705  */
706 
707 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
708 {
709 	struct hlist_node *p;
710 	struct net_device *dev;
711 	struct hlist_head *head = dev_index_hash(net, ifindex);
712 
713 	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
714 		if (dev->ifindex == ifindex)
715 			return dev;
716 
717 	return NULL;
718 }
719 EXPORT_SYMBOL(dev_get_by_index_rcu);
720 
721 
722 /**
723  *	dev_get_by_index - find a device by its ifindex
724  *	@net: the applicable net namespace
725  *	@ifindex: index of device
726  *
727  *	Search for an interface by index. Returns NULL if the device
728  *	is not found or a pointer to the device. The device returned has
729  *	had a reference added and the pointer is safe until the user calls
730  *	dev_put to indicate they have finished with it.
731  */
732 
733 struct net_device *dev_get_by_index(struct net *net, int ifindex)
734 {
735 	struct net_device *dev;
736 
737 	rcu_read_lock();
738 	dev = dev_get_by_index_rcu(net, ifindex);
739 	if (dev)
740 		dev_hold(dev);
741 	rcu_read_unlock();
742 	return dev;
743 }
744 EXPORT_SYMBOL(dev_get_by_index);
745 
746 /**
747  *	dev_getbyhwaddr - find a device by its hardware address
748  *	@net: the applicable net namespace
749  *	@type: media type of device
750  *	@ha: hardware address
751  *
752  *	Search for an interface by MAC address. Returns NULL if the device
753  *	is not found or a pointer to the device. The caller must hold the
754  *	rtnl semaphore. The returned device has not had its ref count increased
755  *	and the caller must therefore be careful about locking
756  *
757  *	BUGS:
758  *	If the API was consistent this would be __dev_get_by_hwaddr
759  */
760 
761 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
762 {
763 	struct net_device *dev;
764 
765 	ASSERT_RTNL();
766 
767 	for_each_netdev(net, dev)
768 		if (dev->type == type &&
769 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
770 			return dev;
771 
772 	return NULL;
773 }
774 EXPORT_SYMBOL(dev_getbyhwaddr);
775 
776 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
777 {
778 	struct net_device *dev;
779 
780 	ASSERT_RTNL();
781 	for_each_netdev(net, dev)
782 		if (dev->type == type)
783 			return dev;
784 
785 	return NULL;
786 }
787 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
788 
789 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
790 {
791 	struct net_device *dev, *ret = NULL;
792 
793 	rcu_read_lock();
794 	for_each_netdev_rcu(net, dev)
795 		if (dev->type == type) {
796 			dev_hold(dev);
797 			ret = dev;
798 			break;
799 		}
800 	rcu_read_unlock();
801 	return ret;
802 }
803 EXPORT_SYMBOL(dev_getfirstbyhwtype);
804 
805 /**
806  *	dev_get_by_flags_rcu - find any device with given flags
807  *	@net: the applicable net namespace
808  *	@if_flags: IFF_* values
809  *	@mask: bitmask of bits in if_flags to check
810  *
811  *	Search for any interface with the given flags. Returns NULL if a device
812  *	is not found or a pointer to the device. Must be called inside
813  *	rcu_read_lock(), and result refcount is unchanged.
814  */
815 
816 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
817 				    unsigned short mask)
818 {
819 	struct net_device *dev, *ret;
820 
821 	ret = NULL;
822 	for_each_netdev_rcu(net, dev) {
823 		if (((dev->flags ^ if_flags) & mask) == 0) {
824 			ret = dev;
825 			break;
826 		}
827 	}
828 	return ret;
829 }
830 EXPORT_SYMBOL(dev_get_by_flags_rcu);
831 
832 /**
833  *	dev_valid_name - check if name is okay for network device
834  *	@name: name string
835  *
836  *	Network device names need to be valid file names to
837  *	to allow sysfs to work.  We also disallow any kind of
838  *	whitespace.
839  */
840 int dev_valid_name(const char *name)
841 {
842 	if (*name == '\0')
843 		return 0;
844 	if (strlen(name) >= IFNAMSIZ)
845 		return 0;
846 	if (!strcmp(name, ".") || !strcmp(name, ".."))
847 		return 0;
848 
849 	while (*name) {
850 		if (*name == '/' || isspace(*name))
851 			return 0;
852 		name++;
853 	}
854 	return 1;
855 }
856 EXPORT_SYMBOL(dev_valid_name);
857 
858 /**
859  *	__dev_alloc_name - allocate a name for a device
860  *	@net: network namespace to allocate the device name in
861  *	@name: name format string
862  *	@buf:  scratch buffer and result name string
863  *
864  *	Passed a format string - eg "lt%d" it will try and find a suitable
865  *	id. It scans list of devices to build up a free map, then chooses
866  *	the first empty slot. The caller must hold the dev_base or rtnl lock
867  *	while allocating the name and adding the device in order to avoid
868  *	duplicates.
869  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
870  *	Returns the number of the unit assigned or a negative errno code.
871  */
872 
873 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
874 {
875 	int i = 0;
876 	const char *p;
877 	const int max_netdevices = 8*PAGE_SIZE;
878 	unsigned long *inuse;
879 	struct net_device *d;
880 
881 	p = strnchr(name, IFNAMSIZ-1, '%');
882 	if (p) {
883 		/*
884 		 * Verify the string as this thing may have come from
885 		 * the user.  There must be either one "%d" and no other "%"
886 		 * characters.
887 		 */
888 		if (p[1] != 'd' || strchr(p + 2, '%'))
889 			return -EINVAL;
890 
891 		/* Use one page as a bit array of possible slots */
892 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
893 		if (!inuse)
894 			return -ENOMEM;
895 
896 		for_each_netdev(net, d) {
897 			if (!sscanf(d->name, name, &i))
898 				continue;
899 			if (i < 0 || i >= max_netdevices)
900 				continue;
901 
902 			/*  avoid cases where sscanf is not exact inverse of printf */
903 			snprintf(buf, IFNAMSIZ, name, i);
904 			if (!strncmp(buf, d->name, IFNAMSIZ))
905 				set_bit(i, inuse);
906 		}
907 
908 		i = find_first_zero_bit(inuse, max_netdevices);
909 		free_page((unsigned long) inuse);
910 	}
911 
912 	if (buf != name)
913 		snprintf(buf, IFNAMSIZ, name, i);
914 	if (!__dev_get_by_name(net, buf))
915 		return i;
916 
917 	/* It is possible to run out of possible slots
918 	 * when the name is long and there isn't enough space left
919 	 * for the digits, or if all bits are used.
920 	 */
921 	return -ENFILE;
922 }
923 
924 /**
925  *	dev_alloc_name - allocate a name for a device
926  *	@dev: device
927  *	@name: name format string
928  *
929  *	Passed a format string - eg "lt%d" it will try and find a suitable
930  *	id. It scans list of devices to build up a free map, then chooses
931  *	the first empty slot. The caller must hold the dev_base or rtnl lock
932  *	while allocating the name and adding the device in order to avoid
933  *	duplicates.
934  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
935  *	Returns the number of the unit assigned or a negative errno code.
936  */
937 
938 int dev_alloc_name(struct net_device *dev, const char *name)
939 {
940 	char buf[IFNAMSIZ];
941 	struct net *net;
942 	int ret;
943 
944 	BUG_ON(!dev_net(dev));
945 	net = dev_net(dev);
946 	ret = __dev_alloc_name(net, name, buf);
947 	if (ret >= 0)
948 		strlcpy(dev->name, buf, IFNAMSIZ);
949 	return ret;
950 }
951 EXPORT_SYMBOL(dev_alloc_name);
952 
953 static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
954 {
955 	struct net *net;
956 
957 	BUG_ON(!dev_net(dev));
958 	net = dev_net(dev);
959 
960 	if (!dev_valid_name(name))
961 		return -EINVAL;
962 
963 	if (fmt && strchr(name, '%'))
964 		return dev_alloc_name(dev, name);
965 	else if (__dev_get_by_name(net, name))
966 		return -EEXIST;
967 	else if (dev->name != name)
968 		strlcpy(dev->name, name, IFNAMSIZ);
969 
970 	return 0;
971 }
972 
973 /**
974  *	dev_change_name - change name of a device
975  *	@dev: device
976  *	@newname: name (or format string) must be at least IFNAMSIZ
977  *
978  *	Change name of a device, can pass format strings "eth%d".
979  *	for wildcarding.
980  */
981 int dev_change_name(struct net_device *dev, const char *newname)
982 {
983 	char oldname[IFNAMSIZ];
984 	int err = 0;
985 	int ret;
986 	struct net *net;
987 
988 	ASSERT_RTNL();
989 	BUG_ON(!dev_net(dev));
990 
991 	net = dev_net(dev);
992 	if (dev->flags & IFF_UP)
993 		return -EBUSY;
994 
995 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
996 		return 0;
997 
998 	memcpy(oldname, dev->name, IFNAMSIZ);
999 
1000 	err = dev_get_valid_name(dev, newname, 1);
1001 	if (err < 0)
1002 		return err;
1003 
1004 rollback:
1005 	ret = device_rename(&dev->dev, dev->name);
1006 	if (ret) {
1007 		memcpy(dev->name, oldname, IFNAMSIZ);
1008 		return ret;
1009 	}
1010 
1011 	write_lock_bh(&dev_base_lock);
1012 	hlist_del(&dev->name_hlist);
1013 	write_unlock_bh(&dev_base_lock);
1014 
1015 	synchronize_rcu();
1016 
1017 	write_lock_bh(&dev_base_lock);
1018 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1019 	write_unlock_bh(&dev_base_lock);
1020 
1021 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1022 	ret = notifier_to_errno(ret);
1023 
1024 	if (ret) {
1025 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1026 		if (err >= 0) {
1027 			err = ret;
1028 			memcpy(dev->name, oldname, IFNAMSIZ);
1029 			goto rollback;
1030 		} else {
1031 			printk(KERN_ERR
1032 			       "%s: name change rollback failed: %d.\n",
1033 			       dev->name, ret);
1034 		}
1035 	}
1036 
1037 	return err;
1038 }
1039 
1040 /**
1041  *	dev_set_alias - change ifalias of a device
1042  *	@dev: device
1043  *	@alias: name up to IFALIASZ
1044  *	@len: limit of bytes to copy from info
1045  *
1046  *	Set ifalias for a device,
1047  */
1048 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1049 {
1050 	ASSERT_RTNL();
1051 
1052 	if (len >= IFALIASZ)
1053 		return -EINVAL;
1054 
1055 	if (!len) {
1056 		if (dev->ifalias) {
1057 			kfree(dev->ifalias);
1058 			dev->ifalias = NULL;
1059 		}
1060 		return 0;
1061 	}
1062 
1063 	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1064 	if (!dev->ifalias)
1065 		return -ENOMEM;
1066 
1067 	strlcpy(dev->ifalias, alias, len+1);
1068 	return len;
1069 }
1070 
1071 
1072 /**
1073  *	netdev_features_change - device changes features
1074  *	@dev: device to cause notification
1075  *
1076  *	Called to indicate a device has changed features.
1077  */
1078 void netdev_features_change(struct net_device *dev)
1079 {
1080 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1081 }
1082 EXPORT_SYMBOL(netdev_features_change);
1083 
1084 /**
1085  *	netdev_state_change - device changes state
1086  *	@dev: device to cause notification
1087  *
1088  *	Called to indicate a device has changed state. This function calls
1089  *	the notifier chains for netdev_chain and sends a NEWLINK message
1090  *	to the routing socket.
1091  */
1092 void netdev_state_change(struct net_device *dev)
1093 {
1094 	if (dev->flags & IFF_UP) {
1095 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
1096 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1097 	}
1098 }
1099 EXPORT_SYMBOL(netdev_state_change);
1100 
1101 int netdev_bonding_change(struct net_device *dev, unsigned long event)
1102 {
1103 	return call_netdevice_notifiers(event, dev);
1104 }
1105 EXPORT_SYMBOL(netdev_bonding_change);
1106 
1107 /**
1108  *	dev_load 	- load a network module
1109  *	@net: the applicable net namespace
1110  *	@name: name of interface
1111  *
1112  *	If a network interface is not present and the process has suitable
1113  *	privileges this function loads the module. If module loading is not
1114  *	available in this kernel then it becomes a nop.
1115  */
1116 
1117 void dev_load(struct net *net, const char *name)
1118 {
1119 	struct net_device *dev;
1120 
1121 	rcu_read_lock();
1122 	dev = dev_get_by_name_rcu(net, name);
1123 	rcu_read_unlock();
1124 
1125 	if (!dev && capable(CAP_NET_ADMIN))
1126 		request_module("%s", name);
1127 }
1128 EXPORT_SYMBOL(dev_load);
1129 
1130 static int __dev_open(struct net_device *dev)
1131 {
1132 	const struct net_device_ops *ops = dev->netdev_ops;
1133 	int ret;
1134 
1135 	ASSERT_RTNL();
1136 
1137 	/*
1138 	 *	Is it even present?
1139 	 */
1140 	if (!netif_device_present(dev))
1141 		return -ENODEV;
1142 
1143 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1144 	ret = notifier_to_errno(ret);
1145 	if (ret)
1146 		return ret;
1147 
1148 	/*
1149 	 *	Call device private open method
1150 	 */
1151 	set_bit(__LINK_STATE_START, &dev->state);
1152 
1153 	if (ops->ndo_validate_addr)
1154 		ret = ops->ndo_validate_addr(dev);
1155 
1156 	if (!ret && ops->ndo_open)
1157 		ret = ops->ndo_open(dev);
1158 
1159 	/*
1160 	 *	If it went open OK then:
1161 	 */
1162 
1163 	if (ret)
1164 		clear_bit(__LINK_STATE_START, &dev->state);
1165 	else {
1166 		/*
1167 		 *	Set the flags.
1168 		 */
1169 		dev->flags |= IFF_UP;
1170 
1171 		/*
1172 		 *	Enable NET_DMA
1173 		 */
1174 		net_dmaengine_get();
1175 
1176 		/*
1177 		 *	Initialize multicasting status
1178 		 */
1179 		dev_set_rx_mode(dev);
1180 
1181 		/*
1182 		 *	Wakeup transmit queue engine
1183 		 */
1184 		dev_activate(dev);
1185 	}
1186 
1187 	return ret;
1188 }
1189 
1190 /**
1191  *	dev_open	- prepare an interface for use.
1192  *	@dev:	device to open
1193  *
1194  *	Takes a device from down to up state. The device's private open
1195  *	function is invoked and then the multicast lists are loaded. Finally
1196  *	the device is moved into the up state and a %NETDEV_UP message is
1197  *	sent to the netdev notifier chain.
1198  *
1199  *	Calling this function on an active interface is a nop. On a failure
1200  *	a negative errno code is returned.
1201  */
1202 int dev_open(struct net_device *dev)
1203 {
1204 	int ret;
1205 
1206 	/*
1207 	 *	Is it already up?
1208 	 */
1209 	if (dev->flags & IFF_UP)
1210 		return 0;
1211 
1212 	/*
1213 	 *	Open device
1214 	 */
1215 	ret = __dev_open(dev);
1216 	if (ret < 0)
1217 		return ret;
1218 
1219 	/*
1220 	 *	... and announce new interface.
1221 	 */
1222 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1223 	call_netdevice_notifiers(NETDEV_UP, dev);
1224 
1225 	return ret;
1226 }
1227 EXPORT_SYMBOL(dev_open);
1228 
1229 static int __dev_close(struct net_device *dev)
1230 {
1231 	const struct net_device_ops *ops = dev->netdev_ops;
1232 
1233 	ASSERT_RTNL();
1234 	might_sleep();
1235 
1236 	/*
1237 	 *	Tell people we are going down, so that they can
1238 	 *	prepare to death, when device is still operating.
1239 	 */
1240 	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1241 
1242 	clear_bit(__LINK_STATE_START, &dev->state);
1243 
1244 	/* Synchronize to scheduled poll. We cannot touch poll list,
1245 	 * it can be even on different cpu. So just clear netif_running().
1246 	 *
1247 	 * dev->stop() will invoke napi_disable() on all of it's
1248 	 * napi_struct instances on this device.
1249 	 */
1250 	smp_mb__after_clear_bit(); /* Commit netif_running(). */
1251 
1252 	dev_deactivate(dev);
1253 
1254 	/*
1255 	 *	Call the device specific close. This cannot fail.
1256 	 *	Only if device is UP
1257 	 *
1258 	 *	We allow it to be called even after a DETACH hot-plug
1259 	 *	event.
1260 	 */
1261 	if (ops->ndo_stop)
1262 		ops->ndo_stop(dev);
1263 
1264 	/*
1265 	 *	Device is now down.
1266 	 */
1267 
1268 	dev->flags &= ~IFF_UP;
1269 
1270 	/*
1271 	 *	Shutdown NET_DMA
1272 	 */
1273 	net_dmaengine_put();
1274 
1275 	return 0;
1276 }
1277 
1278 /**
1279  *	dev_close - shutdown an interface.
1280  *	@dev: device to shutdown
1281  *
1282  *	This function moves an active device into down state. A
1283  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1284  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1285  *	chain.
1286  */
1287 int dev_close(struct net_device *dev)
1288 {
1289 	if (!(dev->flags & IFF_UP))
1290 		return 0;
1291 
1292 	__dev_close(dev);
1293 
1294 	/*
1295 	 * Tell people we are down
1296 	 */
1297 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1298 	call_netdevice_notifiers(NETDEV_DOWN, dev);
1299 
1300 	return 0;
1301 }
1302 EXPORT_SYMBOL(dev_close);
1303 
1304 
1305 /**
1306  *	dev_disable_lro - disable Large Receive Offload on a device
1307  *	@dev: device
1308  *
1309  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1310  *	called under RTNL.  This is needed if received packets may be
1311  *	forwarded to another interface.
1312  */
1313 void dev_disable_lro(struct net_device *dev)
1314 {
1315 	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1316 	    dev->ethtool_ops->set_flags) {
1317 		u32 flags = dev->ethtool_ops->get_flags(dev);
1318 		if (flags & ETH_FLAG_LRO) {
1319 			flags &= ~ETH_FLAG_LRO;
1320 			dev->ethtool_ops->set_flags(dev, flags);
1321 		}
1322 	}
1323 	WARN_ON(dev->features & NETIF_F_LRO);
1324 }
1325 EXPORT_SYMBOL(dev_disable_lro);
1326 
1327 
1328 static int dev_boot_phase = 1;
1329 
1330 /*
1331  *	Device change register/unregister. These are not inline or static
1332  *	as we export them to the world.
1333  */
1334 
1335 /**
1336  *	register_netdevice_notifier - register a network notifier block
1337  *	@nb: notifier
1338  *
1339  *	Register a notifier to be called when network device events occur.
1340  *	The notifier passed is linked into the kernel structures and must
1341  *	not be reused until it has been unregistered. A negative errno code
1342  *	is returned on a failure.
1343  *
1344  * 	When registered all registration and up events are replayed
1345  *	to the new notifier to allow device to have a race free
1346  *	view of the network device list.
1347  */
1348 
1349 int register_netdevice_notifier(struct notifier_block *nb)
1350 {
1351 	struct net_device *dev;
1352 	struct net_device *last;
1353 	struct net *net;
1354 	int err;
1355 
1356 	rtnl_lock();
1357 	err = raw_notifier_chain_register(&netdev_chain, nb);
1358 	if (err)
1359 		goto unlock;
1360 	if (dev_boot_phase)
1361 		goto unlock;
1362 	for_each_net(net) {
1363 		for_each_netdev(net, dev) {
1364 			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1365 			err = notifier_to_errno(err);
1366 			if (err)
1367 				goto rollback;
1368 
1369 			if (!(dev->flags & IFF_UP))
1370 				continue;
1371 
1372 			nb->notifier_call(nb, NETDEV_UP, dev);
1373 		}
1374 	}
1375 
1376 unlock:
1377 	rtnl_unlock();
1378 	return err;
1379 
1380 rollback:
1381 	last = dev;
1382 	for_each_net(net) {
1383 		for_each_netdev(net, dev) {
1384 			if (dev == last)
1385 				break;
1386 
1387 			if (dev->flags & IFF_UP) {
1388 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1389 				nb->notifier_call(nb, NETDEV_DOWN, dev);
1390 			}
1391 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1392 			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1393 		}
1394 	}
1395 
1396 	raw_notifier_chain_unregister(&netdev_chain, nb);
1397 	goto unlock;
1398 }
1399 EXPORT_SYMBOL(register_netdevice_notifier);
1400 
1401 /**
1402  *	unregister_netdevice_notifier - unregister a network notifier block
1403  *	@nb: notifier
1404  *
1405  *	Unregister a notifier previously registered by
1406  *	register_netdevice_notifier(). The notifier is unlinked into the
1407  *	kernel structures and may then be reused. A negative errno code
1408  *	is returned on a failure.
1409  */
1410 
1411 int unregister_netdevice_notifier(struct notifier_block *nb)
1412 {
1413 	int err;
1414 
1415 	rtnl_lock();
1416 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1417 	rtnl_unlock();
1418 	return err;
1419 }
1420 EXPORT_SYMBOL(unregister_netdevice_notifier);
1421 
1422 /**
1423  *	call_netdevice_notifiers - call all network notifier blocks
1424  *      @val: value passed unmodified to notifier function
1425  *      @dev: net_device pointer passed unmodified to notifier function
1426  *
1427  *	Call all network notifier blocks.  Parameters and return value
1428  *	are as for raw_notifier_call_chain().
1429  */
1430 
1431 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1432 {
1433 	ASSERT_RTNL();
1434 	return raw_notifier_call_chain(&netdev_chain, val, dev);
1435 }
1436 
1437 /* When > 0 there are consumers of rx skb time stamps */
1438 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1439 
1440 void net_enable_timestamp(void)
1441 {
1442 	atomic_inc(&netstamp_needed);
1443 }
1444 EXPORT_SYMBOL(net_enable_timestamp);
1445 
1446 void net_disable_timestamp(void)
1447 {
1448 	atomic_dec(&netstamp_needed);
1449 }
1450 EXPORT_SYMBOL(net_disable_timestamp);
1451 
1452 static inline void net_timestamp_set(struct sk_buff *skb)
1453 {
1454 	if (atomic_read(&netstamp_needed))
1455 		__net_timestamp(skb);
1456 	else
1457 		skb->tstamp.tv64 = 0;
1458 }
1459 
1460 static inline void net_timestamp_check(struct sk_buff *skb)
1461 {
1462 	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
1463 		__net_timestamp(skb);
1464 }
1465 
1466 /**
1467  * dev_forward_skb - loopback an skb to another netif
1468  *
1469  * @dev: destination network device
1470  * @skb: buffer to forward
1471  *
1472  * return values:
1473  *	NET_RX_SUCCESS	(no congestion)
1474  *	NET_RX_DROP     (packet was dropped, but freed)
1475  *
1476  * dev_forward_skb can be used for injecting an skb from the
1477  * start_xmit function of one device into the receive queue
1478  * of another device.
1479  *
1480  * The receiving device may be in another namespace, so
1481  * we have to clear all information in the skb that could
1482  * impact namespace isolation.
1483  */
1484 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1485 {
1486 	skb_orphan(skb);
1487 	nf_reset(skb);
1488 
1489 	if (!(dev->flags & IFF_UP) ||
1490 	    (skb->len > (dev->mtu + dev->hard_header_len))) {
1491 		kfree_skb(skb);
1492 		return NET_RX_DROP;
1493 	}
1494 	skb_set_dev(skb, dev);
1495 	skb->tstamp.tv64 = 0;
1496 	skb->pkt_type = PACKET_HOST;
1497 	skb->protocol = eth_type_trans(skb, dev);
1498 	return netif_rx(skb);
1499 }
1500 EXPORT_SYMBOL_GPL(dev_forward_skb);
1501 
1502 /*
1503  *	Support routine. Sends outgoing frames to any network
1504  *	taps currently in use.
1505  */
1506 
1507 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1508 {
1509 	struct packet_type *ptype;
1510 
1511 #ifdef CONFIG_NET_CLS_ACT
1512 	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1513 		net_timestamp_set(skb);
1514 #else
1515 	net_timestamp_set(skb);
1516 #endif
1517 
1518 	rcu_read_lock();
1519 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1520 		/* Never send packets back to the socket
1521 		 * they originated from - MvS (miquels@drinkel.ow.org)
1522 		 */
1523 		if ((ptype->dev == dev || !ptype->dev) &&
1524 		    (ptype->af_packet_priv == NULL ||
1525 		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
1526 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1527 			if (!skb2)
1528 				break;
1529 
1530 			/* skb->nh should be correctly
1531 			   set by sender, so that the second statement is
1532 			   just protection against buggy protocols.
1533 			 */
1534 			skb_reset_mac_header(skb2);
1535 
1536 			if (skb_network_header(skb2) < skb2->data ||
1537 			    skb2->network_header > skb2->tail) {
1538 				if (net_ratelimit())
1539 					printk(KERN_CRIT "protocol %04x is "
1540 					       "buggy, dev %s\n",
1541 					       ntohs(skb2->protocol),
1542 					       dev->name);
1543 				skb_reset_network_header(skb2);
1544 			}
1545 
1546 			skb2->transport_header = skb2->network_header;
1547 			skb2->pkt_type = PACKET_OUTGOING;
1548 			ptype->func(skb2, skb->dev, ptype, skb->dev);
1549 		}
1550 	}
1551 	rcu_read_unlock();
1552 }
1553 
1554 /*
1555  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1556  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
1557  */
1558 void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1559 {
1560 	unsigned int real_num = dev->real_num_tx_queues;
1561 
1562 	if (unlikely(txq > dev->num_tx_queues))
1563 		;
1564 	else if (txq > real_num)
1565 		dev->real_num_tx_queues = txq;
1566 	else if (txq < real_num) {
1567 		dev->real_num_tx_queues = txq;
1568 		qdisc_reset_all_tx_gt(dev, txq);
1569 	}
1570 }
1571 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1572 
1573 static inline void __netif_reschedule(struct Qdisc *q)
1574 {
1575 	struct softnet_data *sd;
1576 	unsigned long flags;
1577 
1578 	local_irq_save(flags);
1579 	sd = &__get_cpu_var(softnet_data);
1580 	q->next_sched = NULL;
1581 	*sd->output_queue_tailp = q;
1582 	sd->output_queue_tailp = &q->next_sched;
1583 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
1584 	local_irq_restore(flags);
1585 }
1586 
1587 void __netif_schedule(struct Qdisc *q)
1588 {
1589 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1590 		__netif_reschedule(q);
1591 }
1592 EXPORT_SYMBOL(__netif_schedule);
1593 
1594 void dev_kfree_skb_irq(struct sk_buff *skb)
1595 {
1596 	if (atomic_dec_and_test(&skb->users)) {
1597 		struct softnet_data *sd;
1598 		unsigned long flags;
1599 
1600 		local_irq_save(flags);
1601 		sd = &__get_cpu_var(softnet_data);
1602 		skb->next = sd->completion_queue;
1603 		sd->completion_queue = skb;
1604 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
1605 		local_irq_restore(flags);
1606 	}
1607 }
1608 EXPORT_SYMBOL(dev_kfree_skb_irq);
1609 
1610 void dev_kfree_skb_any(struct sk_buff *skb)
1611 {
1612 	if (in_irq() || irqs_disabled())
1613 		dev_kfree_skb_irq(skb);
1614 	else
1615 		dev_kfree_skb(skb);
1616 }
1617 EXPORT_SYMBOL(dev_kfree_skb_any);
1618 
1619 
1620 /**
1621  * netif_device_detach - mark device as removed
1622  * @dev: network device
1623  *
1624  * Mark device as removed from system and therefore no longer available.
1625  */
1626 void netif_device_detach(struct net_device *dev)
1627 {
1628 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1629 	    netif_running(dev)) {
1630 		netif_tx_stop_all_queues(dev);
1631 	}
1632 }
1633 EXPORT_SYMBOL(netif_device_detach);
1634 
1635 /**
1636  * netif_device_attach - mark device as attached
1637  * @dev: network device
1638  *
1639  * Mark device as attached from system and restart if needed.
1640  */
1641 void netif_device_attach(struct net_device *dev)
1642 {
1643 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1644 	    netif_running(dev)) {
1645 		netif_tx_wake_all_queues(dev);
1646 		__netdev_watchdog_up(dev);
1647 	}
1648 }
1649 EXPORT_SYMBOL(netif_device_attach);
1650 
1651 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1652 {
1653 	return ((features & NETIF_F_GEN_CSUM) ||
1654 		((features & NETIF_F_IP_CSUM) &&
1655 		 protocol == htons(ETH_P_IP)) ||
1656 		((features & NETIF_F_IPV6_CSUM) &&
1657 		 protocol == htons(ETH_P_IPV6)) ||
1658 		((features & NETIF_F_FCOE_CRC) &&
1659 		 protocol == htons(ETH_P_FCOE)));
1660 }
1661 
1662 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1663 {
1664 	if (can_checksum_protocol(dev->features, skb->protocol))
1665 		return true;
1666 
1667 	if (skb->protocol == htons(ETH_P_8021Q)) {
1668 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1669 		if (can_checksum_protocol(dev->features & dev->vlan_features,
1670 					  veh->h_vlan_encapsulated_proto))
1671 			return true;
1672 	}
1673 
1674 	return false;
1675 }
1676 
1677 /**
1678  * skb_dev_set -- assign a new device to a buffer
1679  * @skb: buffer for the new device
1680  * @dev: network device
1681  *
1682  * If an skb is owned by a device already, we have to reset
1683  * all data private to the namespace a device belongs to
1684  * before assigning it a new device.
1685  */
1686 #ifdef CONFIG_NET_NS
1687 void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1688 {
1689 	skb_dst_drop(skb);
1690 	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1691 		secpath_reset(skb);
1692 		nf_reset(skb);
1693 		skb_init_secmark(skb);
1694 		skb->mark = 0;
1695 		skb->priority = 0;
1696 		skb->nf_trace = 0;
1697 		skb->ipvs_property = 0;
1698 #ifdef CONFIG_NET_SCHED
1699 		skb->tc_index = 0;
1700 #endif
1701 	}
1702 	skb->dev = dev;
1703 }
1704 EXPORT_SYMBOL(skb_set_dev);
1705 #endif /* CONFIG_NET_NS */
1706 
1707 /*
1708  * Invalidate hardware checksum when packet is to be mangled, and
1709  * complete checksum manually on outgoing path.
1710  */
1711 int skb_checksum_help(struct sk_buff *skb)
1712 {
1713 	__wsum csum;
1714 	int ret = 0, offset;
1715 
1716 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1717 		goto out_set_summed;
1718 
1719 	if (unlikely(skb_shinfo(skb)->gso_size)) {
1720 		/* Let GSO fix up the checksum. */
1721 		goto out_set_summed;
1722 	}
1723 
1724 	offset = skb->csum_start - skb_headroom(skb);
1725 	BUG_ON(offset >= skb_headlen(skb));
1726 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
1727 
1728 	offset += skb->csum_offset;
1729 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1730 
1731 	if (skb_cloned(skb) &&
1732 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1733 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1734 		if (ret)
1735 			goto out;
1736 	}
1737 
1738 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
1739 out_set_summed:
1740 	skb->ip_summed = CHECKSUM_NONE;
1741 out:
1742 	return ret;
1743 }
1744 EXPORT_SYMBOL(skb_checksum_help);
1745 
1746 /**
1747  *	skb_gso_segment - Perform segmentation on skb.
1748  *	@skb: buffer to segment
1749  *	@features: features for the output path (see dev->features)
1750  *
1751  *	This function segments the given skb and returns a list of segments.
1752  *
1753  *	It may return NULL if the skb requires no segmentation.  This is
1754  *	only possible when GSO is used for verifying header integrity.
1755  */
1756 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1757 {
1758 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1759 	struct packet_type *ptype;
1760 	__be16 type = skb->protocol;
1761 	int err;
1762 
1763 	skb_reset_mac_header(skb);
1764 	skb->mac_len = skb->network_header - skb->mac_header;
1765 	__skb_pull(skb, skb->mac_len);
1766 
1767 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1768 		struct net_device *dev = skb->dev;
1769 		struct ethtool_drvinfo info = {};
1770 
1771 		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1772 			dev->ethtool_ops->get_drvinfo(dev, &info);
1773 
1774 		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1775 			"ip_summed=%d",
1776 		     info.driver, dev ? dev->features : 0L,
1777 		     skb->sk ? skb->sk->sk_route_caps : 0L,
1778 		     skb->len, skb->data_len, skb->ip_summed);
1779 
1780 		if (skb_header_cloned(skb) &&
1781 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1782 			return ERR_PTR(err);
1783 	}
1784 
1785 	rcu_read_lock();
1786 	list_for_each_entry_rcu(ptype,
1787 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1788 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1789 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1790 				err = ptype->gso_send_check(skb);
1791 				segs = ERR_PTR(err);
1792 				if (err || skb_gso_ok(skb, features))
1793 					break;
1794 				__skb_push(skb, (skb->data -
1795 						 skb_network_header(skb)));
1796 			}
1797 			segs = ptype->gso_segment(skb, features);
1798 			break;
1799 		}
1800 	}
1801 	rcu_read_unlock();
1802 
1803 	__skb_push(skb, skb->data - skb_mac_header(skb));
1804 
1805 	return segs;
1806 }
1807 EXPORT_SYMBOL(skb_gso_segment);
1808 
1809 /* Take action when hardware reception checksum errors are detected. */
1810 #ifdef CONFIG_BUG
1811 void netdev_rx_csum_fault(struct net_device *dev)
1812 {
1813 	if (net_ratelimit()) {
1814 		printk(KERN_ERR "%s: hw csum failure.\n",
1815 			dev ? dev->name : "<unknown>");
1816 		dump_stack();
1817 	}
1818 }
1819 EXPORT_SYMBOL(netdev_rx_csum_fault);
1820 #endif
1821 
1822 /* Actually, we should eliminate this check as soon as we know, that:
1823  * 1. IOMMU is present and allows to map all the memory.
1824  * 2. No high memory really exists on this machine.
1825  */
1826 
1827 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1828 {
1829 #ifdef CONFIG_HIGHMEM
1830 	int i;
1831 	if (!(dev->features & NETIF_F_HIGHDMA)) {
1832 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1833 			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1834 				return 1;
1835 	}
1836 
1837 	if (PCI_DMA_BUS_IS_PHYS) {
1838 		struct device *pdev = dev->dev.parent;
1839 
1840 		if (!pdev)
1841 			return 0;
1842 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1843 			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1844 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1845 				return 1;
1846 		}
1847 	}
1848 #endif
1849 	return 0;
1850 }
1851 
1852 struct dev_gso_cb {
1853 	void (*destructor)(struct sk_buff *skb);
1854 };
1855 
1856 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1857 
1858 static void dev_gso_skb_destructor(struct sk_buff *skb)
1859 {
1860 	struct dev_gso_cb *cb;
1861 
1862 	do {
1863 		struct sk_buff *nskb = skb->next;
1864 
1865 		skb->next = nskb->next;
1866 		nskb->next = NULL;
1867 		kfree_skb(nskb);
1868 	} while (skb->next);
1869 
1870 	cb = DEV_GSO_CB(skb);
1871 	if (cb->destructor)
1872 		cb->destructor(skb);
1873 }
1874 
1875 /**
1876  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
1877  *	@skb: buffer to segment
1878  *
1879  *	This function segments the given skb and stores the list of segments
1880  *	in skb->next.
1881  */
1882 static int dev_gso_segment(struct sk_buff *skb)
1883 {
1884 	struct net_device *dev = skb->dev;
1885 	struct sk_buff *segs;
1886 	int features = dev->features & ~(illegal_highdma(dev, skb) ?
1887 					 NETIF_F_SG : 0);
1888 
1889 	segs = skb_gso_segment(skb, features);
1890 
1891 	/* Verifying header integrity only. */
1892 	if (!segs)
1893 		return 0;
1894 
1895 	if (IS_ERR(segs))
1896 		return PTR_ERR(segs);
1897 
1898 	skb->next = segs;
1899 	DEV_GSO_CB(skb)->destructor = skb->destructor;
1900 	skb->destructor = dev_gso_skb_destructor;
1901 
1902 	return 0;
1903 }
1904 
1905 /*
1906  * Try to orphan skb early, right before transmission by the device.
1907  * We cannot orphan skb if tx timestamp is requested, since
1908  * drivers need to call skb_tstamp_tx() to send the timestamp.
1909  */
1910 static inline void skb_orphan_try(struct sk_buff *skb)
1911 {
1912 	struct sock *sk = skb->sk;
1913 
1914 	if (sk && !skb_tx(skb)->flags) {
1915 		/* skb_tx_hash() wont be able to get sk.
1916 		 * We copy sk_hash into skb->rxhash
1917 		 */
1918 		if (!skb->rxhash)
1919 			skb->rxhash = sk->sk_hash;
1920 		skb_orphan(skb);
1921 	}
1922 }
1923 
1924 /*
1925  * Returns true if either:
1926  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
1927  *	2. skb is fragmented and the device does not support SG, or if
1928  *	   at least one of fragments is in highmem and device does not
1929  *	   support DMA from it.
1930  */
1931 static inline int skb_needs_linearize(struct sk_buff *skb,
1932 				      struct net_device *dev)
1933 {
1934 	return skb_is_nonlinear(skb) &&
1935 	       ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1936 	        (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1937 					      illegal_highdma(dev, skb))));
1938 }
1939 
1940 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1941 			struct netdev_queue *txq)
1942 {
1943 	const struct net_device_ops *ops = dev->netdev_ops;
1944 	int rc = NETDEV_TX_OK;
1945 
1946 	if (likely(!skb->next)) {
1947 		if (!list_empty(&ptype_all))
1948 			dev_queue_xmit_nit(skb, dev);
1949 
1950 		/*
1951 		 * If device doesnt need skb->dst, release it right now while
1952 		 * its hot in this cpu cache
1953 		 */
1954 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1955 			skb_dst_drop(skb);
1956 
1957 		skb_orphan_try(skb);
1958 
1959 		if (netif_needs_gso(dev, skb)) {
1960 			if (unlikely(dev_gso_segment(skb)))
1961 				goto out_kfree_skb;
1962 			if (skb->next)
1963 				goto gso;
1964 		} else {
1965 			if (skb_needs_linearize(skb, dev) &&
1966 			    __skb_linearize(skb))
1967 				goto out_kfree_skb;
1968 
1969 			/* If packet is not checksummed and device does not
1970 			 * support checksumming for this protocol, complete
1971 			 * checksumming here.
1972 			 */
1973 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1974 				skb_set_transport_header(skb, skb->csum_start -
1975 					      skb_headroom(skb));
1976 				if (!dev_can_checksum(dev, skb) &&
1977 				     skb_checksum_help(skb))
1978 					goto out_kfree_skb;
1979 			}
1980 		}
1981 
1982 		rc = ops->ndo_start_xmit(skb, dev);
1983 		trace_net_dev_xmit(skb, rc);
1984 		if (rc == NETDEV_TX_OK)
1985 			txq_trans_update(txq);
1986 		return rc;
1987 	}
1988 
1989 gso:
1990 	do {
1991 		struct sk_buff *nskb = skb->next;
1992 
1993 		skb->next = nskb->next;
1994 		nskb->next = NULL;
1995 
1996 		/*
1997 		 * If device doesnt need nskb->dst, release it right now while
1998 		 * its hot in this cpu cache
1999 		 */
2000 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2001 			skb_dst_drop(nskb);
2002 
2003 		rc = ops->ndo_start_xmit(nskb, dev);
2004 		trace_net_dev_xmit(nskb, rc);
2005 		if (unlikely(rc != NETDEV_TX_OK)) {
2006 			if (rc & ~NETDEV_TX_MASK)
2007 				goto out_kfree_gso_skb;
2008 			nskb->next = skb->next;
2009 			skb->next = nskb;
2010 			return rc;
2011 		}
2012 		txq_trans_update(txq);
2013 		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
2014 			return NETDEV_TX_BUSY;
2015 	} while (skb->next);
2016 
2017 out_kfree_gso_skb:
2018 	if (likely(skb->next == NULL))
2019 		skb->destructor = DEV_GSO_CB(skb)->destructor;
2020 out_kfree_skb:
2021 	kfree_skb(skb);
2022 	return rc;
2023 }
2024 
2025 static u32 hashrnd __read_mostly;
2026 
2027 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
2028 {
2029 	u32 hash;
2030 
2031 	if (skb_rx_queue_recorded(skb)) {
2032 		hash = skb_get_rx_queue(skb);
2033 		while (unlikely(hash >= dev->real_num_tx_queues))
2034 			hash -= dev->real_num_tx_queues;
2035 		return hash;
2036 	}
2037 
2038 	if (skb->sk && skb->sk->sk_hash)
2039 		hash = skb->sk->sk_hash;
2040 	else
2041 		hash = (__force u16) skb->protocol ^ skb->rxhash;
2042 	hash = jhash_1word(hash, hashrnd);
2043 
2044 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
2045 }
2046 EXPORT_SYMBOL(skb_tx_hash);
2047 
2048 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2049 {
2050 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2051 		if (net_ratelimit()) {
2052 			pr_warning("%s selects TX queue %d, but "
2053 				"real number of TX queues is %d\n",
2054 				dev->name, queue_index, dev->real_num_tx_queues);
2055 		}
2056 		return 0;
2057 	}
2058 	return queue_index;
2059 }
2060 
2061 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2062 					struct sk_buff *skb)
2063 {
2064 	int queue_index;
2065 	const struct net_device_ops *ops = dev->netdev_ops;
2066 
2067 	if (ops->ndo_select_queue) {
2068 		queue_index = ops->ndo_select_queue(dev, skb);
2069 		queue_index = dev_cap_txqueue(dev, queue_index);
2070 	} else {
2071 		struct sock *sk = skb->sk;
2072 		queue_index = sk_tx_queue_get(sk);
2073 		if (queue_index < 0) {
2074 
2075 			queue_index = 0;
2076 			if (dev->real_num_tx_queues > 1)
2077 				queue_index = skb_tx_hash(dev, skb);
2078 
2079 			if (sk) {
2080 				struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
2081 
2082 				if (dst && skb_dst(skb) == dst)
2083 					sk_tx_queue_set(sk, queue_index);
2084 			}
2085 		}
2086 	}
2087 
2088 	skb_set_queue_mapping(skb, queue_index);
2089 	return netdev_get_tx_queue(dev, queue_index);
2090 }
2091 
2092 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2093 				 struct net_device *dev,
2094 				 struct netdev_queue *txq)
2095 {
2096 	spinlock_t *root_lock = qdisc_lock(q);
2097 	bool contended = qdisc_is_running(q);
2098 	int rc;
2099 
2100 	/*
2101 	 * Heuristic to force contended enqueues to serialize on a
2102 	 * separate lock before trying to get qdisc main lock.
2103 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2104 	 * and dequeue packets faster.
2105 	 */
2106 	if (unlikely(contended))
2107 		spin_lock(&q->busylock);
2108 
2109 	spin_lock(root_lock);
2110 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2111 		kfree_skb(skb);
2112 		rc = NET_XMIT_DROP;
2113 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2114 		   qdisc_run_begin(q)) {
2115 		/*
2116 		 * This is a work-conserving queue; there are no old skbs
2117 		 * waiting to be sent out; and the qdisc is not running -
2118 		 * xmit the skb directly.
2119 		 */
2120 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2121 			skb_dst_force(skb);
2122 		__qdisc_update_bstats(q, skb->len);
2123 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2124 			if (unlikely(contended)) {
2125 				spin_unlock(&q->busylock);
2126 				contended = false;
2127 			}
2128 			__qdisc_run(q);
2129 		} else
2130 			qdisc_run_end(q);
2131 
2132 		rc = NET_XMIT_SUCCESS;
2133 	} else {
2134 		skb_dst_force(skb);
2135 		rc = qdisc_enqueue_root(skb, q);
2136 		if (qdisc_run_begin(q)) {
2137 			if (unlikely(contended)) {
2138 				spin_unlock(&q->busylock);
2139 				contended = false;
2140 			}
2141 			__qdisc_run(q);
2142 		}
2143 	}
2144 	spin_unlock(root_lock);
2145 	if (unlikely(contended))
2146 		spin_unlock(&q->busylock);
2147 	return rc;
2148 }
2149 
2150 /**
2151  *	dev_queue_xmit - transmit a buffer
2152  *	@skb: buffer to transmit
2153  *
2154  *	Queue a buffer for transmission to a network device. The caller must
2155  *	have set the device and priority and built the buffer before calling
2156  *	this function. The function can be called from an interrupt.
2157  *
2158  *	A negative errno code is returned on a failure. A success does not
2159  *	guarantee the frame will be transmitted as it may be dropped due
2160  *	to congestion or traffic shaping.
2161  *
2162  * -----------------------------------------------------------------------------------
2163  *      I notice this method can also return errors from the queue disciplines,
2164  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2165  *      be positive.
2166  *
2167  *      Regardless of the return value, the skb is consumed, so it is currently
2168  *      difficult to retry a send to this method.  (You can bump the ref count
2169  *      before sending to hold a reference for retry if you are careful.)
2170  *
2171  *      When calling this method, interrupts MUST be enabled.  This is because
2172  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2173  *          --BLG
2174  */
2175 int dev_queue_xmit(struct sk_buff *skb)
2176 {
2177 	struct net_device *dev = skb->dev;
2178 	struct netdev_queue *txq;
2179 	struct Qdisc *q;
2180 	int rc = -ENOMEM;
2181 
2182 	/* Disable soft irqs for various locks below. Also
2183 	 * stops preemption for RCU.
2184 	 */
2185 	rcu_read_lock_bh();
2186 
2187 	txq = dev_pick_tx(dev, skb);
2188 	q = rcu_dereference_bh(txq->qdisc);
2189 
2190 #ifdef CONFIG_NET_CLS_ACT
2191 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2192 #endif
2193 	trace_net_dev_queue(skb);
2194 	if (q->enqueue) {
2195 		rc = __dev_xmit_skb(skb, q, dev, txq);
2196 		goto out;
2197 	}
2198 
2199 	/* The device has no queue. Common case for software devices:
2200 	   loopback, all the sorts of tunnels...
2201 
2202 	   Really, it is unlikely that netif_tx_lock protection is necessary
2203 	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2204 	   counters.)
2205 	   However, it is possible, that they rely on protection
2206 	   made by us here.
2207 
2208 	   Check this and shot the lock. It is not prone from deadlocks.
2209 	   Either shot noqueue qdisc, it is even simpler 8)
2210 	 */
2211 	if (dev->flags & IFF_UP) {
2212 		int cpu = smp_processor_id(); /* ok because BHs are off */
2213 
2214 		if (txq->xmit_lock_owner != cpu) {
2215 
2216 			HARD_TX_LOCK(dev, txq, cpu);
2217 
2218 			if (!netif_tx_queue_stopped(txq)) {
2219 				rc = dev_hard_start_xmit(skb, dev, txq);
2220 				if (dev_xmit_complete(rc)) {
2221 					HARD_TX_UNLOCK(dev, txq);
2222 					goto out;
2223 				}
2224 			}
2225 			HARD_TX_UNLOCK(dev, txq);
2226 			if (net_ratelimit())
2227 				printk(KERN_CRIT "Virtual device %s asks to "
2228 				       "queue packet!\n", dev->name);
2229 		} else {
2230 			/* Recursion is detected! It is possible,
2231 			 * unfortunately */
2232 			if (net_ratelimit())
2233 				printk(KERN_CRIT "Dead loop on virtual device "
2234 				       "%s, fix it urgently!\n", dev->name);
2235 		}
2236 	}
2237 
2238 	rc = -ENETDOWN;
2239 	rcu_read_unlock_bh();
2240 
2241 	kfree_skb(skb);
2242 	return rc;
2243 out:
2244 	rcu_read_unlock_bh();
2245 	return rc;
2246 }
2247 EXPORT_SYMBOL(dev_queue_xmit);
2248 
2249 
2250 /*=======================================================================
2251 			Receiver routines
2252   =======================================================================*/
2253 
2254 int netdev_max_backlog __read_mostly = 1000;
2255 int netdev_tstamp_prequeue __read_mostly = 1;
2256 int netdev_budget __read_mostly = 300;
2257 int weight_p __read_mostly = 64;            /* old backlog weight */
2258 
2259 /* Called with irq disabled */
2260 static inline void ____napi_schedule(struct softnet_data *sd,
2261 				     struct napi_struct *napi)
2262 {
2263 	list_add_tail(&napi->poll_list, &sd->poll_list);
2264 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2265 }
2266 
2267 #ifdef CONFIG_RPS
2268 
2269 /* One global table that all flow-based protocols share. */
2270 struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2271 EXPORT_SYMBOL(rps_sock_flow_table);
2272 
2273 /*
2274  * get_rps_cpu is called from netif_receive_skb and returns the target
2275  * CPU from the RPS map of the receiving queue for a given skb.
2276  * rcu_read_lock must be held on entry.
2277  */
2278 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2279 		       struct rps_dev_flow **rflowp)
2280 {
2281 	struct ipv6hdr *ip6;
2282 	struct iphdr *ip;
2283 	struct netdev_rx_queue *rxqueue;
2284 	struct rps_map *map;
2285 	struct rps_dev_flow_table *flow_table;
2286 	struct rps_sock_flow_table *sock_flow_table;
2287 	int cpu = -1;
2288 	u8 ip_proto;
2289 	u16 tcpu;
2290 	u32 addr1, addr2, ihl;
2291 	union {
2292 		u32 v32;
2293 		u16 v16[2];
2294 	} ports;
2295 
2296 	if (skb_rx_queue_recorded(skb)) {
2297 		u16 index = skb_get_rx_queue(skb);
2298 		if (unlikely(index >= dev->num_rx_queues)) {
2299 			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
2300 				"on queue %u, but number of RX queues is %u\n",
2301 				dev->name, index, dev->num_rx_queues);
2302 			goto done;
2303 		}
2304 		rxqueue = dev->_rx + index;
2305 	} else
2306 		rxqueue = dev->_rx;
2307 
2308 	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2309 		goto done;
2310 
2311 	if (skb->rxhash)
2312 		goto got_hash; /* Skip hash computation on packet header */
2313 
2314 	switch (skb->protocol) {
2315 	case __constant_htons(ETH_P_IP):
2316 		if (!pskb_may_pull(skb, sizeof(*ip)))
2317 			goto done;
2318 
2319 		ip = (struct iphdr *) skb->data;
2320 		ip_proto = ip->protocol;
2321 		addr1 = (__force u32) ip->saddr;
2322 		addr2 = (__force u32) ip->daddr;
2323 		ihl = ip->ihl;
2324 		break;
2325 	case __constant_htons(ETH_P_IPV6):
2326 		if (!pskb_may_pull(skb, sizeof(*ip6)))
2327 			goto done;
2328 
2329 		ip6 = (struct ipv6hdr *) skb->data;
2330 		ip_proto = ip6->nexthdr;
2331 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2332 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2333 		ihl = (40 >> 2);
2334 		break;
2335 	default:
2336 		goto done;
2337 	}
2338 	switch (ip_proto) {
2339 	case IPPROTO_TCP:
2340 	case IPPROTO_UDP:
2341 	case IPPROTO_DCCP:
2342 	case IPPROTO_ESP:
2343 	case IPPROTO_AH:
2344 	case IPPROTO_SCTP:
2345 	case IPPROTO_UDPLITE:
2346 		if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2347 			ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
2348 			if (ports.v16[1] < ports.v16[0])
2349 				swap(ports.v16[0], ports.v16[1]);
2350 			break;
2351 		}
2352 	default:
2353 		ports.v32 = 0;
2354 		break;
2355 	}
2356 
2357 	/* get a consistent hash (same value on both flow directions) */
2358 	if (addr2 < addr1)
2359 		swap(addr1, addr2);
2360 	skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2361 	if (!skb->rxhash)
2362 		skb->rxhash = 1;
2363 
2364 got_hash:
2365 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
2366 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
2367 	if (flow_table && sock_flow_table) {
2368 		u16 next_cpu;
2369 		struct rps_dev_flow *rflow;
2370 
2371 		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2372 		tcpu = rflow->cpu;
2373 
2374 		next_cpu = sock_flow_table->ents[skb->rxhash &
2375 		    sock_flow_table->mask];
2376 
2377 		/*
2378 		 * If the desired CPU (where last recvmsg was done) is
2379 		 * different from current CPU (one in the rx-queue flow
2380 		 * table entry), switch if one of the following holds:
2381 		 *   - Current CPU is unset (equal to RPS_NO_CPU).
2382 		 *   - Current CPU is offline.
2383 		 *   - The current CPU's queue tail has advanced beyond the
2384 		 *     last packet that was enqueued using this table entry.
2385 		 *     This guarantees that all previous packets for the flow
2386 		 *     have been dequeued, thus preserving in order delivery.
2387 		 */
2388 		if (unlikely(tcpu != next_cpu) &&
2389 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2390 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2391 		      rflow->last_qtail)) >= 0)) {
2392 			tcpu = rflow->cpu = next_cpu;
2393 			if (tcpu != RPS_NO_CPU)
2394 				rflow->last_qtail = per_cpu(softnet_data,
2395 				    tcpu).input_queue_head;
2396 		}
2397 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2398 			*rflowp = rflow;
2399 			cpu = tcpu;
2400 			goto done;
2401 		}
2402 	}
2403 
2404 	map = rcu_dereference(rxqueue->rps_map);
2405 	if (map) {
2406 		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2407 
2408 		if (cpu_online(tcpu)) {
2409 			cpu = tcpu;
2410 			goto done;
2411 		}
2412 	}
2413 
2414 done:
2415 	return cpu;
2416 }
2417 
2418 /* Called from hardirq (IPI) context */
2419 static void rps_trigger_softirq(void *data)
2420 {
2421 	struct softnet_data *sd = data;
2422 
2423 	____napi_schedule(sd, &sd->backlog);
2424 	sd->received_rps++;
2425 }
2426 
2427 #endif /* CONFIG_RPS */
2428 
2429 /*
2430  * Check if this softnet_data structure is another cpu one
2431  * If yes, queue it to our IPI list and return 1
2432  * If no, return 0
2433  */
2434 static int rps_ipi_queued(struct softnet_data *sd)
2435 {
2436 #ifdef CONFIG_RPS
2437 	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2438 
2439 	if (sd != mysd) {
2440 		sd->rps_ipi_next = mysd->rps_ipi_list;
2441 		mysd->rps_ipi_list = sd;
2442 
2443 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2444 		return 1;
2445 	}
2446 #endif /* CONFIG_RPS */
2447 	return 0;
2448 }
2449 
2450 /*
2451  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2452  * queue (may be a remote CPU queue).
2453  */
2454 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2455 			      unsigned int *qtail)
2456 {
2457 	struct softnet_data *sd;
2458 	unsigned long flags;
2459 
2460 	sd = &per_cpu(softnet_data, cpu);
2461 
2462 	local_irq_save(flags);
2463 
2464 	rps_lock(sd);
2465 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2466 		if (skb_queue_len(&sd->input_pkt_queue)) {
2467 enqueue:
2468 			__skb_queue_tail(&sd->input_pkt_queue, skb);
2469 			input_queue_tail_incr_save(sd, qtail);
2470 			rps_unlock(sd);
2471 			local_irq_restore(flags);
2472 			return NET_RX_SUCCESS;
2473 		}
2474 
2475 		/* Schedule NAPI for backlog device
2476 		 * We can use non atomic operation since we own the queue lock
2477 		 */
2478 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2479 			if (!rps_ipi_queued(sd))
2480 				____napi_schedule(sd, &sd->backlog);
2481 		}
2482 		goto enqueue;
2483 	}
2484 
2485 	sd->dropped++;
2486 	rps_unlock(sd);
2487 
2488 	local_irq_restore(flags);
2489 
2490 	kfree_skb(skb);
2491 	return NET_RX_DROP;
2492 }
2493 
2494 /**
2495  *	netif_rx	-	post buffer to the network code
2496  *	@skb: buffer to post
2497  *
2498  *	This function receives a packet from a device driver and queues it for
2499  *	the upper (protocol) levels to process.  It always succeeds. The buffer
2500  *	may be dropped during processing for congestion control or by the
2501  *	protocol layers.
2502  *
2503  *	return values:
2504  *	NET_RX_SUCCESS	(no congestion)
2505  *	NET_RX_DROP     (packet was dropped)
2506  *
2507  */
2508 
2509 int netif_rx(struct sk_buff *skb)
2510 {
2511 	int ret;
2512 
2513 	/* if netpoll wants it, pretend we never saw it */
2514 	if (netpoll_rx(skb))
2515 		return NET_RX_DROP;
2516 
2517 	if (netdev_tstamp_prequeue)
2518 		net_timestamp_check(skb);
2519 
2520 	trace_netif_rx(skb);
2521 #ifdef CONFIG_RPS
2522 	{
2523 		struct rps_dev_flow voidflow, *rflow = &voidflow;
2524 		int cpu;
2525 
2526 		preempt_disable();
2527 		rcu_read_lock();
2528 
2529 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2530 		if (cpu < 0)
2531 			cpu = smp_processor_id();
2532 
2533 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2534 
2535 		rcu_read_unlock();
2536 		preempt_enable();
2537 	}
2538 #else
2539 	{
2540 		unsigned int qtail;
2541 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2542 		put_cpu();
2543 	}
2544 #endif
2545 	return ret;
2546 }
2547 EXPORT_SYMBOL(netif_rx);
2548 
2549 int netif_rx_ni(struct sk_buff *skb)
2550 {
2551 	int err;
2552 
2553 	preempt_disable();
2554 	err = netif_rx(skb);
2555 	if (local_softirq_pending())
2556 		do_softirq();
2557 	preempt_enable();
2558 
2559 	return err;
2560 }
2561 EXPORT_SYMBOL(netif_rx_ni);
2562 
2563 static void net_tx_action(struct softirq_action *h)
2564 {
2565 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
2566 
2567 	if (sd->completion_queue) {
2568 		struct sk_buff *clist;
2569 
2570 		local_irq_disable();
2571 		clist = sd->completion_queue;
2572 		sd->completion_queue = NULL;
2573 		local_irq_enable();
2574 
2575 		while (clist) {
2576 			struct sk_buff *skb = clist;
2577 			clist = clist->next;
2578 
2579 			WARN_ON(atomic_read(&skb->users));
2580 			trace_kfree_skb(skb, net_tx_action);
2581 			__kfree_skb(skb);
2582 		}
2583 	}
2584 
2585 	if (sd->output_queue) {
2586 		struct Qdisc *head;
2587 
2588 		local_irq_disable();
2589 		head = sd->output_queue;
2590 		sd->output_queue = NULL;
2591 		sd->output_queue_tailp = &sd->output_queue;
2592 		local_irq_enable();
2593 
2594 		while (head) {
2595 			struct Qdisc *q = head;
2596 			spinlock_t *root_lock;
2597 
2598 			head = head->next_sched;
2599 
2600 			root_lock = qdisc_lock(q);
2601 			if (spin_trylock(root_lock)) {
2602 				smp_mb__before_clear_bit();
2603 				clear_bit(__QDISC_STATE_SCHED,
2604 					  &q->state);
2605 				qdisc_run(q);
2606 				spin_unlock(root_lock);
2607 			} else {
2608 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
2609 					      &q->state)) {
2610 					__netif_reschedule(q);
2611 				} else {
2612 					smp_mb__before_clear_bit();
2613 					clear_bit(__QDISC_STATE_SCHED,
2614 						  &q->state);
2615 				}
2616 			}
2617 		}
2618 	}
2619 }
2620 
2621 static inline int deliver_skb(struct sk_buff *skb,
2622 			      struct packet_type *pt_prev,
2623 			      struct net_device *orig_dev)
2624 {
2625 	atomic_inc(&skb->users);
2626 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2627 }
2628 
2629 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2630     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
2631 /* This hook is defined here for ATM LANE */
2632 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2633 			     unsigned char *addr) __read_mostly;
2634 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2635 #endif
2636 
2637 #ifdef CONFIG_NET_CLS_ACT
2638 /* TODO: Maybe we should just force sch_ingress to be compiled in
2639  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
2640  * a compare and 2 stores extra right now if we dont have it on
2641  * but have CONFIG_NET_CLS_ACT
2642  * NOTE: This doesnt stop any functionality; if you dont have
2643  * the ingress scheduler, you just cant add policies on ingress.
2644  *
2645  */
2646 static int ing_filter(struct sk_buff *skb)
2647 {
2648 	struct net_device *dev = skb->dev;
2649 	u32 ttl = G_TC_RTTL(skb->tc_verd);
2650 	struct netdev_queue *rxq;
2651 	int result = TC_ACT_OK;
2652 	struct Qdisc *q;
2653 
2654 	if (unlikely(MAX_RED_LOOP < ttl++)) {
2655 		if (net_ratelimit())
2656 			pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
2657 			       skb->skb_iif, dev->ifindex);
2658 		return TC_ACT_SHOT;
2659 	}
2660 
2661 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2662 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2663 
2664 	rxq = &dev->rx_queue;
2665 
2666 	q = rxq->qdisc;
2667 	if (q != &noop_qdisc) {
2668 		spin_lock(qdisc_lock(q));
2669 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2670 			result = qdisc_enqueue_root(skb, q);
2671 		spin_unlock(qdisc_lock(q));
2672 	}
2673 
2674 	return result;
2675 }
2676 
2677 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2678 					 struct packet_type **pt_prev,
2679 					 int *ret, struct net_device *orig_dev)
2680 {
2681 	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2682 		goto out;
2683 
2684 	if (*pt_prev) {
2685 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
2686 		*pt_prev = NULL;
2687 	}
2688 
2689 	switch (ing_filter(skb)) {
2690 	case TC_ACT_SHOT:
2691 	case TC_ACT_STOLEN:
2692 		kfree_skb(skb);
2693 		return NULL;
2694 	}
2695 
2696 out:
2697 	skb->tc_verd = 0;
2698 	return skb;
2699 }
2700 #endif
2701 
2702 /*
2703  * 	netif_nit_deliver - deliver received packets to network taps
2704  * 	@skb: buffer
2705  *
2706  * 	This function is used to deliver incoming packets to network
2707  * 	taps. It should be used when the normal netif_receive_skb path
2708  * 	is bypassed, for example because of VLAN acceleration.
2709  */
2710 void netif_nit_deliver(struct sk_buff *skb)
2711 {
2712 	struct packet_type *ptype;
2713 
2714 	if (list_empty(&ptype_all))
2715 		return;
2716 
2717 	skb_reset_network_header(skb);
2718 	skb_reset_transport_header(skb);
2719 	skb->mac_len = skb->network_header - skb->mac_header;
2720 
2721 	rcu_read_lock();
2722 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
2723 		if (!ptype->dev || ptype->dev == skb->dev)
2724 			deliver_skb(skb, ptype, skb->dev);
2725 	}
2726 	rcu_read_unlock();
2727 }
2728 
2729 /**
2730  *	netdev_rx_handler_register - register receive handler
2731  *	@dev: device to register a handler for
2732  *	@rx_handler: receive handler to register
2733  *	@rx_handler_data: data pointer that is used by rx handler
2734  *
2735  *	Register a receive hander for a device. This handler will then be
2736  *	called from __netif_receive_skb. A negative errno code is returned
2737  *	on a failure.
2738  *
2739  *	The caller must hold the rtnl_mutex.
2740  */
2741 int netdev_rx_handler_register(struct net_device *dev,
2742 			       rx_handler_func_t *rx_handler,
2743 			       void *rx_handler_data)
2744 {
2745 	ASSERT_RTNL();
2746 
2747 	if (dev->rx_handler)
2748 		return -EBUSY;
2749 
2750 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
2751 	rcu_assign_pointer(dev->rx_handler, rx_handler);
2752 
2753 	return 0;
2754 }
2755 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
2756 
2757 /**
2758  *	netdev_rx_handler_unregister - unregister receive handler
2759  *	@dev: device to unregister a handler from
2760  *
2761  *	Unregister a receive hander from a device.
2762  *
2763  *	The caller must hold the rtnl_mutex.
2764  */
2765 void netdev_rx_handler_unregister(struct net_device *dev)
2766 {
2767 
2768 	ASSERT_RTNL();
2769 	rcu_assign_pointer(dev->rx_handler, NULL);
2770 	rcu_assign_pointer(dev->rx_handler_data, NULL);
2771 }
2772 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2773 
2774 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2775 					      struct net_device *master)
2776 {
2777 	if (skb->pkt_type == PACKET_HOST) {
2778 		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2779 
2780 		memcpy(dest, master->dev_addr, ETH_ALEN);
2781 	}
2782 }
2783 
2784 /* On bonding slaves other than the currently active slave, suppress
2785  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2786  * ARP on active-backup slaves with arp_validate enabled.
2787  */
2788 int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2789 {
2790 	struct net_device *dev = skb->dev;
2791 
2792 	if (master->priv_flags & IFF_MASTER_ARPMON)
2793 		dev->last_rx = jiffies;
2794 
2795 	if ((master->priv_flags & IFF_MASTER_ALB) &&
2796 	    (master->priv_flags & IFF_BRIDGE_PORT)) {
2797 		/* Do address unmangle. The local destination address
2798 		 * will be always the one master has. Provides the right
2799 		 * functionality in a bridge.
2800 		 */
2801 		skb_bond_set_mac_by_master(skb, master);
2802 	}
2803 
2804 	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2805 		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2806 		    skb->protocol == __cpu_to_be16(ETH_P_ARP))
2807 			return 0;
2808 
2809 		if (master->priv_flags & IFF_MASTER_ALB) {
2810 			if (skb->pkt_type != PACKET_BROADCAST &&
2811 			    skb->pkt_type != PACKET_MULTICAST)
2812 				return 0;
2813 		}
2814 		if (master->priv_flags & IFF_MASTER_8023AD &&
2815 		    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2816 			return 0;
2817 
2818 		return 1;
2819 	}
2820 	return 0;
2821 }
2822 EXPORT_SYMBOL(__skb_bond_should_drop);
2823 
2824 static int __netif_receive_skb(struct sk_buff *skb)
2825 {
2826 	struct packet_type *ptype, *pt_prev;
2827 	rx_handler_func_t *rx_handler;
2828 	struct net_device *orig_dev;
2829 	struct net_device *master;
2830 	struct net_device *null_or_orig;
2831 	struct net_device *orig_or_bond;
2832 	int ret = NET_RX_DROP;
2833 	__be16 type;
2834 
2835 	if (!netdev_tstamp_prequeue)
2836 		net_timestamp_check(skb);
2837 
2838 	trace_netif_receive_skb(skb);
2839 	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2840 		return NET_RX_SUCCESS;
2841 
2842 	/* if we've gotten here through NAPI, check netpoll */
2843 	if (netpoll_receive_skb(skb))
2844 		return NET_RX_DROP;
2845 
2846 	if (!skb->skb_iif)
2847 		skb->skb_iif = skb->dev->ifindex;
2848 
2849 	/*
2850 	 * bonding note: skbs received on inactive slaves should only
2851 	 * be delivered to pkt handlers that are exact matches.  Also
2852 	 * the deliver_no_wcard flag will be set.  If packet handlers
2853 	 * are sensitive to duplicate packets these skbs will need to
2854 	 * be dropped at the handler.  The vlan accel path may have
2855 	 * already set the deliver_no_wcard flag.
2856 	 */
2857 	null_or_orig = NULL;
2858 	orig_dev = skb->dev;
2859 	master = ACCESS_ONCE(orig_dev->master);
2860 	if (skb->deliver_no_wcard)
2861 		null_or_orig = orig_dev;
2862 	else if (master) {
2863 		if (skb_bond_should_drop(skb, master)) {
2864 			skb->deliver_no_wcard = 1;
2865 			null_or_orig = orig_dev; /* deliver only exact match */
2866 		} else
2867 			skb->dev = master;
2868 	}
2869 
2870 	__this_cpu_inc(softnet_data.processed);
2871 	skb_reset_network_header(skb);
2872 	skb_reset_transport_header(skb);
2873 	skb->mac_len = skb->network_header - skb->mac_header;
2874 
2875 	pt_prev = NULL;
2876 
2877 	rcu_read_lock();
2878 
2879 #ifdef CONFIG_NET_CLS_ACT
2880 	if (skb->tc_verd & TC_NCLS) {
2881 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2882 		goto ncls;
2883 	}
2884 #endif
2885 
2886 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
2887 		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2888 		    ptype->dev == orig_dev) {
2889 			if (pt_prev)
2890 				ret = deliver_skb(skb, pt_prev, orig_dev);
2891 			pt_prev = ptype;
2892 		}
2893 	}
2894 
2895 #ifdef CONFIG_NET_CLS_ACT
2896 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2897 	if (!skb)
2898 		goto out;
2899 ncls:
2900 #endif
2901 
2902 	/* Handle special case of bridge or macvlan */
2903 	rx_handler = rcu_dereference(skb->dev->rx_handler);
2904 	if (rx_handler) {
2905 		if (pt_prev) {
2906 			ret = deliver_skb(skb, pt_prev, orig_dev);
2907 			pt_prev = NULL;
2908 		}
2909 		skb = rx_handler(skb);
2910 		if (!skb)
2911 			goto out;
2912 	}
2913 
2914 	/*
2915 	 * Make sure frames received on VLAN interfaces stacked on
2916 	 * bonding interfaces still make their way to any base bonding
2917 	 * device that may have registered for a specific ptype.  The
2918 	 * handler may have to adjust skb->dev and orig_dev.
2919 	 */
2920 	orig_or_bond = orig_dev;
2921 	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2922 	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2923 		orig_or_bond = vlan_dev_real_dev(skb->dev);
2924 	}
2925 
2926 	type = skb->protocol;
2927 	list_for_each_entry_rcu(ptype,
2928 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2929 		if (ptype->type == type && (ptype->dev == null_or_orig ||
2930 		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
2931 		     ptype->dev == orig_or_bond)) {
2932 			if (pt_prev)
2933 				ret = deliver_skb(skb, pt_prev, orig_dev);
2934 			pt_prev = ptype;
2935 		}
2936 	}
2937 
2938 	if (pt_prev) {
2939 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2940 	} else {
2941 		kfree_skb(skb);
2942 		/* Jamal, now you will not able to escape explaining
2943 		 * me how you were going to use this. :-)
2944 		 */
2945 		ret = NET_RX_DROP;
2946 	}
2947 
2948 out:
2949 	rcu_read_unlock();
2950 	return ret;
2951 }
2952 
2953 /**
2954  *	netif_receive_skb - process receive buffer from network
2955  *	@skb: buffer to process
2956  *
2957  *	netif_receive_skb() is the main receive data processing function.
2958  *	It always succeeds. The buffer may be dropped during processing
2959  *	for congestion control or by the protocol layers.
2960  *
2961  *	This function may only be called from softirq context and interrupts
2962  *	should be enabled.
2963  *
2964  *	Return values (usually ignored):
2965  *	NET_RX_SUCCESS: no congestion
2966  *	NET_RX_DROP: packet was dropped
2967  */
2968 int netif_receive_skb(struct sk_buff *skb)
2969 {
2970 	if (netdev_tstamp_prequeue)
2971 		net_timestamp_check(skb);
2972 
2973 	if (skb_defer_rx_timestamp(skb))
2974 		return NET_RX_SUCCESS;
2975 
2976 #ifdef CONFIG_RPS
2977 	{
2978 		struct rps_dev_flow voidflow, *rflow = &voidflow;
2979 		int cpu, ret;
2980 
2981 		rcu_read_lock();
2982 
2983 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2984 
2985 		if (cpu >= 0) {
2986 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2987 			rcu_read_unlock();
2988 		} else {
2989 			rcu_read_unlock();
2990 			ret = __netif_receive_skb(skb);
2991 		}
2992 
2993 		return ret;
2994 	}
2995 #else
2996 	return __netif_receive_skb(skb);
2997 #endif
2998 }
2999 EXPORT_SYMBOL(netif_receive_skb);
3000 
3001 /* Network device is going away, flush any packets still pending
3002  * Called with irqs disabled.
3003  */
3004 static void flush_backlog(void *arg)
3005 {
3006 	struct net_device *dev = arg;
3007 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3008 	struct sk_buff *skb, *tmp;
3009 
3010 	rps_lock(sd);
3011 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3012 		if (skb->dev == dev) {
3013 			__skb_unlink(skb, &sd->input_pkt_queue);
3014 			kfree_skb(skb);
3015 			input_queue_head_incr(sd);
3016 		}
3017 	}
3018 	rps_unlock(sd);
3019 
3020 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3021 		if (skb->dev == dev) {
3022 			__skb_unlink(skb, &sd->process_queue);
3023 			kfree_skb(skb);
3024 			input_queue_head_incr(sd);
3025 		}
3026 	}
3027 }
3028 
3029 static int napi_gro_complete(struct sk_buff *skb)
3030 {
3031 	struct packet_type *ptype;
3032 	__be16 type = skb->protocol;
3033 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3034 	int err = -ENOENT;
3035 
3036 	if (NAPI_GRO_CB(skb)->count == 1) {
3037 		skb_shinfo(skb)->gso_size = 0;
3038 		goto out;
3039 	}
3040 
3041 	rcu_read_lock();
3042 	list_for_each_entry_rcu(ptype, head, list) {
3043 		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3044 			continue;
3045 
3046 		err = ptype->gro_complete(skb);
3047 		break;
3048 	}
3049 	rcu_read_unlock();
3050 
3051 	if (err) {
3052 		WARN_ON(&ptype->list == head);
3053 		kfree_skb(skb);
3054 		return NET_RX_SUCCESS;
3055 	}
3056 
3057 out:
3058 	return netif_receive_skb(skb);
3059 }
3060 
3061 static void napi_gro_flush(struct napi_struct *napi)
3062 {
3063 	struct sk_buff *skb, *next;
3064 
3065 	for (skb = napi->gro_list; skb; skb = next) {
3066 		next = skb->next;
3067 		skb->next = NULL;
3068 		napi_gro_complete(skb);
3069 	}
3070 
3071 	napi->gro_count = 0;
3072 	napi->gro_list = NULL;
3073 }
3074 
3075 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3076 {
3077 	struct sk_buff **pp = NULL;
3078 	struct packet_type *ptype;
3079 	__be16 type = skb->protocol;
3080 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3081 	int same_flow;
3082 	int mac_len;
3083 	enum gro_result ret;
3084 
3085 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3086 		goto normal;
3087 
3088 	if (skb_is_gso(skb) || skb_has_frags(skb))
3089 		goto normal;
3090 
3091 	rcu_read_lock();
3092 	list_for_each_entry_rcu(ptype, head, list) {
3093 		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3094 			continue;
3095 
3096 		skb_set_network_header(skb, skb_gro_offset(skb));
3097 		mac_len = skb->network_header - skb->mac_header;
3098 		skb->mac_len = mac_len;
3099 		NAPI_GRO_CB(skb)->same_flow = 0;
3100 		NAPI_GRO_CB(skb)->flush = 0;
3101 		NAPI_GRO_CB(skb)->free = 0;
3102 
3103 		pp = ptype->gro_receive(&napi->gro_list, skb);
3104 		break;
3105 	}
3106 	rcu_read_unlock();
3107 
3108 	if (&ptype->list == head)
3109 		goto normal;
3110 
3111 	same_flow = NAPI_GRO_CB(skb)->same_flow;
3112 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3113 
3114 	if (pp) {
3115 		struct sk_buff *nskb = *pp;
3116 
3117 		*pp = nskb->next;
3118 		nskb->next = NULL;
3119 		napi_gro_complete(nskb);
3120 		napi->gro_count--;
3121 	}
3122 
3123 	if (same_flow)
3124 		goto ok;
3125 
3126 	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3127 		goto normal;
3128 
3129 	napi->gro_count++;
3130 	NAPI_GRO_CB(skb)->count = 1;
3131 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3132 	skb->next = napi->gro_list;
3133 	napi->gro_list = skb;
3134 	ret = GRO_HELD;
3135 
3136 pull:
3137 	if (skb_headlen(skb) < skb_gro_offset(skb)) {
3138 		int grow = skb_gro_offset(skb) - skb_headlen(skb);
3139 
3140 		BUG_ON(skb->end - skb->tail < grow);
3141 
3142 		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3143 
3144 		skb->tail += grow;
3145 		skb->data_len -= grow;
3146 
3147 		skb_shinfo(skb)->frags[0].page_offset += grow;
3148 		skb_shinfo(skb)->frags[0].size -= grow;
3149 
3150 		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3151 			put_page(skb_shinfo(skb)->frags[0].page);
3152 			memmove(skb_shinfo(skb)->frags,
3153 				skb_shinfo(skb)->frags + 1,
3154 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3155 		}
3156 	}
3157 
3158 ok:
3159 	return ret;
3160 
3161 normal:
3162 	ret = GRO_NORMAL;
3163 	goto pull;
3164 }
3165 EXPORT_SYMBOL(dev_gro_receive);
3166 
3167 static gro_result_t
3168 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3169 {
3170 	struct sk_buff *p;
3171 
3172 	for (p = napi->gro_list; p; p = p->next) {
3173 		NAPI_GRO_CB(p)->same_flow =
3174 			(p->dev == skb->dev) &&
3175 			!compare_ether_header(skb_mac_header(p),
3176 					      skb_gro_mac_header(skb));
3177 		NAPI_GRO_CB(p)->flush = 0;
3178 	}
3179 
3180 	return dev_gro_receive(napi, skb);
3181 }
3182 
3183 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3184 {
3185 	switch (ret) {
3186 	case GRO_NORMAL:
3187 		if (netif_receive_skb(skb))
3188 			ret = GRO_DROP;
3189 		break;
3190 
3191 	case GRO_DROP:
3192 	case GRO_MERGED_FREE:
3193 		kfree_skb(skb);
3194 		break;
3195 
3196 	case GRO_HELD:
3197 	case GRO_MERGED:
3198 		break;
3199 	}
3200 
3201 	return ret;
3202 }
3203 EXPORT_SYMBOL(napi_skb_finish);
3204 
3205 void skb_gro_reset_offset(struct sk_buff *skb)
3206 {
3207 	NAPI_GRO_CB(skb)->data_offset = 0;
3208 	NAPI_GRO_CB(skb)->frag0 = NULL;
3209 	NAPI_GRO_CB(skb)->frag0_len = 0;
3210 
3211 	if (skb->mac_header == skb->tail &&
3212 	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3213 		NAPI_GRO_CB(skb)->frag0 =
3214 			page_address(skb_shinfo(skb)->frags[0].page) +
3215 			skb_shinfo(skb)->frags[0].page_offset;
3216 		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3217 	}
3218 }
3219 EXPORT_SYMBOL(skb_gro_reset_offset);
3220 
3221 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3222 {
3223 	skb_gro_reset_offset(skb);
3224 
3225 	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3226 }
3227 EXPORT_SYMBOL(napi_gro_receive);
3228 
3229 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3230 {
3231 	__skb_pull(skb, skb_headlen(skb));
3232 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3233 
3234 	napi->skb = skb;
3235 }
3236 EXPORT_SYMBOL(napi_reuse_skb);
3237 
3238 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3239 {
3240 	struct sk_buff *skb = napi->skb;
3241 
3242 	if (!skb) {
3243 		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3244 		if (skb)
3245 			napi->skb = skb;
3246 	}
3247 	return skb;
3248 }
3249 EXPORT_SYMBOL(napi_get_frags);
3250 
3251 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3252 			       gro_result_t ret)
3253 {
3254 	switch (ret) {
3255 	case GRO_NORMAL:
3256 	case GRO_HELD:
3257 		skb->protocol = eth_type_trans(skb, skb->dev);
3258 
3259 		if (ret == GRO_HELD)
3260 			skb_gro_pull(skb, -ETH_HLEN);
3261 		else if (netif_receive_skb(skb))
3262 			ret = GRO_DROP;
3263 		break;
3264 
3265 	case GRO_DROP:
3266 	case GRO_MERGED_FREE:
3267 		napi_reuse_skb(napi, skb);
3268 		break;
3269 
3270 	case GRO_MERGED:
3271 		break;
3272 	}
3273 
3274 	return ret;
3275 }
3276 EXPORT_SYMBOL(napi_frags_finish);
3277 
3278 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3279 {
3280 	struct sk_buff *skb = napi->skb;
3281 	struct ethhdr *eth;
3282 	unsigned int hlen;
3283 	unsigned int off;
3284 
3285 	napi->skb = NULL;
3286 
3287 	skb_reset_mac_header(skb);
3288 	skb_gro_reset_offset(skb);
3289 
3290 	off = skb_gro_offset(skb);
3291 	hlen = off + sizeof(*eth);
3292 	eth = skb_gro_header_fast(skb, off);
3293 	if (skb_gro_header_hard(skb, hlen)) {
3294 		eth = skb_gro_header_slow(skb, hlen, off);
3295 		if (unlikely(!eth)) {
3296 			napi_reuse_skb(napi, skb);
3297 			skb = NULL;
3298 			goto out;
3299 		}
3300 	}
3301 
3302 	skb_gro_pull(skb, sizeof(*eth));
3303 
3304 	/*
3305 	 * This works because the only protocols we care about don't require
3306 	 * special handling.  We'll fix it up properly at the end.
3307 	 */
3308 	skb->protocol = eth->h_proto;
3309 
3310 out:
3311 	return skb;
3312 }
3313 EXPORT_SYMBOL(napi_frags_skb);
3314 
3315 gro_result_t napi_gro_frags(struct napi_struct *napi)
3316 {
3317 	struct sk_buff *skb = napi_frags_skb(napi);
3318 
3319 	if (!skb)
3320 		return GRO_DROP;
3321 
3322 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3323 }
3324 EXPORT_SYMBOL(napi_gro_frags);
3325 
3326 /*
3327  * net_rps_action sends any pending IPI's for rps.
3328  * Note: called with local irq disabled, but exits with local irq enabled.
3329  */
3330 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3331 {
3332 #ifdef CONFIG_RPS
3333 	struct softnet_data *remsd = sd->rps_ipi_list;
3334 
3335 	if (remsd) {
3336 		sd->rps_ipi_list = NULL;
3337 
3338 		local_irq_enable();
3339 
3340 		/* Send pending IPI's to kick RPS processing on remote cpus. */
3341 		while (remsd) {
3342 			struct softnet_data *next = remsd->rps_ipi_next;
3343 
3344 			if (cpu_online(remsd->cpu))
3345 				__smp_call_function_single(remsd->cpu,
3346 							   &remsd->csd, 0);
3347 			remsd = next;
3348 		}
3349 	} else
3350 #endif
3351 		local_irq_enable();
3352 }
3353 
3354 static int process_backlog(struct napi_struct *napi, int quota)
3355 {
3356 	int work = 0;
3357 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3358 
3359 #ifdef CONFIG_RPS
3360 	/* Check if we have pending ipi, its better to send them now,
3361 	 * not waiting net_rx_action() end.
3362 	 */
3363 	if (sd->rps_ipi_list) {
3364 		local_irq_disable();
3365 		net_rps_action_and_irq_enable(sd);
3366 	}
3367 #endif
3368 	napi->weight = weight_p;
3369 	local_irq_disable();
3370 	while (work < quota) {
3371 		struct sk_buff *skb;
3372 		unsigned int qlen;
3373 
3374 		while ((skb = __skb_dequeue(&sd->process_queue))) {
3375 			local_irq_enable();
3376 			__netif_receive_skb(skb);
3377 			local_irq_disable();
3378 			input_queue_head_incr(sd);
3379 			if (++work >= quota) {
3380 				local_irq_enable();
3381 				return work;
3382 			}
3383 		}
3384 
3385 		rps_lock(sd);
3386 		qlen = skb_queue_len(&sd->input_pkt_queue);
3387 		if (qlen)
3388 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
3389 						   &sd->process_queue);
3390 
3391 		if (qlen < quota - work) {
3392 			/*
3393 			 * Inline a custom version of __napi_complete().
3394 			 * only current cpu owns and manipulates this napi,
3395 			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3396 			 * we can use a plain write instead of clear_bit(),
3397 			 * and we dont need an smp_mb() memory barrier.
3398 			 */
3399 			list_del(&napi->poll_list);
3400 			napi->state = 0;
3401 
3402 			quota = work + qlen;
3403 		}
3404 		rps_unlock(sd);
3405 	}
3406 	local_irq_enable();
3407 
3408 	return work;
3409 }
3410 
3411 /**
3412  * __napi_schedule - schedule for receive
3413  * @n: entry to schedule
3414  *
3415  * The entry's receive function will be scheduled to run
3416  */
3417 void __napi_schedule(struct napi_struct *n)
3418 {
3419 	unsigned long flags;
3420 
3421 	local_irq_save(flags);
3422 	____napi_schedule(&__get_cpu_var(softnet_data), n);
3423 	local_irq_restore(flags);
3424 }
3425 EXPORT_SYMBOL(__napi_schedule);
3426 
3427 void __napi_complete(struct napi_struct *n)
3428 {
3429 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3430 	BUG_ON(n->gro_list);
3431 
3432 	list_del(&n->poll_list);
3433 	smp_mb__before_clear_bit();
3434 	clear_bit(NAPI_STATE_SCHED, &n->state);
3435 }
3436 EXPORT_SYMBOL(__napi_complete);
3437 
3438 void napi_complete(struct napi_struct *n)
3439 {
3440 	unsigned long flags;
3441 
3442 	/*
3443 	 * don't let napi dequeue from the cpu poll list
3444 	 * just in case its running on a different cpu
3445 	 */
3446 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3447 		return;
3448 
3449 	napi_gro_flush(n);
3450 	local_irq_save(flags);
3451 	__napi_complete(n);
3452 	local_irq_restore(flags);
3453 }
3454 EXPORT_SYMBOL(napi_complete);
3455 
3456 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3457 		    int (*poll)(struct napi_struct *, int), int weight)
3458 {
3459 	INIT_LIST_HEAD(&napi->poll_list);
3460 	napi->gro_count = 0;
3461 	napi->gro_list = NULL;
3462 	napi->skb = NULL;
3463 	napi->poll = poll;
3464 	napi->weight = weight;
3465 	list_add(&napi->dev_list, &dev->napi_list);
3466 	napi->dev = dev;
3467 #ifdef CONFIG_NETPOLL
3468 	spin_lock_init(&napi->poll_lock);
3469 	napi->poll_owner = -1;
3470 #endif
3471 	set_bit(NAPI_STATE_SCHED, &napi->state);
3472 }
3473 EXPORT_SYMBOL(netif_napi_add);
3474 
3475 void netif_napi_del(struct napi_struct *napi)
3476 {
3477 	struct sk_buff *skb, *next;
3478 
3479 	list_del_init(&napi->dev_list);
3480 	napi_free_frags(napi);
3481 
3482 	for (skb = napi->gro_list; skb; skb = next) {
3483 		next = skb->next;
3484 		skb->next = NULL;
3485 		kfree_skb(skb);
3486 	}
3487 
3488 	napi->gro_list = NULL;
3489 	napi->gro_count = 0;
3490 }
3491 EXPORT_SYMBOL(netif_napi_del);
3492 
3493 static void net_rx_action(struct softirq_action *h)
3494 {
3495 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3496 	unsigned long time_limit = jiffies + 2;
3497 	int budget = netdev_budget;
3498 	void *have;
3499 
3500 	local_irq_disable();
3501 
3502 	while (!list_empty(&sd->poll_list)) {
3503 		struct napi_struct *n;
3504 		int work, weight;
3505 
3506 		/* If softirq window is exhuasted then punt.
3507 		 * Allow this to run for 2 jiffies since which will allow
3508 		 * an average latency of 1.5/HZ.
3509 		 */
3510 		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3511 			goto softnet_break;
3512 
3513 		local_irq_enable();
3514 
3515 		/* Even though interrupts have been re-enabled, this
3516 		 * access is safe because interrupts can only add new
3517 		 * entries to the tail of this list, and only ->poll()
3518 		 * calls can remove this head entry from the list.
3519 		 */
3520 		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3521 
3522 		have = netpoll_poll_lock(n);
3523 
3524 		weight = n->weight;
3525 
3526 		/* This NAPI_STATE_SCHED test is for avoiding a race
3527 		 * with netpoll's poll_napi().  Only the entity which
3528 		 * obtains the lock and sees NAPI_STATE_SCHED set will
3529 		 * actually make the ->poll() call.  Therefore we avoid
3530 		 * accidently calling ->poll() when NAPI is not scheduled.
3531 		 */
3532 		work = 0;
3533 		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3534 			work = n->poll(n, weight);
3535 			trace_napi_poll(n);
3536 		}
3537 
3538 		WARN_ON_ONCE(work > weight);
3539 
3540 		budget -= work;
3541 
3542 		local_irq_disable();
3543 
3544 		/* Drivers must not modify the NAPI state if they
3545 		 * consume the entire weight.  In such cases this code
3546 		 * still "owns" the NAPI instance and therefore can
3547 		 * move the instance around on the list at-will.
3548 		 */
3549 		if (unlikely(work == weight)) {
3550 			if (unlikely(napi_disable_pending(n))) {
3551 				local_irq_enable();
3552 				napi_complete(n);
3553 				local_irq_disable();
3554 			} else
3555 				list_move_tail(&n->poll_list, &sd->poll_list);
3556 		}
3557 
3558 		netpoll_poll_unlock(have);
3559 	}
3560 out:
3561 	net_rps_action_and_irq_enable(sd);
3562 
3563 #ifdef CONFIG_NET_DMA
3564 	/*
3565 	 * There may not be any more sk_buffs coming right now, so push
3566 	 * any pending DMA copies to hardware
3567 	 */
3568 	dma_issue_pending_all();
3569 #endif
3570 
3571 	return;
3572 
3573 softnet_break:
3574 	sd->time_squeeze++;
3575 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3576 	goto out;
3577 }
3578 
3579 static gifconf_func_t *gifconf_list[NPROTO];
3580 
3581 /**
3582  *	register_gifconf	-	register a SIOCGIF handler
3583  *	@family: Address family
3584  *	@gifconf: Function handler
3585  *
3586  *	Register protocol dependent address dumping routines. The handler
3587  *	that is passed must not be freed or reused until it has been replaced
3588  *	by another handler.
3589  */
3590 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3591 {
3592 	if (family >= NPROTO)
3593 		return -EINVAL;
3594 	gifconf_list[family] = gifconf;
3595 	return 0;
3596 }
3597 EXPORT_SYMBOL(register_gifconf);
3598 
3599 
3600 /*
3601  *	Map an interface index to its name (SIOCGIFNAME)
3602  */
3603 
3604 /*
3605  *	We need this ioctl for efficient implementation of the
3606  *	if_indextoname() function required by the IPv6 API.  Without
3607  *	it, we would have to search all the interfaces to find a
3608  *	match.  --pb
3609  */
3610 
3611 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3612 {
3613 	struct net_device *dev;
3614 	struct ifreq ifr;
3615 
3616 	/*
3617 	 *	Fetch the caller's info block.
3618 	 */
3619 
3620 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3621 		return -EFAULT;
3622 
3623 	rcu_read_lock();
3624 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3625 	if (!dev) {
3626 		rcu_read_unlock();
3627 		return -ENODEV;
3628 	}
3629 
3630 	strcpy(ifr.ifr_name, dev->name);
3631 	rcu_read_unlock();
3632 
3633 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3634 		return -EFAULT;
3635 	return 0;
3636 }
3637 
3638 /*
3639  *	Perform a SIOCGIFCONF call. This structure will change
3640  *	size eventually, and there is nothing I can do about it.
3641  *	Thus we will need a 'compatibility mode'.
3642  */
3643 
3644 static int dev_ifconf(struct net *net, char __user *arg)
3645 {
3646 	struct ifconf ifc;
3647 	struct net_device *dev;
3648 	char __user *pos;
3649 	int len;
3650 	int total;
3651 	int i;
3652 
3653 	/*
3654 	 *	Fetch the caller's info block.
3655 	 */
3656 
3657 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3658 		return -EFAULT;
3659 
3660 	pos = ifc.ifc_buf;
3661 	len = ifc.ifc_len;
3662 
3663 	/*
3664 	 *	Loop over the interfaces, and write an info block for each.
3665 	 */
3666 
3667 	total = 0;
3668 	for_each_netdev(net, dev) {
3669 		for (i = 0; i < NPROTO; i++) {
3670 			if (gifconf_list[i]) {
3671 				int done;
3672 				if (!pos)
3673 					done = gifconf_list[i](dev, NULL, 0);
3674 				else
3675 					done = gifconf_list[i](dev, pos + total,
3676 							       len - total);
3677 				if (done < 0)
3678 					return -EFAULT;
3679 				total += done;
3680 			}
3681 		}
3682 	}
3683 
3684 	/*
3685 	 *	All done.  Write the updated control block back to the caller.
3686 	 */
3687 	ifc.ifc_len = total;
3688 
3689 	/*
3690 	 * 	Both BSD and Solaris return 0 here, so we do too.
3691 	 */
3692 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3693 }
3694 
3695 #ifdef CONFIG_PROC_FS
3696 /*
3697  *	This is invoked by the /proc filesystem handler to display a device
3698  *	in detail.
3699  */
3700 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3701 	__acquires(RCU)
3702 {
3703 	struct net *net = seq_file_net(seq);
3704 	loff_t off;
3705 	struct net_device *dev;
3706 
3707 	rcu_read_lock();
3708 	if (!*pos)
3709 		return SEQ_START_TOKEN;
3710 
3711 	off = 1;
3712 	for_each_netdev_rcu(net, dev)
3713 		if (off++ == *pos)
3714 			return dev;
3715 
3716 	return NULL;
3717 }
3718 
3719 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3720 {
3721 	struct net_device *dev = (v == SEQ_START_TOKEN) ?
3722 				  first_net_device(seq_file_net(seq)) :
3723 				  next_net_device((struct net_device *)v);
3724 
3725 	++*pos;
3726 	return rcu_dereference(dev);
3727 }
3728 
3729 void dev_seq_stop(struct seq_file *seq, void *v)
3730 	__releases(RCU)
3731 {
3732 	rcu_read_unlock();
3733 }
3734 
3735 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3736 {
3737 	struct rtnl_link_stats64 temp;
3738 	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
3739 
3740 	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3741 		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
3742 		   dev->name, stats->rx_bytes, stats->rx_packets,
3743 		   stats->rx_errors,
3744 		   stats->rx_dropped + stats->rx_missed_errors,
3745 		   stats->rx_fifo_errors,
3746 		   stats->rx_length_errors + stats->rx_over_errors +
3747 		    stats->rx_crc_errors + stats->rx_frame_errors,
3748 		   stats->rx_compressed, stats->multicast,
3749 		   stats->tx_bytes, stats->tx_packets,
3750 		   stats->tx_errors, stats->tx_dropped,
3751 		   stats->tx_fifo_errors, stats->collisions,
3752 		   stats->tx_carrier_errors +
3753 		    stats->tx_aborted_errors +
3754 		    stats->tx_window_errors +
3755 		    stats->tx_heartbeat_errors,
3756 		   stats->tx_compressed);
3757 }
3758 
3759 /*
3760  *	Called from the PROCfs module. This now uses the new arbitrary sized
3761  *	/proc/net interface to create /proc/net/dev
3762  */
3763 static int dev_seq_show(struct seq_file *seq, void *v)
3764 {
3765 	if (v == SEQ_START_TOKEN)
3766 		seq_puts(seq, "Inter-|   Receive                            "
3767 			      "                    |  Transmit\n"
3768 			      " face |bytes    packets errs drop fifo frame "
3769 			      "compressed multicast|bytes    packets errs "
3770 			      "drop fifo colls carrier compressed\n");
3771 	else
3772 		dev_seq_printf_stats(seq, v);
3773 	return 0;
3774 }
3775 
3776 static struct softnet_data *softnet_get_online(loff_t *pos)
3777 {
3778 	struct softnet_data *sd = NULL;
3779 
3780 	while (*pos < nr_cpu_ids)
3781 		if (cpu_online(*pos)) {
3782 			sd = &per_cpu(softnet_data, *pos);
3783 			break;
3784 		} else
3785 			++*pos;
3786 	return sd;
3787 }
3788 
3789 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3790 {
3791 	return softnet_get_online(pos);
3792 }
3793 
3794 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3795 {
3796 	++*pos;
3797 	return softnet_get_online(pos);
3798 }
3799 
3800 static void softnet_seq_stop(struct seq_file *seq, void *v)
3801 {
3802 }
3803 
3804 static int softnet_seq_show(struct seq_file *seq, void *v)
3805 {
3806 	struct softnet_data *sd = v;
3807 
3808 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3809 		   sd->processed, sd->dropped, sd->time_squeeze, 0,
3810 		   0, 0, 0, 0, /* was fastroute */
3811 		   sd->cpu_collision, sd->received_rps);
3812 	return 0;
3813 }
3814 
3815 static const struct seq_operations dev_seq_ops = {
3816 	.start = dev_seq_start,
3817 	.next  = dev_seq_next,
3818 	.stop  = dev_seq_stop,
3819 	.show  = dev_seq_show,
3820 };
3821 
3822 static int dev_seq_open(struct inode *inode, struct file *file)
3823 {
3824 	return seq_open_net(inode, file, &dev_seq_ops,
3825 			    sizeof(struct seq_net_private));
3826 }
3827 
3828 static const struct file_operations dev_seq_fops = {
3829 	.owner	 = THIS_MODULE,
3830 	.open    = dev_seq_open,
3831 	.read    = seq_read,
3832 	.llseek  = seq_lseek,
3833 	.release = seq_release_net,
3834 };
3835 
3836 static const struct seq_operations softnet_seq_ops = {
3837 	.start = softnet_seq_start,
3838 	.next  = softnet_seq_next,
3839 	.stop  = softnet_seq_stop,
3840 	.show  = softnet_seq_show,
3841 };
3842 
3843 static int softnet_seq_open(struct inode *inode, struct file *file)
3844 {
3845 	return seq_open(file, &softnet_seq_ops);
3846 }
3847 
3848 static const struct file_operations softnet_seq_fops = {
3849 	.owner	 = THIS_MODULE,
3850 	.open    = softnet_seq_open,
3851 	.read    = seq_read,
3852 	.llseek  = seq_lseek,
3853 	.release = seq_release,
3854 };
3855 
3856 static void *ptype_get_idx(loff_t pos)
3857 {
3858 	struct packet_type *pt = NULL;
3859 	loff_t i = 0;
3860 	int t;
3861 
3862 	list_for_each_entry_rcu(pt, &ptype_all, list) {
3863 		if (i == pos)
3864 			return pt;
3865 		++i;
3866 	}
3867 
3868 	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3869 		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3870 			if (i == pos)
3871 				return pt;
3872 			++i;
3873 		}
3874 	}
3875 	return NULL;
3876 }
3877 
3878 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3879 	__acquires(RCU)
3880 {
3881 	rcu_read_lock();
3882 	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3883 }
3884 
3885 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3886 {
3887 	struct packet_type *pt;
3888 	struct list_head *nxt;
3889 	int hash;
3890 
3891 	++*pos;
3892 	if (v == SEQ_START_TOKEN)
3893 		return ptype_get_idx(0);
3894 
3895 	pt = v;
3896 	nxt = pt->list.next;
3897 	if (pt->type == htons(ETH_P_ALL)) {
3898 		if (nxt != &ptype_all)
3899 			goto found;
3900 		hash = 0;
3901 		nxt = ptype_base[0].next;
3902 	} else
3903 		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3904 
3905 	while (nxt == &ptype_base[hash]) {
3906 		if (++hash >= PTYPE_HASH_SIZE)
3907 			return NULL;
3908 		nxt = ptype_base[hash].next;
3909 	}
3910 found:
3911 	return list_entry(nxt, struct packet_type, list);
3912 }
3913 
3914 static void ptype_seq_stop(struct seq_file *seq, void *v)
3915 	__releases(RCU)
3916 {
3917 	rcu_read_unlock();
3918 }
3919 
3920 static int ptype_seq_show(struct seq_file *seq, void *v)
3921 {
3922 	struct packet_type *pt = v;
3923 
3924 	if (v == SEQ_START_TOKEN)
3925 		seq_puts(seq, "Type Device      Function\n");
3926 	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3927 		if (pt->type == htons(ETH_P_ALL))
3928 			seq_puts(seq, "ALL ");
3929 		else
3930 			seq_printf(seq, "%04x", ntohs(pt->type));
3931 
3932 		seq_printf(seq, " %-8s %pF\n",
3933 			   pt->dev ? pt->dev->name : "", pt->func);
3934 	}
3935 
3936 	return 0;
3937 }
3938 
3939 static const struct seq_operations ptype_seq_ops = {
3940 	.start = ptype_seq_start,
3941 	.next  = ptype_seq_next,
3942 	.stop  = ptype_seq_stop,
3943 	.show  = ptype_seq_show,
3944 };
3945 
3946 static int ptype_seq_open(struct inode *inode, struct file *file)
3947 {
3948 	return seq_open_net(inode, file, &ptype_seq_ops,
3949 			sizeof(struct seq_net_private));
3950 }
3951 
3952 static const struct file_operations ptype_seq_fops = {
3953 	.owner	 = THIS_MODULE,
3954 	.open    = ptype_seq_open,
3955 	.read    = seq_read,
3956 	.llseek  = seq_lseek,
3957 	.release = seq_release_net,
3958 };
3959 
3960 
3961 static int __net_init dev_proc_net_init(struct net *net)
3962 {
3963 	int rc = -ENOMEM;
3964 
3965 	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3966 		goto out;
3967 	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3968 		goto out_dev;
3969 	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3970 		goto out_softnet;
3971 
3972 	if (wext_proc_init(net))
3973 		goto out_ptype;
3974 	rc = 0;
3975 out:
3976 	return rc;
3977 out_ptype:
3978 	proc_net_remove(net, "ptype");
3979 out_softnet:
3980 	proc_net_remove(net, "softnet_stat");
3981 out_dev:
3982 	proc_net_remove(net, "dev");
3983 	goto out;
3984 }
3985 
3986 static void __net_exit dev_proc_net_exit(struct net *net)
3987 {
3988 	wext_proc_exit(net);
3989 
3990 	proc_net_remove(net, "ptype");
3991 	proc_net_remove(net, "softnet_stat");
3992 	proc_net_remove(net, "dev");
3993 }
3994 
3995 static struct pernet_operations __net_initdata dev_proc_ops = {
3996 	.init = dev_proc_net_init,
3997 	.exit = dev_proc_net_exit,
3998 };
3999 
4000 static int __init dev_proc_init(void)
4001 {
4002 	return register_pernet_subsys(&dev_proc_ops);
4003 }
4004 #else
4005 #define dev_proc_init() 0
4006 #endif	/* CONFIG_PROC_FS */
4007 
4008 
4009 /**
4010  *	netdev_set_master	-	set up master/slave pair
4011  *	@slave: slave device
4012  *	@master: new master device
4013  *
4014  *	Changes the master device of the slave. Pass %NULL to break the
4015  *	bonding. The caller must hold the RTNL semaphore. On a failure
4016  *	a negative errno code is returned. On success the reference counts
4017  *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4018  *	function returns zero.
4019  */
4020 int netdev_set_master(struct net_device *slave, struct net_device *master)
4021 {
4022 	struct net_device *old = slave->master;
4023 
4024 	ASSERT_RTNL();
4025 
4026 	if (master) {
4027 		if (old)
4028 			return -EBUSY;
4029 		dev_hold(master);
4030 	}
4031 
4032 	slave->master = master;
4033 
4034 	if (old) {
4035 		synchronize_net();
4036 		dev_put(old);
4037 	}
4038 	if (master)
4039 		slave->flags |= IFF_SLAVE;
4040 	else
4041 		slave->flags &= ~IFF_SLAVE;
4042 
4043 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4044 	return 0;
4045 }
4046 EXPORT_SYMBOL(netdev_set_master);
4047 
4048 static void dev_change_rx_flags(struct net_device *dev, int flags)
4049 {
4050 	const struct net_device_ops *ops = dev->netdev_ops;
4051 
4052 	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4053 		ops->ndo_change_rx_flags(dev, flags);
4054 }
4055 
4056 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4057 {
4058 	unsigned short old_flags = dev->flags;
4059 	uid_t uid;
4060 	gid_t gid;
4061 
4062 	ASSERT_RTNL();
4063 
4064 	dev->flags |= IFF_PROMISC;
4065 	dev->promiscuity += inc;
4066 	if (dev->promiscuity == 0) {
4067 		/*
4068 		 * Avoid overflow.
4069 		 * If inc causes overflow, untouch promisc and return error.
4070 		 */
4071 		if (inc < 0)
4072 			dev->flags &= ~IFF_PROMISC;
4073 		else {
4074 			dev->promiscuity -= inc;
4075 			printk(KERN_WARNING "%s: promiscuity touches roof, "
4076 				"set promiscuity failed, promiscuity feature "
4077 				"of device might be broken.\n", dev->name);
4078 			return -EOVERFLOW;
4079 		}
4080 	}
4081 	if (dev->flags != old_flags) {
4082 		printk(KERN_INFO "device %s %s promiscuous mode\n",
4083 		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4084 							       "left");
4085 		if (audit_enabled) {
4086 			current_uid_gid(&uid, &gid);
4087 			audit_log(current->audit_context, GFP_ATOMIC,
4088 				AUDIT_ANOM_PROMISCUOUS,
4089 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4090 				dev->name, (dev->flags & IFF_PROMISC),
4091 				(old_flags & IFF_PROMISC),
4092 				audit_get_loginuid(current),
4093 				uid, gid,
4094 				audit_get_sessionid(current));
4095 		}
4096 
4097 		dev_change_rx_flags(dev, IFF_PROMISC);
4098 	}
4099 	return 0;
4100 }
4101 
4102 /**
4103  *	dev_set_promiscuity	- update promiscuity count on a device
4104  *	@dev: device
4105  *	@inc: modifier
4106  *
4107  *	Add or remove promiscuity from a device. While the count in the device
4108  *	remains above zero the interface remains promiscuous. Once it hits zero
4109  *	the device reverts back to normal filtering operation. A negative inc
4110  *	value is used to drop promiscuity on the device.
4111  *	Return 0 if successful or a negative errno code on error.
4112  */
4113 int dev_set_promiscuity(struct net_device *dev, int inc)
4114 {
4115 	unsigned short old_flags = dev->flags;
4116 	int err;
4117 
4118 	err = __dev_set_promiscuity(dev, inc);
4119 	if (err < 0)
4120 		return err;
4121 	if (dev->flags != old_flags)
4122 		dev_set_rx_mode(dev);
4123 	return err;
4124 }
4125 EXPORT_SYMBOL(dev_set_promiscuity);
4126 
4127 /**
4128  *	dev_set_allmulti	- update allmulti count on a device
4129  *	@dev: device
4130  *	@inc: modifier
4131  *
4132  *	Add or remove reception of all multicast frames to a device. While the
4133  *	count in the device remains above zero the interface remains listening
4134  *	to all interfaces. Once it hits zero the device reverts back to normal
4135  *	filtering operation. A negative @inc value is used to drop the counter
4136  *	when releasing a resource needing all multicasts.
4137  *	Return 0 if successful or a negative errno code on error.
4138  */
4139 
4140 int dev_set_allmulti(struct net_device *dev, int inc)
4141 {
4142 	unsigned short old_flags = dev->flags;
4143 
4144 	ASSERT_RTNL();
4145 
4146 	dev->flags |= IFF_ALLMULTI;
4147 	dev->allmulti += inc;
4148 	if (dev->allmulti == 0) {
4149 		/*
4150 		 * Avoid overflow.
4151 		 * If inc causes overflow, untouch allmulti and return error.
4152 		 */
4153 		if (inc < 0)
4154 			dev->flags &= ~IFF_ALLMULTI;
4155 		else {
4156 			dev->allmulti -= inc;
4157 			printk(KERN_WARNING "%s: allmulti touches roof, "
4158 				"set allmulti failed, allmulti feature of "
4159 				"device might be broken.\n", dev->name);
4160 			return -EOVERFLOW;
4161 		}
4162 	}
4163 	if (dev->flags ^ old_flags) {
4164 		dev_change_rx_flags(dev, IFF_ALLMULTI);
4165 		dev_set_rx_mode(dev);
4166 	}
4167 	return 0;
4168 }
4169 EXPORT_SYMBOL(dev_set_allmulti);
4170 
4171 /*
4172  *	Upload unicast and multicast address lists to device and
4173  *	configure RX filtering. When the device doesn't support unicast
4174  *	filtering it is put in promiscuous mode while unicast addresses
4175  *	are present.
4176  */
4177 void __dev_set_rx_mode(struct net_device *dev)
4178 {
4179 	const struct net_device_ops *ops = dev->netdev_ops;
4180 
4181 	/* dev_open will call this function so the list will stay sane. */
4182 	if (!(dev->flags&IFF_UP))
4183 		return;
4184 
4185 	if (!netif_device_present(dev))
4186 		return;
4187 
4188 	if (ops->ndo_set_rx_mode)
4189 		ops->ndo_set_rx_mode(dev);
4190 	else {
4191 		/* Unicast addresses changes may only happen under the rtnl,
4192 		 * therefore calling __dev_set_promiscuity here is safe.
4193 		 */
4194 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4195 			__dev_set_promiscuity(dev, 1);
4196 			dev->uc_promisc = 1;
4197 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4198 			__dev_set_promiscuity(dev, -1);
4199 			dev->uc_promisc = 0;
4200 		}
4201 
4202 		if (ops->ndo_set_multicast_list)
4203 			ops->ndo_set_multicast_list(dev);
4204 	}
4205 }
4206 
4207 void dev_set_rx_mode(struct net_device *dev)
4208 {
4209 	netif_addr_lock_bh(dev);
4210 	__dev_set_rx_mode(dev);
4211 	netif_addr_unlock_bh(dev);
4212 }
4213 
4214 /**
4215  *	dev_get_flags - get flags reported to userspace
4216  *	@dev: device
4217  *
4218  *	Get the combination of flag bits exported through APIs to userspace.
4219  */
4220 unsigned dev_get_flags(const struct net_device *dev)
4221 {
4222 	unsigned flags;
4223 
4224 	flags = (dev->flags & ~(IFF_PROMISC |
4225 				IFF_ALLMULTI |
4226 				IFF_RUNNING |
4227 				IFF_LOWER_UP |
4228 				IFF_DORMANT)) |
4229 		(dev->gflags & (IFF_PROMISC |
4230 				IFF_ALLMULTI));
4231 
4232 	if (netif_running(dev)) {
4233 		if (netif_oper_up(dev))
4234 			flags |= IFF_RUNNING;
4235 		if (netif_carrier_ok(dev))
4236 			flags |= IFF_LOWER_UP;
4237 		if (netif_dormant(dev))
4238 			flags |= IFF_DORMANT;
4239 	}
4240 
4241 	return flags;
4242 }
4243 EXPORT_SYMBOL(dev_get_flags);
4244 
4245 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4246 {
4247 	int old_flags = dev->flags;
4248 	int ret;
4249 
4250 	ASSERT_RTNL();
4251 
4252 	/*
4253 	 *	Set the flags on our device.
4254 	 */
4255 
4256 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4257 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4258 			       IFF_AUTOMEDIA)) |
4259 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4260 				    IFF_ALLMULTI));
4261 
4262 	/*
4263 	 *	Load in the correct multicast list now the flags have changed.
4264 	 */
4265 
4266 	if ((old_flags ^ flags) & IFF_MULTICAST)
4267 		dev_change_rx_flags(dev, IFF_MULTICAST);
4268 
4269 	dev_set_rx_mode(dev);
4270 
4271 	/*
4272 	 *	Have we downed the interface. We handle IFF_UP ourselves
4273 	 *	according to user attempts to set it, rather than blindly
4274 	 *	setting it.
4275 	 */
4276 
4277 	ret = 0;
4278 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
4279 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4280 
4281 		if (!ret)
4282 			dev_set_rx_mode(dev);
4283 	}
4284 
4285 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
4286 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
4287 
4288 		dev->gflags ^= IFF_PROMISC;
4289 		dev_set_promiscuity(dev, inc);
4290 	}
4291 
4292 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4293 	   is important. Some (broken) drivers set IFF_PROMISC, when
4294 	   IFF_ALLMULTI is requested not asking us and not reporting.
4295 	 */
4296 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4297 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4298 
4299 		dev->gflags ^= IFF_ALLMULTI;
4300 		dev_set_allmulti(dev, inc);
4301 	}
4302 
4303 	return ret;
4304 }
4305 
4306 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4307 {
4308 	unsigned int changes = dev->flags ^ old_flags;
4309 
4310 	if (changes & IFF_UP) {
4311 		if (dev->flags & IFF_UP)
4312 			call_netdevice_notifiers(NETDEV_UP, dev);
4313 		else
4314 			call_netdevice_notifiers(NETDEV_DOWN, dev);
4315 	}
4316 
4317 	if (dev->flags & IFF_UP &&
4318 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4319 		call_netdevice_notifiers(NETDEV_CHANGE, dev);
4320 }
4321 
4322 /**
4323  *	dev_change_flags - change device settings
4324  *	@dev: device
4325  *	@flags: device state flags
4326  *
4327  *	Change settings on device based state flags. The flags are
4328  *	in the userspace exported format.
4329  */
4330 int dev_change_flags(struct net_device *dev, unsigned flags)
4331 {
4332 	int ret, changes;
4333 	int old_flags = dev->flags;
4334 
4335 	ret = __dev_change_flags(dev, flags);
4336 	if (ret < 0)
4337 		return ret;
4338 
4339 	changes = old_flags ^ dev->flags;
4340 	if (changes)
4341 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4342 
4343 	__dev_notify_flags(dev, old_flags);
4344 	return ret;
4345 }
4346 EXPORT_SYMBOL(dev_change_flags);
4347 
4348 /**
4349  *	dev_set_mtu - Change maximum transfer unit
4350  *	@dev: device
4351  *	@new_mtu: new transfer unit
4352  *
4353  *	Change the maximum transfer size of the network device.
4354  */
4355 int dev_set_mtu(struct net_device *dev, int new_mtu)
4356 {
4357 	const struct net_device_ops *ops = dev->netdev_ops;
4358 	int err;
4359 
4360 	if (new_mtu == dev->mtu)
4361 		return 0;
4362 
4363 	/*	MTU must be positive.	 */
4364 	if (new_mtu < 0)
4365 		return -EINVAL;
4366 
4367 	if (!netif_device_present(dev))
4368 		return -ENODEV;
4369 
4370 	err = 0;
4371 	if (ops->ndo_change_mtu)
4372 		err = ops->ndo_change_mtu(dev, new_mtu);
4373 	else
4374 		dev->mtu = new_mtu;
4375 
4376 	if (!err && dev->flags & IFF_UP)
4377 		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4378 	return err;
4379 }
4380 EXPORT_SYMBOL(dev_set_mtu);
4381 
4382 /**
4383  *	dev_set_mac_address - Change Media Access Control Address
4384  *	@dev: device
4385  *	@sa: new address
4386  *
4387  *	Change the hardware (MAC) address of the device
4388  */
4389 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4390 {
4391 	const struct net_device_ops *ops = dev->netdev_ops;
4392 	int err;
4393 
4394 	if (!ops->ndo_set_mac_address)
4395 		return -EOPNOTSUPP;
4396 	if (sa->sa_family != dev->type)
4397 		return -EINVAL;
4398 	if (!netif_device_present(dev))
4399 		return -ENODEV;
4400 	err = ops->ndo_set_mac_address(dev, sa);
4401 	if (!err)
4402 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4403 	return err;
4404 }
4405 EXPORT_SYMBOL(dev_set_mac_address);
4406 
4407 /*
4408  *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4409  */
4410 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4411 {
4412 	int err;
4413 	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4414 
4415 	if (!dev)
4416 		return -ENODEV;
4417 
4418 	switch (cmd) {
4419 	case SIOCGIFFLAGS:	/* Get interface flags */
4420 		ifr->ifr_flags = (short) dev_get_flags(dev);
4421 		return 0;
4422 
4423 	case SIOCGIFMETRIC:	/* Get the metric on the interface
4424 				   (currently unused) */
4425 		ifr->ifr_metric = 0;
4426 		return 0;
4427 
4428 	case SIOCGIFMTU:	/* Get the MTU of a device */
4429 		ifr->ifr_mtu = dev->mtu;
4430 		return 0;
4431 
4432 	case SIOCGIFHWADDR:
4433 		if (!dev->addr_len)
4434 			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4435 		else
4436 			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4437 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4438 		ifr->ifr_hwaddr.sa_family = dev->type;
4439 		return 0;
4440 
4441 	case SIOCGIFSLAVE:
4442 		err = -EINVAL;
4443 		break;
4444 
4445 	case SIOCGIFMAP:
4446 		ifr->ifr_map.mem_start = dev->mem_start;
4447 		ifr->ifr_map.mem_end   = dev->mem_end;
4448 		ifr->ifr_map.base_addr = dev->base_addr;
4449 		ifr->ifr_map.irq       = dev->irq;
4450 		ifr->ifr_map.dma       = dev->dma;
4451 		ifr->ifr_map.port      = dev->if_port;
4452 		return 0;
4453 
4454 	case SIOCGIFINDEX:
4455 		ifr->ifr_ifindex = dev->ifindex;
4456 		return 0;
4457 
4458 	case SIOCGIFTXQLEN:
4459 		ifr->ifr_qlen = dev->tx_queue_len;
4460 		return 0;
4461 
4462 	default:
4463 		/* dev_ioctl() should ensure this case
4464 		 * is never reached
4465 		 */
4466 		WARN_ON(1);
4467 		err = -EINVAL;
4468 		break;
4469 
4470 	}
4471 	return err;
4472 }
4473 
4474 /*
4475  *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
4476  */
4477 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4478 {
4479 	int err;
4480 	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4481 	const struct net_device_ops *ops;
4482 
4483 	if (!dev)
4484 		return -ENODEV;
4485 
4486 	ops = dev->netdev_ops;
4487 
4488 	switch (cmd) {
4489 	case SIOCSIFFLAGS:	/* Set interface flags */
4490 		return dev_change_flags(dev, ifr->ifr_flags);
4491 
4492 	case SIOCSIFMETRIC:	/* Set the metric on the interface
4493 				   (currently unused) */
4494 		return -EOPNOTSUPP;
4495 
4496 	case SIOCSIFMTU:	/* Set the MTU of a device */
4497 		return dev_set_mtu(dev, ifr->ifr_mtu);
4498 
4499 	case SIOCSIFHWADDR:
4500 		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4501 
4502 	case SIOCSIFHWBROADCAST:
4503 		if (ifr->ifr_hwaddr.sa_family != dev->type)
4504 			return -EINVAL;
4505 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4506 		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4507 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4508 		return 0;
4509 
4510 	case SIOCSIFMAP:
4511 		if (ops->ndo_set_config) {
4512 			if (!netif_device_present(dev))
4513 				return -ENODEV;
4514 			return ops->ndo_set_config(dev, &ifr->ifr_map);
4515 		}
4516 		return -EOPNOTSUPP;
4517 
4518 	case SIOCADDMULTI:
4519 		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4520 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4521 			return -EINVAL;
4522 		if (!netif_device_present(dev))
4523 			return -ENODEV;
4524 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4525 
4526 	case SIOCDELMULTI:
4527 		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4528 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4529 			return -EINVAL;
4530 		if (!netif_device_present(dev))
4531 			return -ENODEV;
4532 		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4533 
4534 	case SIOCSIFTXQLEN:
4535 		if (ifr->ifr_qlen < 0)
4536 			return -EINVAL;
4537 		dev->tx_queue_len = ifr->ifr_qlen;
4538 		return 0;
4539 
4540 	case SIOCSIFNAME:
4541 		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4542 		return dev_change_name(dev, ifr->ifr_newname);
4543 
4544 	/*
4545 	 *	Unknown or private ioctl
4546 	 */
4547 	default:
4548 		if ((cmd >= SIOCDEVPRIVATE &&
4549 		    cmd <= SIOCDEVPRIVATE + 15) ||
4550 		    cmd == SIOCBONDENSLAVE ||
4551 		    cmd == SIOCBONDRELEASE ||
4552 		    cmd == SIOCBONDSETHWADDR ||
4553 		    cmd == SIOCBONDSLAVEINFOQUERY ||
4554 		    cmd == SIOCBONDINFOQUERY ||
4555 		    cmd == SIOCBONDCHANGEACTIVE ||
4556 		    cmd == SIOCGMIIPHY ||
4557 		    cmd == SIOCGMIIREG ||
4558 		    cmd == SIOCSMIIREG ||
4559 		    cmd == SIOCBRADDIF ||
4560 		    cmd == SIOCBRDELIF ||
4561 		    cmd == SIOCSHWTSTAMP ||
4562 		    cmd == SIOCWANDEV) {
4563 			err = -EOPNOTSUPP;
4564 			if (ops->ndo_do_ioctl) {
4565 				if (netif_device_present(dev))
4566 					err = ops->ndo_do_ioctl(dev, ifr, cmd);
4567 				else
4568 					err = -ENODEV;
4569 			}
4570 		} else
4571 			err = -EINVAL;
4572 
4573 	}
4574 	return err;
4575 }
4576 
4577 /*
4578  *	This function handles all "interface"-type I/O control requests. The actual
4579  *	'doing' part of this is dev_ifsioc above.
4580  */
4581 
4582 /**
4583  *	dev_ioctl	-	network device ioctl
4584  *	@net: the applicable net namespace
4585  *	@cmd: command to issue
4586  *	@arg: pointer to a struct ifreq in user space
4587  *
4588  *	Issue ioctl functions to devices. This is normally called by the
4589  *	user space syscall interfaces but can sometimes be useful for
4590  *	other purposes. The return value is the return from the syscall if
4591  *	positive or a negative errno code on error.
4592  */
4593 
4594 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4595 {
4596 	struct ifreq ifr;
4597 	int ret;
4598 	char *colon;
4599 
4600 	/* One special case: SIOCGIFCONF takes ifconf argument
4601 	   and requires shared lock, because it sleeps writing
4602 	   to user space.
4603 	 */
4604 
4605 	if (cmd == SIOCGIFCONF) {
4606 		rtnl_lock();
4607 		ret = dev_ifconf(net, (char __user *) arg);
4608 		rtnl_unlock();
4609 		return ret;
4610 	}
4611 	if (cmd == SIOCGIFNAME)
4612 		return dev_ifname(net, (struct ifreq __user *)arg);
4613 
4614 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4615 		return -EFAULT;
4616 
4617 	ifr.ifr_name[IFNAMSIZ-1] = 0;
4618 
4619 	colon = strchr(ifr.ifr_name, ':');
4620 	if (colon)
4621 		*colon = 0;
4622 
4623 	/*
4624 	 *	See which interface the caller is talking about.
4625 	 */
4626 
4627 	switch (cmd) {
4628 	/*
4629 	 *	These ioctl calls:
4630 	 *	- can be done by all.
4631 	 *	- atomic and do not require locking.
4632 	 *	- return a value
4633 	 */
4634 	case SIOCGIFFLAGS:
4635 	case SIOCGIFMETRIC:
4636 	case SIOCGIFMTU:
4637 	case SIOCGIFHWADDR:
4638 	case SIOCGIFSLAVE:
4639 	case SIOCGIFMAP:
4640 	case SIOCGIFINDEX:
4641 	case SIOCGIFTXQLEN:
4642 		dev_load(net, ifr.ifr_name);
4643 		rcu_read_lock();
4644 		ret = dev_ifsioc_locked(net, &ifr, cmd);
4645 		rcu_read_unlock();
4646 		if (!ret) {
4647 			if (colon)
4648 				*colon = ':';
4649 			if (copy_to_user(arg, &ifr,
4650 					 sizeof(struct ifreq)))
4651 				ret = -EFAULT;
4652 		}
4653 		return ret;
4654 
4655 	case SIOCETHTOOL:
4656 		dev_load(net, ifr.ifr_name);
4657 		rtnl_lock();
4658 		ret = dev_ethtool(net, &ifr);
4659 		rtnl_unlock();
4660 		if (!ret) {
4661 			if (colon)
4662 				*colon = ':';
4663 			if (copy_to_user(arg, &ifr,
4664 					 sizeof(struct ifreq)))
4665 				ret = -EFAULT;
4666 		}
4667 		return ret;
4668 
4669 	/*
4670 	 *	These ioctl calls:
4671 	 *	- require superuser power.
4672 	 *	- require strict serialization.
4673 	 *	- return a value
4674 	 */
4675 	case SIOCGMIIPHY:
4676 	case SIOCGMIIREG:
4677 	case SIOCSIFNAME:
4678 		if (!capable(CAP_NET_ADMIN))
4679 			return -EPERM;
4680 		dev_load(net, ifr.ifr_name);
4681 		rtnl_lock();
4682 		ret = dev_ifsioc(net, &ifr, cmd);
4683 		rtnl_unlock();
4684 		if (!ret) {
4685 			if (colon)
4686 				*colon = ':';
4687 			if (copy_to_user(arg, &ifr,
4688 					 sizeof(struct ifreq)))
4689 				ret = -EFAULT;
4690 		}
4691 		return ret;
4692 
4693 	/*
4694 	 *	These ioctl calls:
4695 	 *	- require superuser power.
4696 	 *	- require strict serialization.
4697 	 *	- do not return a value
4698 	 */
4699 	case SIOCSIFFLAGS:
4700 	case SIOCSIFMETRIC:
4701 	case SIOCSIFMTU:
4702 	case SIOCSIFMAP:
4703 	case SIOCSIFHWADDR:
4704 	case SIOCSIFSLAVE:
4705 	case SIOCADDMULTI:
4706 	case SIOCDELMULTI:
4707 	case SIOCSIFHWBROADCAST:
4708 	case SIOCSIFTXQLEN:
4709 	case SIOCSMIIREG:
4710 	case SIOCBONDENSLAVE:
4711 	case SIOCBONDRELEASE:
4712 	case SIOCBONDSETHWADDR:
4713 	case SIOCBONDCHANGEACTIVE:
4714 	case SIOCBRADDIF:
4715 	case SIOCBRDELIF:
4716 	case SIOCSHWTSTAMP:
4717 		if (!capable(CAP_NET_ADMIN))
4718 			return -EPERM;
4719 		/* fall through */
4720 	case SIOCBONDSLAVEINFOQUERY:
4721 	case SIOCBONDINFOQUERY:
4722 		dev_load(net, ifr.ifr_name);
4723 		rtnl_lock();
4724 		ret = dev_ifsioc(net, &ifr, cmd);
4725 		rtnl_unlock();
4726 		return ret;
4727 
4728 	case SIOCGIFMEM:
4729 		/* Get the per device memory space. We can add this but
4730 		 * currently do not support it */
4731 	case SIOCSIFMEM:
4732 		/* Set the per device memory buffer space.
4733 		 * Not applicable in our case */
4734 	case SIOCSIFLINK:
4735 		return -EINVAL;
4736 
4737 	/*
4738 	 *	Unknown or private ioctl.
4739 	 */
4740 	default:
4741 		if (cmd == SIOCWANDEV ||
4742 		    (cmd >= SIOCDEVPRIVATE &&
4743 		     cmd <= SIOCDEVPRIVATE + 15)) {
4744 			dev_load(net, ifr.ifr_name);
4745 			rtnl_lock();
4746 			ret = dev_ifsioc(net, &ifr, cmd);
4747 			rtnl_unlock();
4748 			if (!ret && copy_to_user(arg, &ifr,
4749 						 sizeof(struct ifreq)))
4750 				ret = -EFAULT;
4751 			return ret;
4752 		}
4753 		/* Take care of Wireless Extensions */
4754 		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4755 			return wext_handle_ioctl(net, &ifr, cmd, arg);
4756 		return -EINVAL;
4757 	}
4758 }
4759 
4760 
4761 /**
4762  *	dev_new_index	-	allocate an ifindex
4763  *	@net: the applicable net namespace
4764  *
4765  *	Returns a suitable unique value for a new device interface
4766  *	number.  The caller must hold the rtnl semaphore or the
4767  *	dev_base_lock to be sure it remains unique.
4768  */
4769 static int dev_new_index(struct net *net)
4770 {
4771 	static int ifindex;
4772 	for (;;) {
4773 		if (++ifindex <= 0)
4774 			ifindex = 1;
4775 		if (!__dev_get_by_index(net, ifindex))
4776 			return ifindex;
4777 	}
4778 }
4779 
4780 /* Delayed registration/unregisteration */
4781 static LIST_HEAD(net_todo_list);
4782 
4783 static void net_set_todo(struct net_device *dev)
4784 {
4785 	list_add_tail(&dev->todo_list, &net_todo_list);
4786 }
4787 
4788 static void rollback_registered_many(struct list_head *head)
4789 {
4790 	struct net_device *dev, *tmp;
4791 
4792 	BUG_ON(dev_boot_phase);
4793 	ASSERT_RTNL();
4794 
4795 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4796 		/* Some devices call without registering
4797 		 * for initialization unwind. Remove those
4798 		 * devices and proceed with the remaining.
4799 		 */
4800 		if (dev->reg_state == NETREG_UNINITIALIZED) {
4801 			pr_debug("unregister_netdevice: device %s/%p never "
4802 				 "was registered\n", dev->name, dev);
4803 
4804 			WARN_ON(1);
4805 			list_del(&dev->unreg_list);
4806 			continue;
4807 		}
4808 
4809 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
4810 
4811 		/* If device is running, close it first. */
4812 		dev_close(dev);
4813 
4814 		/* And unlink it from device chain. */
4815 		unlist_netdevice(dev);
4816 
4817 		dev->reg_state = NETREG_UNREGISTERING;
4818 	}
4819 
4820 	synchronize_net();
4821 
4822 	list_for_each_entry(dev, head, unreg_list) {
4823 		/* Shutdown queueing discipline. */
4824 		dev_shutdown(dev);
4825 
4826 
4827 		/* Notify protocols, that we are about to destroy
4828 		   this device. They should clean all the things.
4829 		*/
4830 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4831 
4832 		if (!dev->rtnl_link_ops ||
4833 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4834 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4835 
4836 		/*
4837 		 *	Flush the unicast and multicast chains
4838 		 */
4839 		dev_uc_flush(dev);
4840 		dev_mc_flush(dev);
4841 
4842 		if (dev->netdev_ops->ndo_uninit)
4843 			dev->netdev_ops->ndo_uninit(dev);
4844 
4845 		/* Notifier chain MUST detach us from master device. */
4846 		WARN_ON(dev->master);
4847 
4848 		/* Remove entries from kobject tree */
4849 		netdev_unregister_kobject(dev);
4850 	}
4851 
4852 	/* Process any work delayed until the end of the batch */
4853 	dev = list_first_entry(head, struct net_device, unreg_list);
4854 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4855 
4856 	rcu_barrier();
4857 
4858 	list_for_each_entry(dev, head, unreg_list)
4859 		dev_put(dev);
4860 }
4861 
4862 static void rollback_registered(struct net_device *dev)
4863 {
4864 	LIST_HEAD(single);
4865 
4866 	list_add(&dev->unreg_list, &single);
4867 	rollback_registered_many(&single);
4868 }
4869 
4870 static void __netdev_init_queue_locks_one(struct net_device *dev,
4871 					  struct netdev_queue *dev_queue,
4872 					  void *_unused)
4873 {
4874 	spin_lock_init(&dev_queue->_xmit_lock);
4875 	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4876 	dev_queue->xmit_lock_owner = -1;
4877 }
4878 
4879 static void netdev_init_queue_locks(struct net_device *dev)
4880 {
4881 	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4882 	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4883 }
4884 
4885 unsigned long netdev_fix_features(unsigned long features, const char *name)
4886 {
4887 	/* Fix illegal SG+CSUM combinations. */
4888 	if ((features & NETIF_F_SG) &&
4889 	    !(features & NETIF_F_ALL_CSUM)) {
4890 		if (name)
4891 			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4892 			       "checksum feature.\n", name);
4893 		features &= ~NETIF_F_SG;
4894 	}
4895 
4896 	/* TSO requires that SG is present as well. */
4897 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4898 		if (name)
4899 			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4900 			       "SG feature.\n", name);
4901 		features &= ~NETIF_F_TSO;
4902 	}
4903 
4904 	if (features & NETIF_F_UFO) {
4905 		if (!(features & NETIF_F_GEN_CSUM)) {
4906 			if (name)
4907 				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4908 				       "since no NETIF_F_HW_CSUM feature.\n",
4909 				       name);
4910 			features &= ~NETIF_F_UFO;
4911 		}
4912 
4913 		if (!(features & NETIF_F_SG)) {
4914 			if (name)
4915 				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4916 				       "since no NETIF_F_SG feature.\n", name);
4917 			features &= ~NETIF_F_UFO;
4918 		}
4919 	}
4920 
4921 	return features;
4922 }
4923 EXPORT_SYMBOL(netdev_fix_features);
4924 
4925 /**
4926  *	netif_stacked_transfer_operstate -	transfer operstate
4927  *	@rootdev: the root or lower level device to transfer state from
4928  *	@dev: the device to transfer operstate to
4929  *
4930  *	Transfer operational state from root to device. This is normally
4931  *	called when a stacking relationship exists between the root
4932  *	device and the device(a leaf device).
4933  */
4934 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4935 					struct net_device *dev)
4936 {
4937 	if (rootdev->operstate == IF_OPER_DORMANT)
4938 		netif_dormant_on(dev);
4939 	else
4940 		netif_dormant_off(dev);
4941 
4942 	if (netif_carrier_ok(rootdev)) {
4943 		if (!netif_carrier_ok(dev))
4944 			netif_carrier_on(dev);
4945 	} else {
4946 		if (netif_carrier_ok(dev))
4947 			netif_carrier_off(dev);
4948 	}
4949 }
4950 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4951 
4952 /**
4953  *	register_netdevice	- register a network device
4954  *	@dev: device to register
4955  *
4956  *	Take a completed network device structure and add it to the kernel
4957  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4958  *	chain. 0 is returned on success. A negative errno code is returned
4959  *	on a failure to set up the device, or if the name is a duplicate.
4960  *
4961  *	Callers must hold the rtnl semaphore. You may want
4962  *	register_netdev() instead of this.
4963  *
4964  *	BUGS:
4965  *	The locking appears insufficient to guarantee two parallel registers
4966  *	will not get the same name.
4967  */
4968 
4969 int register_netdevice(struct net_device *dev)
4970 {
4971 	int ret;
4972 	struct net *net = dev_net(dev);
4973 
4974 	BUG_ON(dev_boot_phase);
4975 	ASSERT_RTNL();
4976 
4977 	might_sleep();
4978 
4979 	/* When net_device's are persistent, this will be fatal. */
4980 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4981 	BUG_ON(!net);
4982 
4983 	spin_lock_init(&dev->addr_list_lock);
4984 	netdev_set_addr_lockdep_class(dev);
4985 	netdev_init_queue_locks(dev);
4986 
4987 	dev->iflink = -1;
4988 
4989 #ifdef CONFIG_RPS
4990 	if (!dev->num_rx_queues) {
4991 		/*
4992 		 * Allocate a single RX queue if driver never called
4993 		 * alloc_netdev_mq
4994 		 */
4995 
4996 		dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
4997 		if (!dev->_rx) {
4998 			ret = -ENOMEM;
4999 			goto out;
5000 		}
5001 
5002 		dev->_rx->first = dev->_rx;
5003 		atomic_set(&dev->_rx->count, 1);
5004 		dev->num_rx_queues = 1;
5005 	}
5006 #endif
5007 	/* Init, if this function is available */
5008 	if (dev->netdev_ops->ndo_init) {
5009 		ret = dev->netdev_ops->ndo_init(dev);
5010 		if (ret) {
5011 			if (ret > 0)
5012 				ret = -EIO;
5013 			goto out;
5014 		}
5015 	}
5016 
5017 	ret = dev_get_valid_name(dev, dev->name, 0);
5018 	if (ret)
5019 		goto err_uninit;
5020 
5021 	dev->ifindex = dev_new_index(net);
5022 	if (dev->iflink == -1)
5023 		dev->iflink = dev->ifindex;
5024 
5025 	/* Fix illegal checksum combinations */
5026 	if ((dev->features & NETIF_F_HW_CSUM) &&
5027 	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5028 		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5029 		       dev->name);
5030 		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5031 	}
5032 
5033 	if ((dev->features & NETIF_F_NO_CSUM) &&
5034 	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5035 		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5036 		       dev->name);
5037 		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5038 	}
5039 
5040 	dev->features = netdev_fix_features(dev->features, dev->name);
5041 
5042 	/* Enable software GSO if SG is supported. */
5043 	if (dev->features & NETIF_F_SG)
5044 		dev->features |= NETIF_F_GSO;
5045 
5046 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5047 	ret = notifier_to_errno(ret);
5048 	if (ret)
5049 		goto err_uninit;
5050 
5051 	ret = netdev_register_kobject(dev);
5052 	if (ret)
5053 		goto err_uninit;
5054 	dev->reg_state = NETREG_REGISTERED;
5055 
5056 	/*
5057 	 *	Default initial state at registry is that the
5058 	 *	device is present.
5059 	 */
5060 
5061 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5062 
5063 	dev_init_scheduler(dev);
5064 	dev_hold(dev);
5065 	list_netdevice(dev);
5066 
5067 	/* Notify protocols, that a new device appeared. */
5068 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5069 	ret = notifier_to_errno(ret);
5070 	if (ret) {
5071 		rollback_registered(dev);
5072 		dev->reg_state = NETREG_UNREGISTERED;
5073 	}
5074 	/*
5075 	 *	Prevent userspace races by waiting until the network
5076 	 *	device is fully setup before sending notifications.
5077 	 */
5078 	if (!dev->rtnl_link_ops ||
5079 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5080 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5081 
5082 out:
5083 	return ret;
5084 
5085 err_uninit:
5086 	if (dev->netdev_ops->ndo_uninit)
5087 		dev->netdev_ops->ndo_uninit(dev);
5088 	goto out;
5089 }
5090 EXPORT_SYMBOL(register_netdevice);
5091 
5092 /**
5093  *	init_dummy_netdev	- init a dummy network device for NAPI
5094  *	@dev: device to init
5095  *
5096  *	This takes a network device structure and initialize the minimum
5097  *	amount of fields so it can be used to schedule NAPI polls without
5098  *	registering a full blown interface. This is to be used by drivers
5099  *	that need to tie several hardware interfaces to a single NAPI
5100  *	poll scheduler due to HW limitations.
5101  */
5102 int init_dummy_netdev(struct net_device *dev)
5103 {
5104 	/* Clear everything. Note we don't initialize spinlocks
5105 	 * are they aren't supposed to be taken by any of the
5106 	 * NAPI code and this dummy netdev is supposed to be
5107 	 * only ever used for NAPI polls
5108 	 */
5109 	memset(dev, 0, sizeof(struct net_device));
5110 
5111 	/* make sure we BUG if trying to hit standard
5112 	 * register/unregister code path
5113 	 */
5114 	dev->reg_state = NETREG_DUMMY;
5115 
5116 	/* initialize the ref count */
5117 	atomic_set(&dev->refcnt, 1);
5118 
5119 	/* NAPI wants this */
5120 	INIT_LIST_HEAD(&dev->napi_list);
5121 
5122 	/* a dummy interface is started by default */
5123 	set_bit(__LINK_STATE_PRESENT, &dev->state);
5124 	set_bit(__LINK_STATE_START, &dev->state);
5125 
5126 	return 0;
5127 }
5128 EXPORT_SYMBOL_GPL(init_dummy_netdev);
5129 
5130 
5131 /**
5132  *	register_netdev	- register a network device
5133  *	@dev: device to register
5134  *
5135  *	Take a completed network device structure and add it to the kernel
5136  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5137  *	chain. 0 is returned on success. A negative errno code is returned
5138  *	on a failure to set up the device, or if the name is a duplicate.
5139  *
5140  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
5141  *	and expands the device name if you passed a format string to
5142  *	alloc_netdev.
5143  */
5144 int register_netdev(struct net_device *dev)
5145 {
5146 	int err;
5147 
5148 	rtnl_lock();
5149 
5150 	/*
5151 	 * If the name is a format string the caller wants us to do a
5152 	 * name allocation.
5153 	 */
5154 	if (strchr(dev->name, '%')) {
5155 		err = dev_alloc_name(dev, dev->name);
5156 		if (err < 0)
5157 			goto out;
5158 	}
5159 
5160 	err = register_netdevice(dev);
5161 out:
5162 	rtnl_unlock();
5163 	return err;
5164 }
5165 EXPORT_SYMBOL(register_netdev);
5166 
5167 /*
5168  * netdev_wait_allrefs - wait until all references are gone.
5169  *
5170  * This is called when unregistering network devices.
5171  *
5172  * Any protocol or device that holds a reference should register
5173  * for netdevice notification, and cleanup and put back the
5174  * reference if they receive an UNREGISTER event.
5175  * We can get stuck here if buggy protocols don't correctly
5176  * call dev_put.
5177  */
5178 static void netdev_wait_allrefs(struct net_device *dev)
5179 {
5180 	unsigned long rebroadcast_time, warning_time;
5181 
5182 	linkwatch_forget_dev(dev);
5183 
5184 	rebroadcast_time = warning_time = jiffies;
5185 	while (atomic_read(&dev->refcnt) != 0) {
5186 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5187 			rtnl_lock();
5188 
5189 			/* Rebroadcast unregister notification */
5190 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5191 			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5192 			 * should have already handle it the first time */
5193 
5194 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5195 				     &dev->state)) {
5196 				/* We must not have linkwatch events
5197 				 * pending on unregister. If this
5198 				 * happens, we simply run the queue
5199 				 * unscheduled, resulting in a noop
5200 				 * for this device.
5201 				 */
5202 				linkwatch_run_queue();
5203 			}
5204 
5205 			__rtnl_unlock();
5206 
5207 			rebroadcast_time = jiffies;
5208 		}
5209 
5210 		msleep(250);
5211 
5212 		if (time_after(jiffies, warning_time + 10 * HZ)) {
5213 			printk(KERN_EMERG "unregister_netdevice: "
5214 			       "waiting for %s to become free. Usage "
5215 			       "count = %d\n",
5216 			       dev->name, atomic_read(&dev->refcnt));
5217 			warning_time = jiffies;
5218 		}
5219 	}
5220 }
5221 
5222 /* The sequence is:
5223  *
5224  *	rtnl_lock();
5225  *	...
5226  *	register_netdevice(x1);
5227  *	register_netdevice(x2);
5228  *	...
5229  *	unregister_netdevice(y1);
5230  *	unregister_netdevice(y2);
5231  *      ...
5232  *	rtnl_unlock();
5233  *	free_netdev(y1);
5234  *	free_netdev(y2);
5235  *
5236  * We are invoked by rtnl_unlock().
5237  * This allows us to deal with problems:
5238  * 1) We can delete sysfs objects which invoke hotplug
5239  *    without deadlocking with linkwatch via keventd.
5240  * 2) Since we run with the RTNL semaphore not held, we can sleep
5241  *    safely in order to wait for the netdev refcnt to drop to zero.
5242  *
5243  * We must not return until all unregister events added during
5244  * the interval the lock was held have been completed.
5245  */
5246 void netdev_run_todo(void)
5247 {
5248 	struct list_head list;
5249 
5250 	/* Snapshot list, allow later requests */
5251 	list_replace_init(&net_todo_list, &list);
5252 
5253 	__rtnl_unlock();
5254 
5255 	while (!list_empty(&list)) {
5256 		struct net_device *dev
5257 			= list_first_entry(&list, struct net_device, todo_list);
5258 		list_del(&dev->todo_list);
5259 
5260 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5261 			printk(KERN_ERR "network todo '%s' but state %d\n",
5262 			       dev->name, dev->reg_state);
5263 			dump_stack();
5264 			continue;
5265 		}
5266 
5267 		dev->reg_state = NETREG_UNREGISTERED;
5268 
5269 		on_each_cpu(flush_backlog, dev, 1);
5270 
5271 		netdev_wait_allrefs(dev);
5272 
5273 		/* paranoia */
5274 		BUG_ON(atomic_read(&dev->refcnt));
5275 		WARN_ON(dev->ip_ptr);
5276 		WARN_ON(dev->ip6_ptr);
5277 		WARN_ON(dev->dn_ptr);
5278 
5279 		if (dev->destructor)
5280 			dev->destructor(dev);
5281 
5282 		/* Free network device */
5283 		kobject_put(&dev->dev.kobj);
5284 	}
5285 }
5286 
5287 /**
5288  *	dev_txq_stats_fold - fold tx_queues stats
5289  *	@dev: device to get statistics from
5290  *	@stats: struct rtnl_link_stats64 to hold results
5291  */
5292 void dev_txq_stats_fold(const struct net_device *dev,
5293 			struct rtnl_link_stats64 *stats)
5294 {
5295 	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5296 	unsigned int i;
5297 	struct netdev_queue *txq;
5298 
5299 	for (i = 0; i < dev->num_tx_queues; i++) {
5300 		txq = netdev_get_tx_queue(dev, i);
5301 		spin_lock_bh(&txq->_xmit_lock);
5302 		tx_bytes   += txq->tx_bytes;
5303 		tx_packets += txq->tx_packets;
5304 		tx_dropped += txq->tx_dropped;
5305 		spin_unlock_bh(&txq->_xmit_lock);
5306 	}
5307 	if (tx_bytes || tx_packets || tx_dropped) {
5308 		stats->tx_bytes   = tx_bytes;
5309 		stats->tx_packets = tx_packets;
5310 		stats->tx_dropped = tx_dropped;
5311 	}
5312 }
5313 EXPORT_SYMBOL(dev_txq_stats_fold);
5314 
5315 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
5316  * fields in the same order, with only the type differing.
5317  */
5318 static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5319 				    const struct net_device_stats *netdev_stats)
5320 {
5321 #if BITS_PER_LONG == 64
5322         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5323         memcpy(stats64, netdev_stats, sizeof(*stats64));
5324 #else
5325 	size_t i, n = sizeof(*stats64) / sizeof(u64);
5326 	const unsigned long *src = (const unsigned long *)netdev_stats;
5327 	u64 *dst = (u64 *)stats64;
5328 
5329 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5330 		     sizeof(*stats64) / sizeof(u64));
5331 	for (i = 0; i < n; i++)
5332 		dst[i] = src[i];
5333 #endif
5334 }
5335 
5336 /**
5337  *	dev_get_stats	- get network device statistics
5338  *	@dev: device to get statistics from
5339  *	@storage: place to store stats
5340  *
5341  *	Get network statistics from device. Return @storage.
5342  *	The device driver may provide its own method by setting
5343  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5344  *	otherwise the internal statistics structure is used.
5345  */
5346 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5347 					struct rtnl_link_stats64 *storage)
5348 {
5349 	const struct net_device_ops *ops = dev->netdev_ops;
5350 
5351 	if (ops->ndo_get_stats64) {
5352 		memset(storage, 0, sizeof(*storage));
5353 		return ops->ndo_get_stats64(dev, storage);
5354 	}
5355 	if (ops->ndo_get_stats) {
5356 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5357 		return storage;
5358 	}
5359 	netdev_stats_to_stats64(storage, &dev->stats);
5360 	dev_txq_stats_fold(dev, storage);
5361 	return storage;
5362 }
5363 EXPORT_SYMBOL(dev_get_stats);
5364 
5365 static void netdev_init_one_queue(struct net_device *dev,
5366 				  struct netdev_queue *queue,
5367 				  void *_unused)
5368 {
5369 	queue->dev = dev;
5370 }
5371 
5372 static void netdev_init_queues(struct net_device *dev)
5373 {
5374 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5375 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5376 	spin_lock_init(&dev->tx_global_lock);
5377 }
5378 
5379 /**
5380  *	alloc_netdev_mq - allocate network device
5381  *	@sizeof_priv:	size of private data to allocate space for
5382  *	@name:		device name format string
5383  *	@setup:		callback to initialize device
5384  *	@queue_count:	the number of subqueues to allocate
5385  *
5386  *	Allocates a struct net_device with private data area for driver use
5387  *	and performs basic initialization.  Also allocates subquue structs
5388  *	for each queue on the device at the end of the netdevice.
5389  */
5390 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5391 		void (*setup)(struct net_device *), unsigned int queue_count)
5392 {
5393 	struct netdev_queue *tx;
5394 	struct net_device *dev;
5395 	size_t alloc_size;
5396 	struct net_device *p;
5397 #ifdef CONFIG_RPS
5398 	struct netdev_rx_queue *rx;
5399 	int i;
5400 #endif
5401 
5402 	BUG_ON(strlen(name) >= sizeof(dev->name));
5403 
5404 	alloc_size = sizeof(struct net_device);
5405 	if (sizeof_priv) {
5406 		/* ensure 32-byte alignment of private area */
5407 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5408 		alloc_size += sizeof_priv;
5409 	}
5410 	/* ensure 32-byte alignment of whole construct */
5411 	alloc_size += NETDEV_ALIGN - 1;
5412 
5413 	p = kzalloc(alloc_size, GFP_KERNEL);
5414 	if (!p) {
5415 		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5416 		return NULL;
5417 	}
5418 
5419 	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5420 	if (!tx) {
5421 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
5422 		       "tx qdiscs.\n");
5423 		goto free_p;
5424 	}
5425 
5426 #ifdef CONFIG_RPS
5427 	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5428 	if (!rx) {
5429 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
5430 		       "rx queues.\n");
5431 		goto free_tx;
5432 	}
5433 
5434 	atomic_set(&rx->count, queue_count);
5435 
5436 	/*
5437 	 * Set a pointer to first element in the array which holds the
5438 	 * reference count.
5439 	 */
5440 	for (i = 0; i < queue_count; i++)
5441 		rx[i].first = rx;
5442 #endif
5443 
5444 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
5445 	dev->padded = (char *)dev - (char *)p;
5446 
5447 	if (dev_addr_init(dev))
5448 		goto free_rx;
5449 
5450 	dev_mc_init(dev);
5451 	dev_uc_init(dev);
5452 
5453 	dev_net_set(dev, &init_net);
5454 
5455 	dev->_tx = tx;
5456 	dev->num_tx_queues = queue_count;
5457 	dev->real_num_tx_queues = queue_count;
5458 
5459 #ifdef CONFIG_RPS
5460 	dev->_rx = rx;
5461 	dev->num_rx_queues = queue_count;
5462 #endif
5463 
5464 	dev->gso_max_size = GSO_MAX_SIZE;
5465 
5466 	netdev_init_queues(dev);
5467 
5468 	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5469 	dev->ethtool_ntuple_list.count = 0;
5470 	INIT_LIST_HEAD(&dev->napi_list);
5471 	INIT_LIST_HEAD(&dev->unreg_list);
5472 	INIT_LIST_HEAD(&dev->link_watch_list);
5473 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
5474 	setup(dev);
5475 	strcpy(dev->name, name);
5476 	return dev;
5477 
5478 free_rx:
5479 #ifdef CONFIG_RPS
5480 	kfree(rx);
5481 free_tx:
5482 #endif
5483 	kfree(tx);
5484 free_p:
5485 	kfree(p);
5486 	return NULL;
5487 }
5488 EXPORT_SYMBOL(alloc_netdev_mq);
5489 
5490 /**
5491  *	free_netdev - free network device
5492  *	@dev: device
5493  *
5494  *	This function does the last stage of destroying an allocated device
5495  * 	interface. The reference to the device object is released.
5496  *	If this is the last reference then it will be freed.
5497  */
5498 void free_netdev(struct net_device *dev)
5499 {
5500 	struct napi_struct *p, *n;
5501 
5502 	release_net(dev_net(dev));
5503 
5504 	kfree(dev->_tx);
5505 
5506 	/* Flush device addresses */
5507 	dev_addr_flush(dev);
5508 
5509 	/* Clear ethtool n-tuple list */
5510 	ethtool_ntuple_flush(dev);
5511 
5512 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5513 		netif_napi_del(p);
5514 
5515 	/*  Compatibility with error handling in drivers */
5516 	if (dev->reg_state == NETREG_UNINITIALIZED) {
5517 		kfree((char *)dev - dev->padded);
5518 		return;
5519 	}
5520 
5521 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5522 	dev->reg_state = NETREG_RELEASED;
5523 
5524 	/* will free via device release */
5525 	put_device(&dev->dev);
5526 }
5527 EXPORT_SYMBOL(free_netdev);
5528 
5529 /**
5530  *	synchronize_net -  Synchronize with packet receive processing
5531  *
5532  *	Wait for packets currently being received to be done.
5533  *	Does not block later packets from starting.
5534  */
5535 void synchronize_net(void)
5536 {
5537 	might_sleep();
5538 	synchronize_rcu();
5539 }
5540 EXPORT_SYMBOL(synchronize_net);
5541 
5542 /**
5543  *	unregister_netdevice_queue - remove device from the kernel
5544  *	@dev: device
5545  *	@head: list
5546  *
5547  *	This function shuts down a device interface and removes it
5548  *	from the kernel tables.
5549  *	If head not NULL, device is queued to be unregistered later.
5550  *
5551  *	Callers must hold the rtnl semaphore.  You may want
5552  *	unregister_netdev() instead of this.
5553  */
5554 
5555 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5556 {
5557 	ASSERT_RTNL();
5558 
5559 	if (head) {
5560 		list_move_tail(&dev->unreg_list, head);
5561 	} else {
5562 		rollback_registered(dev);
5563 		/* Finish processing unregister after unlock */
5564 		net_set_todo(dev);
5565 	}
5566 }
5567 EXPORT_SYMBOL(unregister_netdevice_queue);
5568 
5569 /**
5570  *	unregister_netdevice_many - unregister many devices
5571  *	@head: list of devices
5572  */
5573 void unregister_netdevice_many(struct list_head *head)
5574 {
5575 	struct net_device *dev;
5576 
5577 	if (!list_empty(head)) {
5578 		rollback_registered_many(head);
5579 		list_for_each_entry(dev, head, unreg_list)
5580 			net_set_todo(dev);
5581 	}
5582 }
5583 EXPORT_SYMBOL(unregister_netdevice_many);
5584 
5585 /**
5586  *	unregister_netdev - remove device from the kernel
5587  *	@dev: device
5588  *
5589  *	This function shuts down a device interface and removes it
5590  *	from the kernel tables.
5591  *
5592  *	This is just a wrapper for unregister_netdevice that takes
5593  *	the rtnl semaphore.  In general you want to use this and not
5594  *	unregister_netdevice.
5595  */
5596 void unregister_netdev(struct net_device *dev)
5597 {
5598 	rtnl_lock();
5599 	unregister_netdevice(dev);
5600 	rtnl_unlock();
5601 }
5602 EXPORT_SYMBOL(unregister_netdev);
5603 
5604 /**
5605  *	dev_change_net_namespace - move device to different nethost namespace
5606  *	@dev: device
5607  *	@net: network namespace
5608  *	@pat: If not NULL name pattern to try if the current device name
5609  *	      is already taken in the destination network namespace.
5610  *
5611  *	This function shuts down a device interface and moves it
5612  *	to a new network namespace. On success 0 is returned, on
5613  *	a failure a netagive errno code is returned.
5614  *
5615  *	Callers must hold the rtnl semaphore.
5616  */
5617 
5618 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5619 {
5620 	int err;
5621 
5622 	ASSERT_RTNL();
5623 
5624 	/* Don't allow namespace local devices to be moved. */
5625 	err = -EINVAL;
5626 	if (dev->features & NETIF_F_NETNS_LOCAL)
5627 		goto out;
5628 
5629 	/* Ensure the device has been registrered */
5630 	err = -EINVAL;
5631 	if (dev->reg_state != NETREG_REGISTERED)
5632 		goto out;
5633 
5634 	/* Get out if there is nothing todo */
5635 	err = 0;
5636 	if (net_eq(dev_net(dev), net))
5637 		goto out;
5638 
5639 	/* Pick the destination device name, and ensure
5640 	 * we can use it in the destination network namespace.
5641 	 */
5642 	err = -EEXIST;
5643 	if (__dev_get_by_name(net, dev->name)) {
5644 		/* We get here if we can't use the current device name */
5645 		if (!pat)
5646 			goto out;
5647 		if (dev_get_valid_name(dev, pat, 1))
5648 			goto out;
5649 	}
5650 
5651 	/*
5652 	 * And now a mini version of register_netdevice unregister_netdevice.
5653 	 */
5654 
5655 	/* If device is running close it first. */
5656 	dev_close(dev);
5657 
5658 	/* And unlink it from device chain */
5659 	err = -ENODEV;
5660 	unlist_netdevice(dev);
5661 
5662 	synchronize_net();
5663 
5664 	/* Shutdown queueing discipline. */
5665 	dev_shutdown(dev);
5666 
5667 	/* Notify protocols, that we are about to destroy
5668 	   this device. They should clean all the things.
5669 	*/
5670 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5671 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5672 
5673 	/*
5674 	 *	Flush the unicast and multicast chains
5675 	 */
5676 	dev_uc_flush(dev);
5677 	dev_mc_flush(dev);
5678 
5679 	/* Actually switch the network namespace */
5680 	dev_net_set(dev, net);
5681 
5682 	/* If there is an ifindex conflict assign a new one */
5683 	if (__dev_get_by_index(net, dev->ifindex)) {
5684 		int iflink = (dev->iflink == dev->ifindex);
5685 		dev->ifindex = dev_new_index(net);
5686 		if (iflink)
5687 			dev->iflink = dev->ifindex;
5688 	}
5689 
5690 	/* Fixup kobjects */
5691 	err = device_rename(&dev->dev, dev->name);
5692 	WARN_ON(err);
5693 
5694 	/* Add the device back in the hashes */
5695 	list_netdevice(dev);
5696 
5697 	/* Notify protocols, that a new device appeared. */
5698 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
5699 
5700 	/*
5701 	 *	Prevent userspace races by waiting until the network
5702 	 *	device is fully setup before sending notifications.
5703 	 */
5704 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5705 
5706 	synchronize_net();
5707 	err = 0;
5708 out:
5709 	return err;
5710 }
5711 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
5712 
5713 static int dev_cpu_callback(struct notifier_block *nfb,
5714 			    unsigned long action,
5715 			    void *ocpu)
5716 {
5717 	struct sk_buff **list_skb;
5718 	struct sk_buff *skb;
5719 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
5720 	struct softnet_data *sd, *oldsd;
5721 
5722 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5723 		return NOTIFY_OK;
5724 
5725 	local_irq_disable();
5726 	cpu = smp_processor_id();
5727 	sd = &per_cpu(softnet_data, cpu);
5728 	oldsd = &per_cpu(softnet_data, oldcpu);
5729 
5730 	/* Find end of our completion_queue. */
5731 	list_skb = &sd->completion_queue;
5732 	while (*list_skb)
5733 		list_skb = &(*list_skb)->next;
5734 	/* Append completion queue from offline CPU. */
5735 	*list_skb = oldsd->completion_queue;
5736 	oldsd->completion_queue = NULL;
5737 
5738 	/* Append output queue from offline CPU. */
5739 	if (oldsd->output_queue) {
5740 		*sd->output_queue_tailp = oldsd->output_queue;
5741 		sd->output_queue_tailp = oldsd->output_queue_tailp;
5742 		oldsd->output_queue = NULL;
5743 		oldsd->output_queue_tailp = &oldsd->output_queue;
5744 	}
5745 
5746 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
5747 	local_irq_enable();
5748 
5749 	/* Process offline CPU's input_pkt_queue */
5750 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
5751 		netif_rx(skb);
5752 		input_queue_head_incr(oldsd);
5753 	}
5754 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
5755 		netif_rx(skb);
5756 		input_queue_head_incr(oldsd);
5757 	}
5758 
5759 	return NOTIFY_OK;
5760 }
5761 
5762 
5763 /**
5764  *	netdev_increment_features - increment feature set by one
5765  *	@all: current feature set
5766  *	@one: new feature set
5767  *	@mask: mask feature set
5768  *
5769  *	Computes a new feature set after adding a device with feature set
5770  *	@one to the master device with current feature set @all.  Will not
5771  *	enable anything that is off in @mask. Returns the new feature set.
5772  */
5773 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5774 					unsigned long mask)
5775 {
5776 	/* If device needs checksumming, downgrade to it. */
5777 	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5778 		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5779 	else if (mask & NETIF_F_ALL_CSUM) {
5780 		/* If one device supports v4/v6 checksumming, set for all. */
5781 		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5782 		    !(all & NETIF_F_GEN_CSUM)) {
5783 			all &= ~NETIF_F_ALL_CSUM;
5784 			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5785 		}
5786 
5787 		/* If one device supports hw checksumming, set for all. */
5788 		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5789 			all &= ~NETIF_F_ALL_CSUM;
5790 			all |= NETIF_F_HW_CSUM;
5791 		}
5792 	}
5793 
5794 	one |= NETIF_F_ALL_CSUM;
5795 
5796 	one |= all & NETIF_F_ONE_FOR_ALL;
5797 	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
5798 	all |= one & mask & NETIF_F_ONE_FOR_ALL;
5799 
5800 	return all;
5801 }
5802 EXPORT_SYMBOL(netdev_increment_features);
5803 
5804 static struct hlist_head *netdev_create_hash(void)
5805 {
5806 	int i;
5807 	struct hlist_head *hash;
5808 
5809 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5810 	if (hash != NULL)
5811 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
5812 			INIT_HLIST_HEAD(&hash[i]);
5813 
5814 	return hash;
5815 }
5816 
5817 /* Initialize per network namespace state */
5818 static int __net_init netdev_init(struct net *net)
5819 {
5820 	INIT_LIST_HEAD(&net->dev_base_head);
5821 
5822 	net->dev_name_head = netdev_create_hash();
5823 	if (net->dev_name_head == NULL)
5824 		goto err_name;
5825 
5826 	net->dev_index_head = netdev_create_hash();
5827 	if (net->dev_index_head == NULL)
5828 		goto err_idx;
5829 
5830 	return 0;
5831 
5832 err_idx:
5833 	kfree(net->dev_name_head);
5834 err_name:
5835 	return -ENOMEM;
5836 }
5837 
5838 /**
5839  *	netdev_drivername - network driver for the device
5840  *	@dev: network device
5841  *	@buffer: buffer for resulting name
5842  *	@len: size of buffer
5843  *
5844  *	Determine network driver for device.
5845  */
5846 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5847 {
5848 	const struct device_driver *driver;
5849 	const struct device *parent;
5850 
5851 	if (len <= 0 || !buffer)
5852 		return buffer;
5853 	buffer[0] = 0;
5854 
5855 	parent = dev->dev.parent;
5856 
5857 	if (!parent)
5858 		return buffer;
5859 
5860 	driver = parent->driver;
5861 	if (driver && driver->name)
5862 		strlcpy(buffer, driver->name, len);
5863 	return buffer;
5864 }
5865 
5866 static int __netdev_printk(const char *level, const struct net_device *dev,
5867 			   struct va_format *vaf)
5868 {
5869 	int r;
5870 
5871 	if (dev && dev->dev.parent)
5872 		r = dev_printk(level, dev->dev.parent, "%s: %pV",
5873 			       netdev_name(dev), vaf);
5874 	else if (dev)
5875 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
5876 	else
5877 		r = printk("%s(NULL net_device): %pV", level, vaf);
5878 
5879 	return r;
5880 }
5881 
5882 int netdev_printk(const char *level, const struct net_device *dev,
5883 		  const char *format, ...)
5884 {
5885 	struct va_format vaf;
5886 	va_list args;
5887 	int r;
5888 
5889 	va_start(args, format);
5890 
5891 	vaf.fmt = format;
5892 	vaf.va = &args;
5893 
5894 	r = __netdev_printk(level, dev, &vaf);
5895 	va_end(args);
5896 
5897 	return r;
5898 }
5899 EXPORT_SYMBOL(netdev_printk);
5900 
5901 #define define_netdev_printk_level(func, level)			\
5902 int func(const struct net_device *dev, const char *fmt, ...)	\
5903 {								\
5904 	int r;							\
5905 	struct va_format vaf;					\
5906 	va_list args;						\
5907 								\
5908 	va_start(args, fmt);					\
5909 								\
5910 	vaf.fmt = fmt;						\
5911 	vaf.va = &args;						\
5912 								\
5913 	r = __netdev_printk(level, dev, &vaf);			\
5914 	va_end(args);						\
5915 								\
5916 	return r;						\
5917 }								\
5918 EXPORT_SYMBOL(func);
5919 
5920 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
5921 define_netdev_printk_level(netdev_alert, KERN_ALERT);
5922 define_netdev_printk_level(netdev_crit, KERN_CRIT);
5923 define_netdev_printk_level(netdev_err, KERN_ERR);
5924 define_netdev_printk_level(netdev_warn, KERN_WARNING);
5925 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
5926 define_netdev_printk_level(netdev_info, KERN_INFO);
5927 
5928 static void __net_exit netdev_exit(struct net *net)
5929 {
5930 	kfree(net->dev_name_head);
5931 	kfree(net->dev_index_head);
5932 }
5933 
5934 static struct pernet_operations __net_initdata netdev_net_ops = {
5935 	.init = netdev_init,
5936 	.exit = netdev_exit,
5937 };
5938 
5939 static void __net_exit default_device_exit(struct net *net)
5940 {
5941 	struct net_device *dev, *aux;
5942 	/*
5943 	 * Push all migratable network devices back to the
5944 	 * initial network namespace
5945 	 */
5946 	rtnl_lock();
5947 	for_each_netdev_safe(net, dev, aux) {
5948 		int err;
5949 		char fb_name[IFNAMSIZ];
5950 
5951 		/* Ignore unmoveable devices (i.e. loopback) */
5952 		if (dev->features & NETIF_F_NETNS_LOCAL)
5953 			continue;
5954 
5955 		/* Leave virtual devices for the generic cleanup */
5956 		if (dev->rtnl_link_ops)
5957 			continue;
5958 
5959 		/* Push remaing network devices to init_net */
5960 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5961 		err = dev_change_net_namespace(dev, &init_net, fb_name);
5962 		if (err) {
5963 			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
5964 				__func__, dev->name, err);
5965 			BUG();
5966 		}
5967 	}
5968 	rtnl_unlock();
5969 }
5970 
5971 static void __net_exit default_device_exit_batch(struct list_head *net_list)
5972 {
5973 	/* At exit all network devices most be removed from a network
5974 	 * namespace.  Do this in the reverse order of registeration.
5975 	 * Do this across as many network namespaces as possible to
5976 	 * improve batching efficiency.
5977 	 */
5978 	struct net_device *dev;
5979 	struct net *net;
5980 	LIST_HEAD(dev_kill_list);
5981 
5982 	rtnl_lock();
5983 	list_for_each_entry(net, net_list, exit_list) {
5984 		for_each_netdev_reverse(net, dev) {
5985 			if (dev->rtnl_link_ops)
5986 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5987 			else
5988 				unregister_netdevice_queue(dev, &dev_kill_list);
5989 		}
5990 	}
5991 	unregister_netdevice_many(&dev_kill_list);
5992 	rtnl_unlock();
5993 }
5994 
5995 static struct pernet_operations __net_initdata default_device_ops = {
5996 	.exit = default_device_exit,
5997 	.exit_batch = default_device_exit_batch,
5998 };
5999 
6000 /*
6001  *	Initialize the DEV module. At boot time this walks the device list and
6002  *	unhooks any devices that fail to initialise (normally hardware not
6003  *	present) and leaves us with a valid list of present and active devices.
6004  *
6005  */
6006 
6007 /*
6008  *       This is called single threaded during boot, so no need
6009  *       to take the rtnl semaphore.
6010  */
6011 static int __init net_dev_init(void)
6012 {
6013 	int i, rc = -ENOMEM;
6014 
6015 	BUG_ON(!dev_boot_phase);
6016 
6017 	if (dev_proc_init())
6018 		goto out;
6019 
6020 	if (netdev_kobject_init())
6021 		goto out;
6022 
6023 	INIT_LIST_HEAD(&ptype_all);
6024 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
6025 		INIT_LIST_HEAD(&ptype_base[i]);
6026 
6027 	if (register_pernet_subsys(&netdev_net_ops))
6028 		goto out;
6029 
6030 	/*
6031 	 *	Initialise the packet receive queues.
6032 	 */
6033 
6034 	for_each_possible_cpu(i) {
6035 		struct softnet_data *sd = &per_cpu(softnet_data, i);
6036 
6037 		memset(sd, 0, sizeof(*sd));
6038 		skb_queue_head_init(&sd->input_pkt_queue);
6039 		skb_queue_head_init(&sd->process_queue);
6040 		sd->completion_queue = NULL;
6041 		INIT_LIST_HEAD(&sd->poll_list);
6042 		sd->output_queue = NULL;
6043 		sd->output_queue_tailp = &sd->output_queue;
6044 #ifdef CONFIG_RPS
6045 		sd->csd.func = rps_trigger_softirq;
6046 		sd->csd.info = sd;
6047 		sd->csd.flags = 0;
6048 		sd->cpu = i;
6049 #endif
6050 
6051 		sd->backlog.poll = process_backlog;
6052 		sd->backlog.weight = weight_p;
6053 		sd->backlog.gro_list = NULL;
6054 		sd->backlog.gro_count = 0;
6055 	}
6056 
6057 	dev_boot_phase = 0;
6058 
6059 	/* The loopback device is special if any other network devices
6060 	 * is present in a network namespace the loopback device must
6061 	 * be present. Since we now dynamically allocate and free the
6062 	 * loopback device ensure this invariant is maintained by
6063 	 * keeping the loopback device as the first device on the
6064 	 * list of network devices.  Ensuring the loopback devices
6065 	 * is the first device that appears and the last network device
6066 	 * that disappears.
6067 	 */
6068 	if (register_pernet_device(&loopback_net_ops))
6069 		goto out;
6070 
6071 	if (register_pernet_device(&default_device_ops))
6072 		goto out;
6073 
6074 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6075 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6076 
6077 	hotcpu_notifier(dev_cpu_callback, 0);
6078 	dst_init();
6079 	dev_mcast_init();
6080 	rc = 0;
6081 out:
6082 	return rc;
6083 }
6084 
6085 subsys_initcall(net_dev_init);
6086 
6087 static int __init initialize_hashrnd(void)
6088 {
6089 	get_random_bytes(&hashrnd, sizeof(hashrnd));
6090 	return 0;
6091 }
6092 
6093 late_initcall_sync(initialize_hashrnd);
6094 
6095