xref: /linux/net/core/dev.c (revision 4f1933620f57145212cdbb1ac6ce099eeeb21c5a)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/config.h>
80 #include <linux/cpu.h>
81 #include <linux/types.h>
82 #include <linux/kernel.h>
83 #include <linux/sched.h>
84 #include <linux/string.h>
85 #include <linux/mm.h>
86 #include <linux/socket.h>
87 #include <linux/sockios.h>
88 #include <linux/errno.h>
89 #include <linux/interrupt.h>
90 #include <linux/if_ether.h>
91 #include <linux/netdevice.h>
92 #include <linux/etherdevice.h>
93 #include <linux/notifier.h>
94 #include <linux/skbuff.h>
95 #include <net/sock.h>
96 #include <linux/rtnetlink.h>
97 #include <linux/proc_fs.h>
98 #include <linux/seq_file.h>
99 #include <linux/stat.h>
100 #include <linux/if_bridge.h>
101 #include <linux/divert.h>
102 #include <net/dst.h>
103 #include <net/pkt_sched.h>
104 #include <net/checksum.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/kmod.h>
108 #include <linux/module.h>
109 #include <linux/kallsyms.h>
110 #include <linux/netpoll.h>
111 #include <linux/rcupdate.h>
112 #include <linux/delay.h>
113 #ifdef CONFIG_NET_RADIO
114 #include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
115 #include <net/iw_handler.h>
116 #endif	/* CONFIG_NET_RADIO */
117 #include <asm/current.h>
118 
119 /*
120  *	The list of packet types we will receive (as opposed to discard)
121  *	and the routines to invoke.
122  *
123  *	Why 16? Because with 16 the only overlap we get on a hash of the
124  *	low nibble of the protocol value is RARP/SNAP/X.25.
125  *
126  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
127  *             sure which should go first, but I bet it won't make much
128  *             difference if we are running VLANs.  The good news is that
129  *             this protocol won't be in the list unless compiled in, so
130  *             the average user (w/out VLANs) will not be adversely affected.
131  *             --BLG
132  *
133  *		0800	IP
134  *		8100    802.1Q VLAN
135  *		0001	802.3
136  *		0002	AX.25
137  *		0004	802.2
138  *		8035	RARP
139  *		0005	SNAP
140  *		0805	X.25
141  *		0806	ARP
142  *		8137	IPX
143  *		0009	Localtalk
144  *		86DD	IPv6
145  */
146 
147 static DEFINE_SPINLOCK(ptype_lock);
148 static struct list_head ptype_base[16];	/* 16 way hashed list */
149 static struct list_head ptype_all;		/* Taps */
150 
151 /*
152  * The @dev_base list is protected by @dev_base_lock and the rtnl
153  * semaphore.
154  *
155  * Pure readers hold dev_base_lock for reading.
156  *
157  * Writers must hold the rtnl semaphore while they loop through the
158  * dev_base list, and hold dev_base_lock for writing when they do the
159  * actual updates.  This allows pure readers to access the list even
160  * while a writer is preparing to update it.
161  *
162  * To put it another way, dev_base_lock is held for writing only to
163  * protect against pure readers; the rtnl semaphore provides the
164  * protection against other writers.
165  *
166  * For example usages, see register_netdevice() and
167  * unregister_netdevice(), which must be called with the rtnl
168  * semaphore held.
169  */
170 struct net_device *dev_base;
171 static struct net_device **dev_tail = &dev_base;
172 DEFINE_RWLOCK(dev_base_lock);
173 
174 EXPORT_SYMBOL(dev_base);
175 EXPORT_SYMBOL(dev_base_lock);
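/*
 * Illustrative sketch only (not part of this file's interface): a pure
 * reader walking the dev_base list needs only dev_base_lock for reading,
 * per the locking rules described above.  do_something_readonly() is a
 * hypothetical helper.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for (dev = dev_base; dev; dev = dev->next)
 *		do_something_readonly(dev);
 *	read_unlock(&dev_base_lock);
 */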
176 
177 #define NETDEV_HASHBITS	8
178 static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
179 static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
180 
181 static inline struct hlist_head *dev_name_hash(const char *name)
182 {
183 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
184 	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
185 }
186 
187 static inline struct hlist_head *dev_index_hash(int ifindex)
188 {
189 	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
190 }
191 
192 /*
193  *	Our notifier list
194  */
195 
196 static struct notifier_block *netdev_chain;
197 
198 /*
199  *	Device drivers call our routines to queue packets here. We empty the
200  *	queue in the local softnet handler.
201  */
202 DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
203 
204 #ifdef CONFIG_SYSFS
205 extern int netdev_sysfs_init(void);
206 extern int netdev_register_sysfs(struct net_device *);
207 extern void netdev_unregister_sysfs(struct net_device *);
208 #else
209 #define netdev_sysfs_init()	 	(0)
210 #define netdev_register_sysfs(dev)	(0)
211 #define	netdev_unregister_sysfs(dev)	do { } while(0)
212 #endif
213 
214 
215 /*******************************************************************************
216 
217 		Protocol management and registration routines
218 
219 *******************************************************************************/
220 
221 /*
222  *	For efficiency
223  */
224 
225 int netdev_nit;
226 
227 /*
228  *	Add a protocol ID to the list. Now that the input handler is
229  *	smarter we can dispense with all the messy stuff that used to be
230  *	here.
231  *
232  *	BEWARE!!! Protocol handlers that mangle input packets
233  *	MUST BE last in the hash buckets, and the checking of protocol
234  *	handlers MUST start from the promiscuous ptype_all chain in
235  *	net_bh. This is true now; do not change it.
236  *	Explanation: if a protocol handler that mangles packets were
237  *	first on the list, it could not sense that the packet is cloned
238  *	and should be copied-on-write; it would change the packet and
239  *	subsequent readers would get a broken packet.
240  *							--ANK (980803)
241  */
242 
243 /**
244  *	dev_add_pack - add packet handler
245  *	@pt: packet type declaration
246  *
247  *	Add a protocol handler to the networking stack. The passed &packet_type
248  *	is linked into kernel lists and may not be freed until it has been
249  *	removed from the kernel lists.
250  *
251  *	This call does not sleep, therefore it cannot
252  *	guarantee that all CPUs that are in the middle of receiving packets
253  *	will see the new packet type (until the next received packet).
254  */
255 
256 void dev_add_pack(struct packet_type *pt)
257 {
258 	int hash;
259 
260 	spin_lock_bh(&ptype_lock);
261 	if (pt->type == htons(ETH_P_ALL)) {
262 		netdev_nit++;
263 		list_add_rcu(&pt->list, &ptype_all);
264 	} else {
265 		hash = ntohs(pt->type) & 15;
266 		list_add_rcu(&pt->list, &ptype_base[hash]);
267 	}
268 	spin_unlock_bh(&ptype_lock);
269 }
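/*
 * Illustrative sketch only: registering a tap for every protocol.  The
 * handler and packet_type names are hypothetical; the &packet_type must
 * stay allocated until dev_remove_pack() has returned.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);
 */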
270 
271 /**
272  *	__dev_remove_pack	 - remove packet handler
273  *	@pt: packet type declaration
274  *
275  *	Remove a protocol handler that was previously added to the kernel
276  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
277  *	from the kernel lists and can be freed or reused once this function
278  *	returns.
279  *
280  *      The packet type might still be in use by receivers
281  *	and must not be freed until after all the CPUs have gone
282  *	through a quiescent state.
283  */
284 void __dev_remove_pack(struct packet_type *pt)
285 {
286 	struct list_head *head;
287 	struct packet_type *pt1;
288 
289 	spin_lock_bh(&ptype_lock);
290 
291 	if (pt->type == htons(ETH_P_ALL)) {
292 		netdev_nit--;
293 		head = &ptype_all;
294 	} else
295 		head = &ptype_base[ntohs(pt->type) & 15];
296 
297 	list_for_each_entry(pt1, head, list) {
298 		if (pt == pt1) {
299 			list_del_rcu(&pt->list);
300 			goto out;
301 		}
302 	}
303 
304 	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
305 out:
306 	spin_unlock_bh(&ptype_lock);
307 }
308 /**
309  *	dev_remove_pack	 - remove packet handler
310  *	@pt: packet type declaration
311  *
312  *	Remove a protocol handler that was previously added to the kernel
313  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
314  *	from the kernel lists and can be freed or reused once this function
315  *	returns.
316  *
317  *	This call sleeps to guarantee that no CPU is looking at the packet
318  *	type after return.
319  */
320 void dev_remove_pack(struct packet_type *pt)
321 {
322 	__dev_remove_pack(pt);
323 
324 	synchronize_net();
325 }
326 
327 /******************************************************************************
328 
329 		      Device Boot-time Settings Routines
330 
331 *******************************************************************************/
332 
333 /* Boot time configuration table */
334 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
335 
336 /**
337  *	netdev_boot_setup_add	- add new setup entry
338  *	@name: name of the device
339  *	@map: configured settings for the device
340  *
341  *	Adds a new setup entry to the dev_boot_setup list.  The function
342  *	returns 0 on error and 1 on success.  This is a generic routine for
343  *	all netdevices.
344  */
345 static int netdev_boot_setup_add(char *name, struct ifmap *map)
346 {
347 	struct netdev_boot_setup *s;
348 	int i;
349 
350 	s = dev_boot_setup;
351 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
352 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
353 			memset(s[i].name, 0, sizeof(s[i].name));
354 			strcpy(s[i].name, name);
355 			memcpy(&s[i].map, map, sizeof(s[i].map));
356 			break;
357 		}
358 	}
359 
360 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
361 }
362 
363 /**
364  *	netdev_boot_setup_check	- check boot time settings
365  *	@dev: the netdevice
366  *
367  * 	Check boot time settings for the device.
368  *	Any settings found are applied to the device, to be used
369  *	later during device probing.
370  *	Returns 0 if no settings are found, 1 if they are.
371  */
372 int netdev_boot_setup_check(struct net_device *dev)
373 {
374 	struct netdev_boot_setup *s = dev_boot_setup;
375 	int i;
376 
377 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
378 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
379 		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
380 			dev->irq 	= s[i].map.irq;
381 			dev->base_addr 	= s[i].map.base_addr;
382 			dev->mem_start 	= s[i].map.mem_start;
383 			dev->mem_end 	= s[i].map.mem_end;
384 			return 1;
385 		}
386 	}
387 	return 0;
388 }
389 
390 
391 /**
392  *	netdev_boot_base	- get address from boot time settings
393  *	@prefix: prefix for network device
394  *	@unit: id for network device
395  *
396  * 	Check boot time settings for the base address of the device.
397  *	Any settings found are applied to the device, to be used
398  *	later during device probing.
399  *	Returns 0 if no settings are found.
400  */
401 unsigned long netdev_boot_base(const char *prefix, int unit)
402 {
403 	const struct netdev_boot_setup *s = dev_boot_setup;
404 	char name[IFNAMSIZ];
405 	int i;
406 
407 	sprintf(name, "%s%d", prefix, unit);
408 
409 	/*
410 	 * If the device is already registered then return a base of 1
411 	 * to indicate not to probe for this interface
412 	 */
413 	if (__dev_get_by_name(name))
414 		return 1;
415 
416 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
417 		if (!strcmp(name, s[i].name))
418 			return s[i].map.base_addr;
419 	return 0;
420 }
421 
422 /*
423  * Saves the settings configured at boot time for any netdevice.
424  */
425 int __init netdev_boot_setup(char *str)
426 {
427 	int ints[5];
428 	struct ifmap map;
429 
430 	str = get_options(str, ARRAY_SIZE(ints), ints);
431 	if (!str || !*str)
432 		return 0;
433 
434 	/* Save settings */
435 	memset(&map, 0, sizeof(map));
436 	if (ints[0] > 0)
437 		map.irq = ints[1];
438 	if (ints[0] > 1)
439 		map.base_addr = ints[2];
440 	if (ints[0] > 2)
441 		map.mem_start = ints[3];
442 	if (ints[0] > 3)
443 		map.mem_end = ints[4];
444 
445 	/* Add new entry to the list */
446 	return netdev_boot_setup_add(str, &map);
447 }
448 
449 __setup("netdev=", netdev_boot_setup);
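/*
 * Illustrative example of the boot parameter parsed above, assuming a
 * hypothetical "eth0" device: up to four integers (irq, base_addr,
 * mem_start, mem_end) followed by the interface name, e.g.
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * would record irq 9 and I/O base 0x300 for eth0 in dev_boot_setup.
 */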
450 
451 /*******************************************************************************
452 
453 			    Device Interface Subroutines
454 
455 *******************************************************************************/
456 
457 /**
458  *	__dev_get_by_name	- find a device by its name
459  *	@name: name to find
460  *
461  *	Find an interface by name. Must be called under RTNL semaphore
462  *	or @dev_base_lock. If the name is found a pointer to the device
463  *	is returned. If the name is not found then %NULL is returned. The
464  *	reference counters are not incremented so the caller must be
465  *	careful with locks.
466  */
467 
468 struct net_device *__dev_get_by_name(const char *name)
469 {
470 	struct hlist_node *p;
471 
472 	hlist_for_each(p, dev_name_hash(name)) {
473 		struct net_device *dev
474 			= hlist_entry(p, struct net_device, name_hlist);
475 		if (!strncmp(dev->name, name, IFNAMSIZ))
476 			return dev;
477 	}
478 	return NULL;
479 }
480 
481 /**
482  *	dev_get_by_name		- find a device by its name
483  *	@name: name to find
484  *
485  *	Find an interface by name. This can be called from any
486  *	context and does its own locking. The returned handle has
487  *	the usage count incremented and the caller must use dev_put() to
488  *	release it when it is no longer needed. %NULL is returned if no
489  *	matching device is found.
490  */
491 
492 struct net_device *dev_get_by_name(const char *name)
493 {
494 	struct net_device *dev;
495 
496 	read_lock(&dev_base_lock);
497 	dev = __dev_get_by_name(name);
498 	if (dev)
499 		dev_hold(dev);
500 	read_unlock(&dev_base_lock);
501 	return dev;
502 }
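/*
 * Illustrative sketch only: dev_get_by_name() takes a reference that the
 * caller must drop with dev_put() when finished.
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *
 *	if (dev) {
 *		...use dev...
 *		dev_put(dev);
 *	}
 */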
503 
504 /**
505  *	__dev_get_by_index - find a device by its ifindex
506  *	@ifindex: index of device
507  *
508  *	Search for an interface by index. Returns a pointer to the device,
509  *	or %NULL if the device is not found. The device has not
510  *	had its reference counter increased so the caller must be careful
511  *	about locking. The caller must hold either the RTNL semaphore
512  *	or @dev_base_lock.
513  */
514 
515 struct net_device *__dev_get_by_index(int ifindex)
516 {
517 	struct hlist_node *p;
518 
519 	hlist_for_each(p, dev_index_hash(ifindex)) {
520 		struct net_device *dev
521 			= hlist_entry(p, struct net_device, index_hlist);
522 		if (dev->ifindex == ifindex)
523 			return dev;
524 	}
525 	return NULL;
526 }
527 
528 
529 /**
530  *	dev_get_by_index - find a device by its ifindex
531  *	@ifindex: index of device
532  *
533  *	Search for an interface by index. Returns a pointer to the device,
534  *	or NULL if the device is not found. The device returned has
535  *	had a reference added and the pointer is safe until the user calls
536  *	dev_put to indicate they have finished with it.
537  */
538 
539 struct net_device *dev_get_by_index(int ifindex)
540 {
541 	struct net_device *dev;
542 
543 	read_lock(&dev_base_lock);
544 	dev = __dev_get_by_index(ifindex);
545 	if (dev)
546 		dev_hold(dev);
547 	read_unlock(&dev_base_lock);
548 	return dev;
549 }
550 
551 /**
552  *	dev_getbyhwaddr - find a device by its hardware address
553  *	@type: media type of device
554  *	@ha: hardware address
555  *
556  *	Search for an interface by MAC address. Returns a pointer to the device,
557  *	or NULL if the device is not found. The caller must hold the
558  *	rtnl semaphore. The returned device has not had its ref count increased
559  *	and the caller must therefore be careful about locking
560  *
561  *	BUGS:
562  *	If the API was consistent this would be __dev_get_by_hwaddr
563  */
564 
565 struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
566 {
567 	struct net_device *dev;
568 
569 	ASSERT_RTNL();
570 
571 	for (dev = dev_base; dev; dev = dev->next)
572 		if (dev->type == type &&
573 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
574 			break;
575 	return dev;
576 }
577 
578 EXPORT_SYMBOL(dev_getbyhwaddr);
579 
580 struct net_device *dev_getfirstbyhwtype(unsigned short type)
581 {
582 	struct net_device *dev;
583 
584 	rtnl_lock();
585 	for (dev = dev_base; dev; dev = dev->next) {
586 		if (dev->type == type) {
587 			dev_hold(dev);
588 			break;
589 		}
590 	}
591 	rtnl_unlock();
592 	return dev;
593 }
594 
595 EXPORT_SYMBOL(dev_getfirstbyhwtype);
596 
597 /**
598  *	dev_get_by_flags - find any device with given flags
599  *	@if_flags: IFF_* values
600  *	@mask: bitmask of bits in if_flags to check
601  *
602  *	Search for any interface with the given flags. Returns a pointer to the
603  *	device, or NULL if no device is found. The device returned has
604  *	had a reference added and the pointer is safe until the user calls
605  *	dev_put to indicate they have finished with it.
606  */
607 
608 struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
609 {
610 	struct net_device *dev;
611 
612 	read_lock(&dev_base_lock);
613 	for (dev = dev_base; dev != NULL; dev = dev->next) {
614 		if (((dev->flags ^ if_flags) & mask) == 0) {
615 			dev_hold(dev);
616 			break;
617 		}
618 	}
619 	read_unlock(&dev_base_lock);
620 	return dev;
621 }
622 
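/*
 * Illustrative sketch only: finding any interface that is up, using the
 * flags/mask pair (the caller must dev_put() the result when done).
 *
 *	struct net_device *dev = dev_get_by_flags(IFF_UP, IFF_UP);
 *
 *	if (dev) {
 *		...use dev...
 *		dev_put(dev);
 *	}
 */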
623 /**
624  *	dev_valid_name - check if name is okay for network device
625  *	@name: name string
626  *
627  *	Network device names need to be valid file names
628  *	to allow sysfs to work.
629  */
630 int dev_valid_name(const char *name)
631 {
632 	return !(*name == '\0'
633 		 || !strcmp(name, ".")
634 		 || !strcmp(name, "..")
635 		 || strchr(name, '/'));
636 }
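/*
 * Illustrative examples: dev_valid_name("eth0") returns 1, while
 * dev_valid_name(""), dev_valid_name(".") and dev_valid_name("a/b")
 * all return 0, since such names cannot appear in sysfs.
 */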
637 
638 /**
639  *	dev_alloc_name - allocate a name for a device
640  *	@dev: device
641  *	@name: name format string
642  *
643  *	Passed a format string - eg "lt%d" - it will try to find a suitable
644  *	id. Not efficient for many devices, not called a lot. The caller
645  *	must hold the dev_base or rtnl lock while allocating the name and
646  *	adding the device in order to avoid duplicates. Returns the number
647  *	of the unit assigned or a negative errno code.
648  */
649 
650 int dev_alloc_name(struct net_device *dev, const char *name)
651 {
652 	int i = 0;
653 	char buf[IFNAMSIZ];
654 	const char *p;
655 	const int max_netdevices = 8*PAGE_SIZE;
656 	long *inuse;
657 	struct net_device *d;
658 
659 	p = strnchr(name, IFNAMSIZ-1, '%');
660 	if (p) {
661 		/*
662 		 * Verify the string as this thing may have come from
663 		 * the user.  There must be either one "%d" and no other "%"
664 		 * characters.
665 		 */
666 		if (p[1] != 'd' || strchr(p + 2, '%'))
667 			return -EINVAL;
668 
669 		/* Use one page as a bit array of possible slots */
670 		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
671 		if (!inuse)
672 			return -ENOMEM;
673 
674 		for (d = dev_base; d; d = d->next) {
675 			if (!sscanf(d->name, name, &i))
676 				continue;
677 			if (i < 0 || i >= max_netdevices)
678 				continue;
679 
680 			/*  avoid cases where sscanf is not exact inverse of printf */
681 			snprintf(buf, sizeof(buf), name, i);
682 			if (!strncmp(buf, d->name, IFNAMSIZ))
683 				set_bit(i, inuse);
684 		}
685 
686 		i = find_first_zero_bit(inuse, max_netdevices);
687 		free_page((unsigned long) inuse);
688 	}
689 
690 	snprintf(buf, sizeof(buf), name, i);
691 	if (!__dev_get_by_name(buf)) {
692 		strlcpy(dev->name, buf, IFNAMSIZ);
693 		return i;
694 	}
695 
696 	/* It is possible to run out of possible slots
697 	 * when the name is long and there isn't enough space left
698 	 * for the digits, or if all bits are used.
699 	 */
700 	return -ENFILE;
701 }
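/*
 * Illustrative sketch only: with devices "eth0" and "eth1" already
 * registered, a hypothetical caller doing
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * would set dev->name to "eth2" and return 2; a negative errno is
 * returned on failure.
 */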
702 
703 
704 /**
705  *	dev_change_name - change name of a device
706  *	@dev: device
707  *	@newname: name (or format string) must be at least IFNAMSIZ
708  *
709  *	Change the name of a device. A format string such as "eth%d"
710  *	can be passed for wildcarding.
711  */
712 int dev_change_name(struct net_device *dev, char *newname)
713 {
714 	int err = 0;
715 
716 	ASSERT_RTNL();
717 
718 	if (dev->flags & IFF_UP)
719 		return -EBUSY;
720 
721 	if (!dev_valid_name(newname))
722 		return -EINVAL;
723 
724 	if (strchr(newname, '%')) {
725 		err = dev_alloc_name(dev, newname);
726 		if (err < 0)
727 			return err;
728 		strcpy(newname, dev->name);
729 	}
730 	else if (__dev_get_by_name(newname))
731 		return -EEXIST;
732 	else
733 		strlcpy(dev->name, newname, IFNAMSIZ);
734 
735 	err = class_device_rename(&dev->class_dev, dev->name);
736 	if (!err) {
737 		hlist_del(&dev->name_hlist);
738 		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
739 		notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
740 	}
741 
742 	return err;
743 }
744 
745 /**
746  *	netdev_features_change - device changes features
747  *	@dev: device to cause notification
748  *
749  *	Called to indicate a device has changed features.
750  */
751 void netdev_features_change(struct net_device *dev)
752 {
753 	notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
754 }
755 EXPORT_SYMBOL(netdev_features_change);
756 
757 /**
758  *	netdev_state_change - device changes state
759  *	@dev: device to cause notification
760  *
761  *	Called to indicate a device has changed state. This function calls
762  *	the notifier chains for netdev_chain and sends a NEWLINK message
763  *	to the routing socket.
764  */
765 void netdev_state_change(struct net_device *dev)
766 {
767 	if (dev->flags & IFF_UP) {
768 		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
769 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
770 	}
771 }
772 
773 /**
774  *	dev_load 	- load a network module
775  *	@name: name of interface
776  *
777  *	If a network interface is not present and the process has suitable
778  *	privileges this function loads the module. If module loading is not
779  *	available in this kernel then it becomes a nop.
780  */
781 
782 void dev_load(const char *name)
783 {
784 	struct net_device *dev;
785 
786 	read_lock(&dev_base_lock);
787 	dev = __dev_get_by_name(name);
788 	read_unlock(&dev_base_lock);
789 
790 	if (!dev && capable(CAP_SYS_MODULE))
791 		request_module("%s", name);
792 }
793 
794 static int default_rebuild_header(struct sk_buff *skb)
795 {
796 	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
797 	       skb->dev ? skb->dev->name : "NULL!!!");
798 	kfree_skb(skb);
799 	return 1;
800 }
801 
802 
803 /**
804  *	dev_open	- prepare an interface for use.
805  *	@dev:	device to open
806  *
807  *	Takes a device from down to up state. The device's private open
808  *	function is invoked and then the multicast lists are loaded. Finally
809  *	the device is moved into the up state and a %NETDEV_UP message is
810  *	sent to the netdev notifier chain.
811  *
812  *	Calling this function on an active interface is a nop. On a failure
813  *	a negative errno code is returned.
814  */
815 int dev_open(struct net_device *dev)
816 {
817 	int ret = 0;
818 
819 	/*
820 	 *	Is it already up?
821 	 */
822 
823 	if (dev->flags & IFF_UP)
824 		return 0;
825 
826 	/*
827 	 *	Is it even present?
828 	 */
829 	if (!netif_device_present(dev))
830 		return -ENODEV;
831 
832 	/*
833 	 *	Call device private open method
834 	 */
835 	set_bit(__LINK_STATE_START, &dev->state);
836 	if (dev->open) {
837 		ret = dev->open(dev);
838 		if (ret)
839 			clear_bit(__LINK_STATE_START, &dev->state);
840 	}
841 
842  	/*
843 	 *	If it went open OK then:
844 	 */
845 
846 	if (!ret) {
847 		/*
848 		 *	Set the flags.
849 		 */
850 		dev->flags |= IFF_UP;
851 
852 		/*
853 		 *	Initialize multicasting status
854 		 */
855 		dev_mc_upload(dev);
856 
857 		/*
858 		 *	Wakeup transmit queue engine
859 		 */
860 		dev_activate(dev);
861 
862 		/*
863 		 *	... and announce new interface.
864 		 */
865 		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
866 	}
867 	return ret;
868 }
869 
870 /**
871  *	dev_close - shutdown an interface.
872  *	@dev: device to shutdown
873  *
874  *	This function moves an active device into down state. A
875  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
876  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
877  *	chain.
878  */
879 int dev_close(struct net_device *dev)
880 {
881 	if (!(dev->flags & IFF_UP))
882 		return 0;
883 
884 	/*
885 	 *	Tell people we are going down, so that they can
886 	 *	prepare for it while the device is still operating.
887 	 */
888 	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
889 
890 	dev_deactivate(dev);
891 
892 	clear_bit(__LINK_STATE_START, &dev->state);
893 
894 	/* Synchronize with any scheduled poll. We cannot touch the poll list;
895 	 * it may even be on a different cpu. So just clear netif_running()
896 	 * and wait until the poll really happens. Actually, the best place
897 	 * for this is inside dev->stop() after the device has stopped its irq
898 	 * engine, but this requires more changes in devices. */
899 
900 	smp_mb__after_clear_bit(); /* Commit netif_running(). */
901 	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
902 		/* No hurry. */
903 		msleep(1);
904 	}
905 
906 	/*
907 	 *	Call the device specific close. This cannot fail.
908 	 *	Only if device is UP
909 	 *
910 	 *	We allow it to be called even after a DETACH hot-plug
911 	 *	event.
912 	 */
913 	if (dev->stop)
914 		dev->stop(dev);
915 
916 	/*
917 	 *	Device is now down.
918 	 */
919 
920 	dev->flags &= ~IFF_UP;
921 
922 	/*
923 	 * Tell people we are down
924 	 */
925 	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
926 
927 	return 0;
928 }
929 
930 
931 /*
932  *	Device change register/unregister. These are not inline or static
933  *	as we export them to the world.
934  */
935 
936 /**
937  *	register_netdevice_notifier - register a network notifier block
938  *	@nb: notifier
939  *
940  *	Register a notifier to be called when network device events occur.
941  *	The notifier passed is linked into the kernel structures and must
942  *	not be reused until it has been unregistered. A negative errno code
943  *	is returned on a failure.
944  *
945  * 	When registered, all registration and up events are replayed
946  *	to the new notifier to allow it to have a race-free
947  *	view of the network device list.
948  */
949 
950 int register_netdevice_notifier(struct notifier_block *nb)
951 {
952 	struct net_device *dev;
953 	int err;
954 
955 	rtnl_lock();
956 	err = notifier_chain_register(&netdev_chain, nb);
957 	if (!err) {
958 		for (dev = dev_base; dev; dev = dev->next) {
959 			nb->notifier_call(nb, NETDEV_REGISTER, dev);
960 
961 			if (dev->flags & IFF_UP)
962 				nb->notifier_call(nb, NETDEV_UP, dev);
963 		}
964 	}
965 	rtnl_unlock();
966 	return err;
967 }
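/*
 * Illustrative sketch only: a minimal netdevice notifier (all names
 * hypothetical).  Existing devices are replayed as NETDEV_REGISTER and,
 * if up, NETDEV_UP events at registration time, as described above.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */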
968 
969 /**
970  *	unregister_netdevice_notifier - unregister a network notifier block
971  *	@nb: notifier
972  *
973  *	Unregister a notifier previously registered by
974  *	register_netdevice_notifier(). The notifier is unlinked from the
975  *	kernel structures and may then be reused. A negative errno code
976  *	is returned on a failure.
977  */
978 
979 int unregister_netdevice_notifier(struct notifier_block *nb)
980 {
981 	return notifier_chain_unregister(&netdev_chain, nb);
982 }
983 
984 /**
985  *	call_netdevice_notifiers - call all network notifier blocks
986  *      @val: value passed unmodified to notifier function
987  *      @v:   pointer passed unmodified to notifier function
988  *
989  *	Call all network notifier blocks.  Parameters and return value
990  *	are as for notifier_call_chain().
991  */
992 
993 int call_netdevice_notifiers(unsigned long val, void *v)
994 {
995 	return notifier_call_chain(&netdev_chain, val, v);
996 }
997 
998 /* When > 0 there are consumers of rx skb time stamps */
999 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1000 
1001 void net_enable_timestamp(void)
1002 {
1003 	atomic_inc(&netstamp_needed);
1004 }
1005 
1006 void net_disable_timestamp(void)
1007 {
1008 	atomic_dec(&netstamp_needed);
1009 }
1010 
1011 void __net_timestamp(struct sk_buff *skb)
1012 {
1013 	struct timeval tv;
1014 
1015 	do_gettimeofday(&tv);
1016 	skb_set_timestamp(skb, &tv);
1017 }
1018 EXPORT_SYMBOL(__net_timestamp);
1019 
1020 static inline void net_timestamp(struct sk_buff *skb)
1021 {
1022 	if (atomic_read(&netstamp_needed))
1023 		__net_timestamp(skb);
1024 	else {
1025 		skb->tstamp.off_sec = 0;
1026 		skb->tstamp.off_usec = 0;
1027 	}
1028 }
1029 
1030 /*
1031  *	Support routine. Sends outgoing frames to any network
1032  *	taps currently in use.
1033  */
1034 
1035 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1036 {
1037 	struct packet_type *ptype;
1038 
1039 	net_timestamp(skb);
1040 
1041 	rcu_read_lock();
1042 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1043 		/* Never send packets back to the socket
1044 		 * they originated from - MvS (miquels@drinkel.ow.org)
1045 		 */
1046 		if ((ptype->dev == dev || !ptype->dev) &&
1047 		    (ptype->af_packet_priv == NULL ||
1048 		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
1049 			struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1050 			if (!skb2)
1051 				break;
1052 
1053 			/* skb->nh should be correctly
1054 			   set by the sender, so that the check below is
1055 			   just protection against buggy protocols.
1056 			 */
1057 			skb2->mac.raw = skb2->data;
1058 
1059 			if (skb2->nh.raw < skb2->data ||
1060 			    skb2->nh.raw > skb2->tail) {
1061 				if (net_ratelimit())
1062 					printk(KERN_CRIT "protocol %04x is "
1063 					       "buggy, dev %s\n",
1064 					       skb2->protocol, dev->name);
1065 				skb2->nh.raw = skb2->data;
1066 			}
1067 
1068 			skb2->h.raw = skb2->nh.raw;
1069 			skb2->pkt_type = PACKET_OUTGOING;
1070 			ptype->func(skb2, skb->dev, ptype, skb->dev);
1071 		}
1072 	}
1073 	rcu_read_unlock();
1074 }
1075 
1076 /*
1077  * Invalidate hardware checksum when packet is to be mangled, and
1078  * complete checksum manually on outgoing path.
1079  */
1080 int skb_checksum_help(struct sk_buff *skb, int inward)
1081 {
1082 	unsigned int csum;
1083 	int ret = 0, offset = skb->h.raw - skb->data;
1084 
1085 	if (inward) {
1086 		skb->ip_summed = CHECKSUM_NONE;
1087 		goto out;
1088 	}
1089 
1090 	if (skb_cloned(skb)) {
1091 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1092 		if (ret)
1093 			goto out;
1094 	}
1095 
1096 	BUG_ON(offset > (int)skb->len);
1097 	csum = skb_checksum(skb, offset, skb->len-offset, 0);
1098 
1099 	offset = skb->tail - skb->h.raw;
1100 	BUG_ON(offset <= 0);
1101 	BUG_ON(skb->csum + 2 > offset);
1102 
1103 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
1104 	skb->ip_summed = CHECKSUM_NONE;
1105 out:
1106 	return ret;
1107 }
1108 
1109 /* Take action when hardware reception checksum errors are detected. */
1110 #ifdef CONFIG_BUG
1111 void netdev_rx_csum_fault(struct net_device *dev)
1112 {
1113 	if (net_ratelimit()) {
1114 		printk(KERN_ERR "%s: hw csum failure.\n",
1115 			dev ? dev->name : "<unknown>");
1116 		dump_stack();
1117 	}
1118 }
1119 EXPORT_SYMBOL(netdev_rx_csum_fault);
1120 #endif
1121 
1122 #ifdef CONFIG_HIGHMEM
1123 /* Actually, we should eliminate this check as soon as we know that:
1124  * 1. An IOMMU is present and allows mapping all the memory.
1125  * 2. No high memory really exists on this machine.
1126  */
1127 
1128 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1129 {
1130 	int i;
1131 
1132 	if (dev->features & NETIF_F_HIGHDMA)
1133 		return 0;
1134 
1135 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1136 		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1137 			return 1;
1138 
1139 	return 0;
1140 }
1141 #else
1142 #define illegal_highdma(dev, skb)	(0)
1143 #endif
1144 
1145 /* Keep head the same: replace data */
1146 int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
1147 {
1148 	unsigned int size;
1149 	u8 *data;
1150 	long offset;
1151 	struct skb_shared_info *ninfo;
1152 	int headerlen = skb->data - skb->head;
1153 	int expand = (skb->tail + skb->data_len) - skb->end;
1154 
1155 	if (skb_shared(skb))
1156 		BUG();
1157 
1158 	if (expand <= 0)
1159 		expand = 0;
1160 
1161 	size = skb->end - skb->head + expand;
1162 	size = SKB_DATA_ALIGN(size);
1163 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
1164 	if (!data)
1165 		return -ENOMEM;
1166 
1167 	/* Copy entire thing */
1168 	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
1169 		BUG();
1170 
1171 	/* Set up shinfo */
1172 	ninfo = (struct skb_shared_info*)(data + size);
1173 	atomic_set(&ninfo->dataref, 1);
1174 	ninfo->tso_size = skb_shinfo(skb)->tso_size;
1175 	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
1176 	ninfo->nr_frags = 0;
1177 	ninfo->frag_list = NULL;
1178 
1179 	/* Offset between the two in bytes */
1180 	offset = data - skb->head;
1181 
1182 	/* Free old data. */
1183 	skb_release_data(skb);
1184 
1185 	skb->head = data;
1186 	skb->end  = data + size;
1187 
1188 	/* Set up new pointers */
1189 	skb->h.raw   += offset;
1190 	skb->nh.raw  += offset;
1191 	skb->mac.raw += offset;
1192 	skb->tail    += offset;
1193 	skb->data    += offset;
1194 
1195 	/* We are no longer a clone, even if we were. */
1196 	skb->cloned    = 0;
1197 
1198 	skb->tail     += skb->data_len;
1199 	skb->data_len  = 0;
1200 	return 0;
1201 }
1202 
1203 #define HARD_TX_LOCK(dev, cpu) {			\
1204 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
1205 		spin_lock(&dev->xmit_lock);		\
1206 		dev->xmit_lock_owner = cpu;		\
1207 	}						\
1208 }
1209 
1210 #define HARD_TX_UNLOCK(dev) {				\
1211 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
1212 		dev->xmit_lock_owner = -1;		\
1213 		spin_unlock(&dev->xmit_lock);		\
1214 	}						\
1215 }
1216 
1217 /**
1218  *	dev_queue_xmit - transmit a buffer
1219  *	@skb: buffer to transmit
1220  *
1221  *	Queue a buffer for transmission to a network device. The caller must
1222  *	have set the device and priority and built the buffer before calling
1223  *	this function. The function can be called from an interrupt.
1224  *
1225  *	A negative errno code is returned on a failure. A success does not
1226  *	guarantee the frame will be transmitted as it may be dropped due
1227  *	to congestion or traffic shaping.
1228  *
1229  * -----------------------------------------------------------------------------------
1230  *      I notice this method can also return errors from the queue disciplines,
1231  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
1232  *      be positive.
1233  *
1234  *      Regardless of the return value, the skb is consumed, so it is currently
1235  *      difficult to retry a send to this method.  (You can bump the ref count
1236  *      before sending to hold a reference for retry if you are careful.)
1237  *
1238  *      When calling this method, interrupts MUST be enabled.  This is because
1239  *      the BH enable code must have IRQs enabled so that it will not deadlock.
1240  *          --BLG
1241  */
1242 
1243 int dev_queue_xmit(struct sk_buff *skb)
1244 {
1245 	struct net_device *dev = skb->dev;
1246 	struct Qdisc *q;
1247 	int rc = -ENOMEM;
1248 
1249 	if (skb_shinfo(skb)->frag_list &&
1250 	    !(dev->features & NETIF_F_FRAGLIST) &&
1251 	    __skb_linearize(skb, GFP_ATOMIC))
1252 		goto out_kfree_skb;
1253 
1254 	/* Fragmented skb is linearized if device does not support SG,
1255 	 * or if at least one of fragments is in highmem and device
1256 	 * does not support DMA from it.
1257 	 */
1258 	if (skb_shinfo(skb)->nr_frags &&
1259 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1260 	    __skb_linearize(skb, GFP_ATOMIC))
1261 		goto out_kfree_skb;
1262 
1263 	/* If packet is not checksummed and device does not support
1264 	 * checksumming for this protocol, complete checksumming here.
1265 	 */
1266 	if (skb->ip_summed == CHECKSUM_HW &&
1267 	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
1268 	     (!(dev->features & NETIF_F_IP_CSUM) ||
1269 	      skb->protocol != htons(ETH_P_IP))))
1270 	      	if (skb_checksum_help(skb, 0))
1271 	      		goto out_kfree_skb;
1272 
1273 	spin_lock_prefetch(&dev->queue_lock);
1274 
1275 	/* Disable soft irqs for various locks below. Also
1276 	 * stops preemption for RCU.
1277 	 */
1278 	local_bh_disable();
1279 
1280 	/* Updates of qdisc are serialized by queue_lock.
1281 	 * The struct Qdisc which is pointed to by qdisc is now a
1282 	 * rcu structure - it may be accessed without acquiring
1283 	 * a lock (but the structure may be stale.) The freeing of the
1284 	 * qdisc will be deferred until it's known that there are no
1285 	 * more references to it.
1286 	 *
1287 	 * If the qdisc has an enqueue function, we still need to
1288 	 * hold the queue_lock before calling it, since queue_lock
1289 	 * also serializes access to the device queue.
1290 	 */
1291 
1292 	q = rcu_dereference(dev->qdisc);
1293 #ifdef CONFIG_NET_CLS_ACT
1294 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1295 #endif
1296 	if (q->enqueue) {
1297 		/* Grab device queue */
1298 		spin_lock(&dev->queue_lock);
1299 
1300 		rc = q->enqueue(skb, q);
1301 
1302 		qdisc_run(dev);
1303 
1304 		spin_unlock(&dev->queue_lock);
1305 		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1306 		goto out;
1307 	}
1308 
1309 	/* The device has no queue. Common case for software devices:
1310 	   loopback, all the sorts of tunnels...
1311 
1312 	   Really, it is unlikely that xmit_lock protection is necessary here.
1313 	   (f.e. loopback and IP tunnels are clean, ignoring statistics
1314 	   counters.)
1315 	   However, it is possible that they rely on the protection
1316 	   provided by us here.
1317 
1318 	   Check this and take the lock. It is not prone to deadlocks.
1319 	   Either take the noqueue qdisc path, it is even simpler 8)
1320 	 */
1321 	if (dev->flags & IFF_UP) {
1322 		int cpu = smp_processor_id(); /* ok because BHs are off */
1323 
1324 		if (dev->xmit_lock_owner != cpu) {
1325 
1326 			HARD_TX_LOCK(dev, cpu);
1327 
1328 			if (!netif_queue_stopped(dev)) {
1329 				if (netdev_nit)
1330 					dev_queue_xmit_nit(skb, dev);
1331 
1332 				rc = 0;
1333 				if (!dev->hard_start_xmit(skb, dev)) {
1334 					HARD_TX_UNLOCK(dev);
1335 					goto out;
1336 				}
1337 			}
1338 			HARD_TX_UNLOCK(dev);
1339 			if (net_ratelimit())
1340 				printk(KERN_CRIT "Virtual device %s asks to "
1341 				       "queue packet!\n", dev->name);
1342 		} else {
1343 			/* Recursion is detected! It is possible,
1344 			 * unfortunately */
1345 			if (net_ratelimit())
1346 				printk(KERN_CRIT "Dead loop on virtual device "
1347 				       "%s, fix it urgently!\n", dev->name);
1348 		}
1349 	}
1350 
1351 	rc = -ENETDOWN;
1352 	local_bh_enable();
1353 
1354 out_kfree_skb:
1355 	kfree_skb(skb);
1356 	return rc;
1357 out:
1358 	local_bh_enable();
1359 	return rc;
1360 }
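/*
 * Illustrative sketch only, following the note above about bumping the
 * reference count before sending: the extra reference keeps the skb alive
 * across dev_queue_xmit(), which consumes one reference regardless of the
 * return value.
 *
 *	skb_get(skb);			// hold an extra reference
 *	rc = dev_queue_xmit(skb);	// consumes one reference
 *	...inspect rc, possibly retry while being careful...
 *	kfree_skb(skb);			// drop our extra reference
 */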
1361 
1362 
1363 /*=======================================================================
1364 			Receiver routines
1365   =======================================================================*/
1366 
1367 int netdev_max_backlog = 1000;
1368 int netdev_budget = 300;
1369 int weight_p = 64;            /* old backlog weight */
1370 
1371 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1372 
1373 
1374 /**
1375  *	netif_rx	-	post buffer to the network code
1376  *	@skb: buffer to post
1377  *
1378  *	This function receives a packet from a device driver and queues it for
1379  *	the upper (protocol) levels to process.  It always succeeds. The buffer
1380  *	may be dropped during processing for congestion control or by the
1381  *	protocol layers.
1382  *
1383  *	return values:
1384  *	NET_RX_SUCCESS	(no congestion)
1385  *	NET_RX_CN_LOW   (low congestion)
1386  *	NET_RX_CN_MOD   (moderate congestion)
1387  *	NET_RX_CN_HIGH  (high congestion)
1388  *	NET_RX_DROP     (packet was dropped)
1389  *
1390  */
1391 
1392 int netif_rx(struct sk_buff *skb)
1393 {
1394 	struct softnet_data *queue;
1395 	unsigned long flags;
1396 
1397 	/* if netpoll wants it, pretend we never saw it */
1398 	if (netpoll_rx(skb))
1399 		return NET_RX_DROP;
1400 
1401 	if (!skb->tstamp.off_sec)
1402 		net_timestamp(skb);
1403 
1404 	/*
1405 	 * The code is rearranged so that the path is shortest
1406 	 * when the CPU is congested but still operating.
1407 	 */
1408 	local_irq_save(flags);
1409 	queue = &__get_cpu_var(softnet_data);
1410 
1411 	__get_cpu_var(netdev_rx_stat).total++;
1412 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1413 		if (queue->input_pkt_queue.qlen) {
1414 enqueue:
1415 			dev_hold(skb->dev);
1416 			__skb_queue_tail(&queue->input_pkt_queue, skb);
1417 			local_irq_restore(flags);
1418 			return NET_RX_SUCCESS;
1419 		}
1420 
1421 		netif_rx_schedule(&queue->backlog_dev);
1422 		goto enqueue;
1423 	}
1424 
1425 	__get_cpu_var(netdev_rx_stat).dropped++;
1426 	local_irq_restore(flags);
1427 
1428 	kfree_skb(skb);
1429 	return NET_RX_DROP;
1430 }
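/*
 * Illustrative sketch only: the typical driver receive path that hands a
 * frame to netif_rx() (buffer handling is driver specific; "len" and
 * "data" are hypothetical).
 *
 *	skb = dev_alloc_skb(len + 2);
 *	if (!skb)
 *		return;				// drop, out of memory
 *	skb_reserve(skb, 2);			// align the IP header
 *	memcpy(skb_put(skb, len), data, len);
 *	skb->dev = dev;
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */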
1431 
1432 int netif_rx_ni(struct sk_buff *skb)
1433 {
1434 	int err;
1435 
1436 	preempt_disable();
1437 	err = netif_rx(skb);
1438 	if (local_softirq_pending())
1439 		do_softirq();
1440 	preempt_enable();
1441 
1442 	return err;
1443 }
1444 
1445 EXPORT_SYMBOL(netif_rx_ni);
1446 
1447 static inline struct net_device *skb_bond(struct sk_buff *skb)
1448 {
1449 	struct net_device *dev = skb->dev;
1450 
1451 	if (dev->master)
1452 		skb->dev = dev->master;
1453 
1454 	return dev;
1455 }
1456 
1457 static void net_tx_action(struct softirq_action *h)
1458 {
1459 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
1460 
1461 	if (sd->completion_queue) {
1462 		struct sk_buff *clist;
1463 
1464 		local_irq_disable();
1465 		clist = sd->completion_queue;
1466 		sd->completion_queue = NULL;
1467 		local_irq_enable();
1468 
1469 		while (clist) {
1470 			struct sk_buff *skb = clist;
1471 			clist = clist->next;
1472 
1473 			BUG_TRAP(!atomic_read(&skb->users));
1474 			__kfree_skb(skb);
1475 		}
1476 	}
1477 
1478 	if (sd->output_queue) {
1479 		struct net_device *head;
1480 
1481 		local_irq_disable();
1482 		head = sd->output_queue;
1483 		sd->output_queue = NULL;
1484 		local_irq_enable();
1485 
1486 		while (head) {
1487 			struct net_device *dev = head;
1488 			head = head->next_sched;
1489 
1490 			smp_mb__before_clear_bit();
1491 			clear_bit(__LINK_STATE_SCHED, &dev->state);
1492 
1493 			if (spin_trylock(&dev->queue_lock)) {
1494 				qdisc_run(dev);
1495 				spin_unlock(&dev->queue_lock);
1496 			} else {
1497 				netif_schedule(dev);
1498 			}
1499 		}
1500 	}
1501 }
1502 
1503 static __inline__ int deliver_skb(struct sk_buff *skb,
1504 				  struct packet_type *pt_prev,
1505 				  struct net_device *orig_dev)
1506 {
1507 	atomic_inc(&skb->users);
1508 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1509 }
1510 
1511 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1512 int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
1513 struct net_bridge;
1514 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1515 						unsigned char *addr);
1516 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
1517 
1518 static __inline__ int handle_bridge(struct sk_buff **pskb,
1519 				    struct packet_type **pt_prev, int *ret,
1520 				    struct net_device *orig_dev)
1521 {
1522 	struct net_bridge_port *port;
1523 
1524 	if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
1525 	    (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
1526 		return 0;
1527 
1528 	if (*pt_prev) {
1529 		*ret = deliver_skb(*pskb, *pt_prev, orig_dev);
1530 		*pt_prev = NULL;
1531 	}
1532 
1533 	return br_handle_frame_hook(port, pskb);
1534 }
1535 #else
1536 #define handle_bridge(skb, pt_prev, ret, orig_dev)	(0)
1537 #endif
1538 
1539 #ifdef CONFIG_NET_CLS_ACT
1540 /* TODO: Maybe we should just force sch_ingress to be compiled in
1541  * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
1542  * instructions (a compare and 2 extra stores) right now if we don't
1543  * have it on but do have CONFIG_NET_CLS_ACT.
1544  * NOTE: This doesn't remove any functionality; if you don't have
1545  * the ingress scheduler, you just can't add policies on ingress.
1546  *
1547  */
1548 static int ing_filter(struct sk_buff *skb)
1549 {
1550 	struct Qdisc *q;
1551 	struct net_device *dev = skb->dev;
1552 	int result = TC_ACT_OK;
1553 
1554 	if (dev->qdisc_ingress) {
1555 		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1556 		if (MAX_RED_LOOP < ttl++) {
1557 			printk("Redir loop detected Dropping packet (%s->%s)\n",
1558 				skb->input_dev->name, skb->dev->name);
1559 			return TC_ACT_SHOT;
1560 		}
1561 
1562 		skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1563 
1564 		skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1565 
1566 		spin_lock(&dev->ingress_lock);
1567 		if ((q = dev->qdisc_ingress) != NULL)
1568 			result = q->enqueue(skb, q);
1569 		spin_unlock(&dev->ingress_lock);
1570 
1571 	}
1572 
1573 	return result;
1574 }
1575 #endif
1576 
1577 int netif_receive_skb(struct sk_buff *skb)
1578 {
1579 	struct packet_type *ptype, *pt_prev;
1580 	struct net_device *orig_dev;
1581 	int ret = NET_RX_DROP;
1582 	unsigned short type;
1583 
1584 	/* if we've gotten here through NAPI, check netpoll */
1585 	if (skb->dev->poll && netpoll_rx(skb))
1586 		return NET_RX_DROP;
1587 
1588 	if (!skb->tstamp.off_sec)
1589 		net_timestamp(skb);
1590 
1591 	if (!skb->input_dev)
1592 		skb->input_dev = skb->dev;
1593 
1594 	orig_dev = skb_bond(skb);
1595 
1596 	__get_cpu_var(netdev_rx_stat).total++;
1597 
1598 	skb->h.raw = skb->nh.raw = skb->data;
1599 	skb->mac_len = skb->nh.raw - skb->mac.raw;
1600 
1601 	pt_prev = NULL;
1602 
1603 	rcu_read_lock();
1604 
1605 #ifdef CONFIG_NET_CLS_ACT
1606 	if (skb->tc_verd & TC_NCLS) {
1607 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
1608 		goto ncls;
1609 	}
1610 #endif
1611 
1612 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1613 		if (!ptype->dev || ptype->dev == skb->dev) {
1614 			if (pt_prev)
1615 				ret = deliver_skb(skb, pt_prev, orig_dev);
1616 			pt_prev = ptype;
1617 		}
1618 	}
1619 
1620 #ifdef CONFIG_NET_CLS_ACT
1621 	if (pt_prev) {
1622 		ret = deliver_skb(skb, pt_prev, orig_dev);
1623 		pt_prev = NULL; /* no one else should process this after */
1624 	} else {
1625 		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1626 	}
1627 
1628 	ret = ing_filter(skb);
1629 
1630 	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
1631 		kfree_skb(skb);
1632 		goto out;
1633 	}
1634 
1635 	skb->tc_verd = 0;
1636 ncls:
1637 #endif
1638 
1639 	handle_diverter(skb);
1640 
1641 	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
1642 		goto out;
1643 
1644 	type = skb->protocol;
1645 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
1646 		if (ptype->type == type &&
1647 		    (!ptype->dev || ptype->dev == skb->dev)) {
1648 			if (pt_prev)
1649 				ret = deliver_skb(skb, pt_prev, orig_dev);
1650 			pt_prev = ptype;
1651 		}
1652 	}
1653 
1654 	if (pt_prev) {
1655 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1656 	} else {
1657 		kfree_skb(skb);
1658 		/* Jamal, now you will not be able to escape explaining
1659 		 * to me how you were going to use this. :-)
1660 		 */
1661 		ret = NET_RX_DROP;
1662 	}
1663 
1664 out:
1665 	rcu_read_unlock();
1666 	return ret;
1667 }
1668 
1669 static int process_backlog(struct net_device *backlog_dev, int *budget)
1670 {
1671 	int work = 0;
1672 	int quota = min(backlog_dev->quota, *budget);
1673 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
1674 	unsigned long start_time = jiffies;
1675 
1676 	backlog_dev->weight = weight_p;
1677 	for (;;) {
1678 		struct sk_buff *skb;
1679 		struct net_device *dev;
1680 
1681 		local_irq_disable();
1682 		skb = __skb_dequeue(&queue->input_pkt_queue);
1683 		if (!skb)
1684 			goto job_done;
1685 		local_irq_enable();
1686 
1687 		dev = skb->dev;
1688 
1689 		netif_receive_skb(skb);
1690 
1691 		dev_put(dev);
1692 
1693 		work++;
1694 
1695 		if (work >= quota || jiffies - start_time > 1)
1696 			break;
1697 
1698 	}
1699 
1700 	backlog_dev->quota -= work;
1701 	*budget -= work;
1702 	return -1;
1703 
1704 job_done:
1705 	backlog_dev->quota -= work;
1706 	*budget -= work;
1707 
1708 	list_del(&backlog_dev->poll_list);
1709 	smp_mb__before_clear_bit();
1710 	netif_poll_enable(backlog_dev);
1711 
1712 	local_irq_enable();
1713 	return 0;
1714 }
1715 
1716 static void net_rx_action(struct softirq_action *h)
1717 {
1718 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
1719 	unsigned long start_time = jiffies;
1720 	int budget = netdev_budget;
1721 	void *have;
1722 
1723 	local_irq_disable();
1724 
1725 	while (!list_empty(&queue->poll_list)) {
1726 		struct net_device *dev;
1727 
1728 		if (budget <= 0 || jiffies - start_time > 1)
1729 			goto softnet_break;
1730 
1731 		local_irq_enable();
1732 
1733 		dev = list_entry(queue->poll_list.next,
1734 				 struct net_device, poll_list);
1735 		have = netpoll_poll_lock(dev);
1736 
1737 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
1738 			netpoll_poll_unlock(have);
1739 			local_irq_disable();
1740 			list_del(&dev->poll_list);
1741 			list_add_tail(&dev->poll_list, &queue->poll_list);
1742 			if (dev->quota < 0)
1743 				dev->quota += dev->weight;
1744 			else
1745 				dev->quota = dev->weight;
1746 		} else {
1747 			netpoll_poll_unlock(have);
1748 			dev_put(dev);
1749 			local_irq_disable();
1750 		}
1751 	}
1752 out:
1753 	local_irq_enable();
1754 	return;
1755 
1756 softnet_break:
1757 	__get_cpu_var(netdev_rx_stat).time_squeeze++;
1758 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
1759 	goto out;
1760 }
1761 
1762 static gifconf_func_t * gifconf_list [NPROTO];
1763 
1764 /**
1765  *	register_gifconf	-	register a SIOCGIF handler
1766  *	@family: Address family
1767  *	@gifconf: Function handler
1768  *
1769  *	Register protocol dependent address dumping routines. The handler
1770  *	that is passed must not be freed or reused until it has been replaced
1771  *	by another handler.
1772  */
1773 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1774 {
1775 	if (family >= NPROTO)
1776 		return -EINVAL;
1777 	gifconf_list[family] = gifconf;
1778 	return 0;
1779 }
1780 
1781 
1782 /*
1783  *	Map an interface index to its name (SIOCGIFNAME)
1784  */
1785 
1786 /*
1787  *	We need this ioctl for efficient implementation of the
1788  *	if_indextoname() function required by the IPv6 API.  Without
1789  *	it, we would have to search all the interfaces to find a
1790  *	match.  --pb
1791  */
1792 
1793 static int dev_ifname(struct ifreq __user *arg)
1794 {
1795 	struct net_device *dev;
1796 	struct ifreq ifr;
1797 
1798 	/*
1799 	 *	Fetch the caller's info block.
1800 	 */
1801 
1802 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1803 		return -EFAULT;
1804 
1805 	read_lock(&dev_base_lock);
1806 	dev = __dev_get_by_index(ifr.ifr_ifindex);
1807 	if (!dev) {
1808 		read_unlock(&dev_base_lock);
1809 		return -ENODEV;
1810 	}
1811 
1812 	strcpy(ifr.ifr_name, dev->name);
1813 	read_unlock(&dev_base_lock);
1814 
1815 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1816 		return -EFAULT;
1817 	return 0;
1818 }
1819 
1820 /*
1821  *	Perform a SIOCGIFCONF call. This structure will change
1822  *	size eventually, and there is nothing I can do about it.
1823  *	Thus we will need a 'compatibility mode'.
1824  */
1825 
1826 static int dev_ifconf(char __user *arg)
1827 {
1828 	struct ifconf ifc;
1829 	struct net_device *dev;
1830 	char __user *pos;
1831 	int len;
1832 	int total;
1833 	int i;
1834 
1835 	/*
1836 	 *	Fetch the caller's info block.
1837 	 */
1838 
1839 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
1840 		return -EFAULT;
1841 
1842 	pos = ifc.ifc_buf;
1843 	len = ifc.ifc_len;
1844 
1845 	/*
1846 	 *	Loop over the interfaces, and write an info block for each.
1847 	 */
1848 
1849 	total = 0;
1850 	for (dev = dev_base; dev; dev = dev->next) {
1851 		for (i = 0; i < NPROTO; i++) {
1852 			if (gifconf_list[i]) {
1853 				int done;
1854 				if (!pos)
1855 					done = gifconf_list[i](dev, NULL, 0);
1856 				else
1857 					done = gifconf_list[i](dev, pos + total,
1858 							       len - total);
1859 				if (done < 0)
1860 					return -EFAULT;
1861 				total += done;
1862 			}
1863 		}
1864   	}
1865 
1866 	/*
1867 	 *	All done.  Write the updated control block back to the caller.
1868 	 */
1869 	ifc.ifc_len = total;
1870 
1871 	/*
1872 	 * 	Both BSD and Solaris return 0 here, so we do too.
1873 	 */
1874 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
1875 }
1876 
1877 #ifdef CONFIG_PROC_FS
1878 /*
1879  *	This is invoked by the /proc filesystem handler to display a device
1880  *	in detail.
1881  */
1882 static __inline__ struct net_device *dev_get_idx(loff_t pos)
1883 {
1884 	struct net_device *dev;
1885 	loff_t i;
1886 
1887 	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
1888 
1889 	return i == pos ? dev : NULL;
1890 }
1891 
1892 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
1893 {
1894 	read_lock(&dev_base_lock);
1895 	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
1896 }
1897 
1898 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1899 {
1900 	++*pos;
1901 	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
1902 }
1903 
1904 void dev_seq_stop(struct seq_file *seq, void *v)
1905 {
1906 	read_unlock(&dev_base_lock);
1907 }
1908 
1909 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
1910 {
1911 	if (dev->get_stats) {
1912 		struct net_device_stats *stats = dev->get_stats(dev);
1913 
1914 		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
1915 				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
1916 			   dev->name, stats->rx_bytes, stats->rx_packets,
1917 			   stats->rx_errors,
1918 			   stats->rx_dropped + stats->rx_missed_errors,
1919 			   stats->rx_fifo_errors,
1920 			   stats->rx_length_errors + stats->rx_over_errors +
1921 			     stats->rx_crc_errors + stats->rx_frame_errors,
1922 			   stats->rx_compressed, stats->multicast,
1923 			   stats->tx_bytes, stats->tx_packets,
1924 			   stats->tx_errors, stats->tx_dropped,
1925 			   stats->tx_fifo_errors, stats->collisions,
1926 			   stats->tx_carrier_errors +
1927 			     stats->tx_aborted_errors +
1928 			     stats->tx_window_errors +
1929 			     stats->tx_heartbeat_errors,
1930 			   stats->tx_compressed);
1931 	} else
1932 		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
1933 }
1934 
1935 /*
1936  *	Called from the PROCfs module. This now uses the new arbitrary-sized
1937  *	/proc/net interface to create /proc/net/dev.
1938  */
1939 static int dev_seq_show(struct seq_file *seq, void *v)
1940 {
1941 	if (v == SEQ_START_TOKEN)
1942 		seq_puts(seq, "Inter-|   Receive                            "
1943 			      "                    |  Transmit\n"
1944 			      " face |bytes    packets errs drop fifo frame "
1945 			      "compressed multicast|bytes    packets errs "
1946 			      "drop fifo colls carrier compressed\n");
1947 	else
1948 		dev_seq_printf_stats(seq, v);
1949 	return 0;
1950 }
1951 
1952 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
1953 {
1954 	struct netif_rx_stats *rc = NULL;
1955 
1956 	while (*pos < NR_CPUS)
1957 	       	if (cpu_online(*pos)) {
1958 			rc = &per_cpu(netdev_rx_stat, *pos);
1959 			break;
1960 		} else
1961 			++*pos;
1962 	return rc;
1963 }
1964 
1965 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
1966 {
1967 	return softnet_get_online(pos);
1968 }
1969 
1970 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1971 {
1972 	++*pos;
1973 	return softnet_get_online(pos);
1974 }
1975 
1976 static void softnet_seq_stop(struct seq_file *seq, void *v)
1977 {
1978 }
1979 
1980 static int softnet_seq_show(struct seq_file *seq, void *v)
1981 {
1982 	struct netif_rx_stats *s = v;
1983 
1984 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1985 		   s->total, s->dropped, s->time_squeeze, 0,
1986 		   0, 0, 0, 0, /* was fastroute */
1987 		   s->cpu_collision);
1988 	return 0;
1989 }
1990 
1991 static struct seq_operations dev_seq_ops = {
1992 	.start = dev_seq_start,
1993 	.next  = dev_seq_next,
1994 	.stop  = dev_seq_stop,
1995 	.show  = dev_seq_show,
1996 };
1997 
1998 static int dev_seq_open(struct inode *inode, struct file *file)
1999 {
2000 	return seq_open(file, &dev_seq_ops);
2001 }
2002 
2003 static struct file_operations dev_seq_fops = {
2004 	.owner	 = THIS_MODULE,
2005 	.open    = dev_seq_open,
2006 	.read    = seq_read,
2007 	.llseek  = seq_lseek,
2008 	.release = seq_release,
2009 };
2010 
2011 static struct seq_operations softnet_seq_ops = {
2012 	.start = softnet_seq_start,
2013 	.next  = softnet_seq_next,
2014 	.stop  = softnet_seq_stop,
2015 	.show  = softnet_seq_show,
2016 };
2017 
2018 static int softnet_seq_open(struct inode *inode, struct file *file)
2019 {
2020 	return seq_open(file, &softnet_seq_ops);
2021 }
2022 
2023 static struct file_operations softnet_seq_fops = {
2024 	.owner	 = THIS_MODULE,
2025 	.open    = softnet_seq_open,
2026 	.read    = seq_read,
2027 	.llseek  = seq_lseek,
2028 	.release = seq_release,
2029 };
2030 
2031 #ifdef WIRELESS_EXT
2032 extern int wireless_proc_init(void);
2033 #else
2034 #define wireless_proc_init() 0
2035 #endif
2036 
2037 static int __init dev_proc_init(void)
2038 {
2039 	int rc = -ENOMEM;
2040 
2041 	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
2042 		goto out;
2043 	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
2044 		goto out_dev;
2045 	if (wireless_proc_init())
2046 		goto out_softnet;
2047 	rc = 0;
2048 out:
2049 	return rc;
2050 out_softnet:
2051 	proc_net_remove("softnet_stat");
2052 out_dev:
2053 	proc_net_remove("dev");
2054 	goto out;
2055 }
2056 #else
2057 #define dev_proc_init() 0
2058 #endif	/* CONFIG_PROC_FS */
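
/*
 *	The seq_file plumbing above makes /proc/net/dev and
 *	/proc/net/softnet_stat plain read-only text files. A minimal
 *	userspace sketch (illustrative only):
 *
 *		#include <stdio.h>
 *
 *		int main(void)
 *		{
 *			char line[512];
 *			FILE *f = fopen("/proc/net/dev", "r");
 *
 *			while (f && fgets(line, sizeof(line), f))
 *				fputs(line, stdout);	// 2 header lines, then 1 per device
 *			if (f)
 *				fclose(f);
 *			return 0;
 *		}
 */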
2059 
2060 
2061 /**
2062  *	netdev_set_master	-	set up master/slave pair
2063  *	@slave: slave device
2064  *	@master: new master device
2065  *
2066  *	Changes the master device of the slave. Pass %NULL to break the
2067  *	bonding. The caller must hold the RTNL semaphore. On a failure
2068  *	a negative errno code is returned. On success the reference counts
2069  *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2070  *	function returns zero.
2071  */
2072 int netdev_set_master(struct net_device *slave, struct net_device *master)
2073 {
2074 	struct net_device *old = slave->master;
2075 
2076 	ASSERT_RTNL();
2077 
2078 	if (master) {
2079 		if (old)
2080 			return -EBUSY;
2081 		dev_hold(master);
2082 	}
2083 
2084 	slave->master = master;
2085 
2086 	synchronize_net();
2087 
2088 	if (old)
2089 		dev_put(old);
2090 
2091 	if (master)
2092 		slave->flags |= IFF_SLAVE;
2093 	else
2094 		slave->flags &= ~IFF_SLAVE;
2095 
2096 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2097 	return 0;
2098 }
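
#if 0
/*
 *	Illustrative sketch (disabled; the example_* names are hypothetical):
 *	how a bonding-style driver might pair and unpair a slave with
 *	netdev_set_master() under the RTNL semaphore, as required above.
 */
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, master);	/* holds a ref on master */
	rtnl_unlock();
	return err;
}

static void example_release(struct net_device *slave)
{
	rtnl_lock();
	netdev_set_master(slave, NULL);	/* drops the ref, clears IFF_SLAVE */
	rtnl_unlock();
}
#endif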
2099 
2100 /**
2101  *	dev_set_promiscuity	- update promiscuity count on a device
2102  *	@dev: device
2103  *	@inc: modifier
2104  *
2105  *	Add or remove promiscuity from a device. While the count in the device
2106  *	remains above zero the interface remains promiscuous. Once it hits zero
2107  *	the device reverts to normal filtering operation. A negative @inc
2108  *	value is used to drop promiscuity on the device.
2109  */
2110 void dev_set_promiscuity(struct net_device *dev, int inc)
2111 {
2112 	unsigned short old_flags = dev->flags;
2113 
2114 	if ((dev->promiscuity += inc) == 0)
2115 		dev->flags &= ~IFF_PROMISC;
2116 	else
2117 		dev->flags |= IFF_PROMISC;
2118 	if (dev->flags != old_flags) {
2119 		dev_mc_upload(dev);
2120 		printk(KERN_INFO "device %s %s promiscuous mode\n",
2121 		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2122 		       					       "left");
2123 	}
2124 }
2125 
2126 /**
2127  *	dev_set_allmulti	- update allmulti count on a device
2128  *	@dev: device
2129  *	@inc: modifier
2130  *
2131  *	Add or remove reception of all multicast frames to a device. While the
2132  *	count in the device remains above zero the interface remains listening
2133  *	to all multicast frames. Once it hits zero the device reverts to normal
2134  *	filtering operation. A negative @inc value is used to drop the counter
2135  *	when releasing a resource needing all multicasts.
2136  */
2137 
2138 void dev_set_allmulti(struct net_device *dev, int inc)
2139 {
2140 	unsigned short old_flags = dev->flags;
2141 
2142 	dev->flags |= IFF_ALLMULTI;
2143 	if ((dev->allmulti += inc) == 0)
2144 		dev->flags &= ~IFF_ALLMULTI;
2145 	if (dev->flags ^ old_flags)
2146 		dev_mc_upload(dev);
2147 }
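
#if 0
/*
 *	Illustrative sketch (disabled; the example_* names are hypothetical):
 *	both counters above are reference counts, so acquire and release must
 *	be symmetric, e.g. for a protocol that must see every frame while
 *	active. Taking the RTNL here is a conservative assumption.
 */
static void example_start_capture(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* count up: go promiscuous */
	dev_set_allmulti(dev, 1);	/* count up: receive all multicast */
	rtnl_unlock();
}

static void example_stop_capture(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* count back down */
	dev_set_allmulti(dev, -1);
	rtnl_unlock();
}
#endif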
2148 
2149 unsigned dev_get_flags(const struct net_device *dev)
2150 {
2151 	unsigned flags;
2152 
2153 	flags = (dev->flags & ~(IFF_PROMISC |
2154 				IFF_ALLMULTI |
2155 				IFF_RUNNING)) |
2156 		(dev->gflags & (IFF_PROMISC |
2157 				IFF_ALLMULTI));
2158 
2159 	if (netif_running(dev) && netif_carrier_ok(dev))
2160 		flags |= IFF_RUNNING;
2161 
2162 	return flags;
2163 }
2164 
2165 int dev_change_flags(struct net_device *dev, unsigned flags)
2166 {
2167 	int ret;
2168 	int old_flags = dev->flags;
2169 
2170 	/*
2171 	 *	Set the flags on our device.
2172 	 */
2173 
2174 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2175 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2176 			       IFF_AUTOMEDIA)) |
2177 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2178 				    IFF_ALLMULTI));
2179 
2180 	/*
2181 	 *	Load in the correct multicast list now the flags have changed.
2182 	 */
2183 
2184 	dev_mc_upload(dev);
2185 
2186 	/*
2187 	 *	Have we downed the interface? We handle IFF_UP ourselves
2188 	 *	according to user attempts to set it, rather than blindly
2189 	 *	setting it.
2190 	 */
2191 
2192 	ret = 0;
2193 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
2194 		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2195 
2196 		if (!ret)
2197 			dev_mc_upload(dev);
2198 	}
2199 
2200 	if (dev->flags & IFF_UP &&
2201 	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2202 					  IFF_VOLATILE)))
2203 		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
2204 
2205 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
2206 		int inc = (flags & IFF_PROMISC) ? +1 : -1;
2207 		dev->gflags ^= IFF_PROMISC;
2208 		dev_set_promiscuity(dev, inc);
2209 	}
2210 
2211 	/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
2212 	   is important. Some (broken) drivers set IFF_PROMISC when
2213 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
2214 	 */
2215 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
2216 		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
2217 		dev->gflags ^= IFF_ALLMULTI;
2218 		dev_set_allmulti(dev, inc);
2219 	}
2220 
2221 	if (old_flags ^ dev->flags)
2222 		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
2223 
2224 	return ret;
2225 }
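
#if 0
/*
 *	Illustrative sketch (disabled; example_bring_up is hypothetical):
 *	toggling IFF_UP through dev_change_flags() routes the transition
 *	through dev_open()/dev_close() as implemented above.
 */
static int example_bring_up(const char *name)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(name);	/* safe: RTNL blocks unregister */
	if (dev)
		err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif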
2226 
2227 int dev_set_mtu(struct net_device *dev, int new_mtu)
2228 {
2229 	int err;
2230 
2231 	if (new_mtu == dev->mtu)
2232 		return 0;
2233 
2234 	/*	MTU must be positive.	 */
2235 	if (new_mtu < 0)
2236 		return -EINVAL;
2237 
2238 	if (!netif_device_present(dev))
2239 		return -ENODEV;
2240 
2241 	err = 0;
2242 	if (dev->change_mtu)
2243 		err = dev->change_mtu(dev, new_mtu);
2244 	else
2245 		dev->mtu = new_mtu;
2246 	if (!err && dev->flags & IFF_UP)
2247 		notifier_call_chain(&netdev_chain,
2248 				    NETDEV_CHANGEMTU, dev);
2249 	return err;
2250 }
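
#if 0
/*
 *	Illustrative sketch (disabled; example_set_mtu is hypothetical):
 *	dev_set_mtu() defers range checking beyond "non-negative" to the
 *	driver's change_mtu hook, and fires NETDEV_CHANGEMTU if the device
 *	is up.
 */
static int example_set_mtu(struct net_device *dev, int mtu)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, mtu);
	rtnl_unlock();
	return err;
}
#endif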
2251 
2252 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2253 {
2254 	int err;
2255 
2256 	if (!dev->set_mac_address)
2257 		return -EOPNOTSUPP;
2258 	if (sa->sa_family != dev->type)
2259 		return -EINVAL;
2260 	if (!netif_device_present(dev))
2261 		return -ENODEV;
2262 	err = dev->set_mac_address(dev, sa);
2263 	if (!err)
2264 		notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
2265 	return err;
2266 }
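
#if 0
/*
 *	Illustrative sketch (disabled; example_set_mac is hypothetical):
 *	sa_family must match dev->type (e.g. ARPHRD_ETHER) per the check
 *	above, and addr is assumed to hold dev->addr_len valid bytes.
 */
static int example_set_mac(struct net_device *dev, const unsigned char *addr)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);
	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);	/* NETDEV_CHANGEADDR on success */
	rtnl_unlock();
	return err;
}
#endif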
2267 
2268 /*
2269  *	Perform the SIOCxIFxxx calls.
2270  */
2271 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2272 {
2273 	int err;
2274 	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2275 
2276 	if (!dev)
2277 		return -ENODEV;
2278 
2279 	switch (cmd) {
2280 		case SIOCGIFFLAGS:	/* Get interface flags */
2281 			ifr->ifr_flags = dev_get_flags(dev);
2282 			return 0;
2283 
2284 		case SIOCSIFFLAGS:	/* Set interface flags */
2285 			return dev_change_flags(dev, ifr->ifr_flags);
2286 
2287 		case SIOCGIFMETRIC:	/* Get the metric on the interface
2288 					   (currently unused) */
2289 			ifr->ifr_metric = 0;
2290 			return 0;
2291 
2292 		case SIOCSIFMETRIC:	/* Set the metric on the interface
2293 					   (currently unused) */
2294 			return -EOPNOTSUPP;
2295 
2296 		case SIOCGIFMTU:	/* Get the MTU of a device */
2297 			ifr->ifr_mtu = dev->mtu;
2298 			return 0;
2299 
2300 		case SIOCSIFMTU:	/* Set the MTU of a device */
2301 			return dev_set_mtu(dev, ifr->ifr_mtu);
2302 
2303 		case SIOCGIFHWADDR:
2304 			if (!dev->addr_len)
2305 				memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
2306 			else
2307 				memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2308 				       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2309 			ifr->ifr_hwaddr.sa_family = dev->type;
2310 			return 0;
2311 
2312 		case SIOCSIFHWADDR:
2313 			return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
2314 
2315 		case SIOCSIFHWBROADCAST:
2316 			if (ifr->ifr_hwaddr.sa_family != dev->type)
2317 				return -EINVAL;
2318 			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2319 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2320 			notifier_call_chain(&netdev_chain,
2321 					    NETDEV_CHANGEADDR, dev);
2322 			return 0;
2323 
2324 		case SIOCGIFMAP:
2325 			ifr->ifr_map.mem_start = dev->mem_start;
2326 			ifr->ifr_map.mem_end   = dev->mem_end;
2327 			ifr->ifr_map.base_addr = dev->base_addr;
2328 			ifr->ifr_map.irq       = dev->irq;
2329 			ifr->ifr_map.dma       = dev->dma;
2330 			ifr->ifr_map.port      = dev->if_port;
2331 			return 0;
2332 
2333 		case SIOCSIFMAP:
2334 			if (dev->set_config) {
2335 				if (!netif_device_present(dev))
2336 					return -ENODEV;
2337 				return dev->set_config(dev, &ifr->ifr_map);
2338 			}
2339 			return -EOPNOTSUPP;
2340 
2341 		case SIOCADDMULTI:
2342 			if (!dev->set_multicast_list ||
2343 			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2344 				return -EINVAL;
2345 			if (!netif_device_present(dev))
2346 				return -ENODEV;
2347 			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2348 					  dev->addr_len, 1);
2349 
2350 		case SIOCDELMULTI:
2351 			if (!dev->set_multicast_list ||
2352 			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2353 				return -EINVAL;
2354 			if (!netif_device_present(dev))
2355 				return -ENODEV;
2356 			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2357 					     dev->addr_len, 1);
2358 
2359 		case SIOCGIFINDEX:
2360 			ifr->ifr_ifindex = dev->ifindex;
2361 			return 0;
2362 
2363 		case SIOCGIFTXQLEN:
2364 			ifr->ifr_qlen = dev->tx_queue_len;
2365 			return 0;
2366 
2367 		case SIOCSIFTXQLEN:
2368 			if (ifr->ifr_qlen < 0)
2369 				return -EINVAL;
2370 			dev->tx_queue_len = ifr->ifr_qlen;
2371 			return 0;
2372 
2373 		case SIOCSIFNAME:
2374 			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2375 			return dev_change_name(dev, ifr->ifr_newname);
2376 
2377 		/*
2378 		 *	Unknown or private ioctl
2379 		 */
2380 
2381 		default:
2382 			if ((cmd >= SIOCDEVPRIVATE &&
2383 			    cmd <= SIOCDEVPRIVATE + 15) ||
2384 			    cmd == SIOCBONDENSLAVE ||
2385 			    cmd == SIOCBONDRELEASE ||
2386 			    cmd == SIOCBONDSETHWADDR ||
2387 			    cmd == SIOCBONDSLAVEINFOQUERY ||
2388 			    cmd == SIOCBONDINFOQUERY ||
2389 			    cmd == SIOCBONDCHANGEACTIVE ||
2390 			    cmd == SIOCGMIIPHY ||
2391 			    cmd == SIOCGMIIREG ||
2392 			    cmd == SIOCSMIIREG ||
2393 			    cmd == SIOCBRADDIF ||
2394 			    cmd == SIOCBRDELIF ||
2395 			    cmd == SIOCWANDEV) {
2396 				err = -EOPNOTSUPP;
2397 				if (dev->do_ioctl) {
2398 					if (netif_device_present(dev))
2399 						err = dev->do_ioctl(dev, ifr,
2400 								    cmd);
2401 					else
2402 						err = -ENODEV;
2403 				}
2404 			} else
2405 				err = -EINVAL;
2406 
2407 	}
2408 	return err;
2409 }
2410 
2411 /*
2412  *	This function handles all "interface"-type I/O control requests. The actual
2413  *	'doing' part of this is dev_ifsioc above.
2414  */
2415 
2416 /**
2417  *	dev_ioctl	-	network device ioctl
2418  *	@cmd: command to issue
2419  *	@arg: pointer to a struct ifreq in user space
2420  *
2421  *	Issue ioctl functions to devices. This is normally called by the
2422  *	user space syscall interfaces but can sometimes be useful for
2423  *	other purposes. The return value is the return from the syscall if
2424  *	positive or a negative errno code on error.
2425  */
2426 
2427 int dev_ioctl(unsigned int cmd, void __user *arg)
2428 {
2429 	struct ifreq ifr;
2430 	int ret;
2431 	char *colon;
2432 
2433 	/* One special case: SIOCGIFCONF takes an ifconf argument
2434 	   and requires a shared lock, because it sleeps writing
2435 	   to user space.
2436 	 */
2437 
2438 	if (cmd == SIOCGIFCONF) {
2439 		rtnl_shlock();
2440 		ret = dev_ifconf((char __user *) arg);
2441 		rtnl_shunlock();
2442 		return ret;
2443 	}
2444 	if (cmd == SIOCGIFNAME)
2445 		return dev_ifname((struct ifreq __user *)arg);
2446 
2447 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2448 		return -EFAULT;
2449 
2450 	ifr.ifr_name[IFNAMSIZ-1] = 0;
2451 
2452 	colon = strchr(ifr.ifr_name, ':');
2453 	if (colon)
2454 		*colon = 0;
2455 
2456 	/*
2457 	 *	See which interface the caller is talking about.
2458 	 */
2459 
2460 	switch (cmd) {
2461 		/*
2462 		 *	These ioctl calls:
2463 		 *	- can be done by all.
2464 		 *	- atomic and do not require locking.
2465 		 *	- return a value
2466 		 */
2467 		case SIOCGIFFLAGS:
2468 		case SIOCGIFMETRIC:
2469 		case SIOCGIFMTU:
2470 		case SIOCGIFHWADDR:
2471 		case SIOCGIFSLAVE:
2472 		case SIOCGIFMAP:
2473 		case SIOCGIFINDEX:
2474 		case SIOCGIFTXQLEN:
2475 			dev_load(ifr.ifr_name);
2476 			read_lock(&dev_base_lock);
2477 			ret = dev_ifsioc(&ifr, cmd);
2478 			read_unlock(&dev_base_lock);
2479 			if (!ret) {
2480 				if (colon)
2481 					*colon = ':';
2482 				if (copy_to_user(arg, &ifr,
2483 						 sizeof(struct ifreq)))
2484 					ret = -EFAULT;
2485 			}
2486 			return ret;
2487 
2488 		case SIOCETHTOOL:
2489 			dev_load(ifr.ifr_name);
2490 			rtnl_lock();
2491 			ret = dev_ethtool(&ifr);
2492 			rtnl_unlock();
2493 			if (!ret) {
2494 				if (colon)
2495 					*colon = ':';
2496 				if (copy_to_user(arg, &ifr,
2497 						 sizeof(struct ifreq)))
2498 					ret = -EFAULT;
2499 			}
2500 			return ret;
2501 
2502 		/*
2503 		 *	These ioctl calls:
2504 		 *	- require superuser power.
2505 		 *	- require strict serialization.
2506 		 *	- return a value
2507 		 */
2508 		case SIOCGMIIPHY:
2509 		case SIOCGMIIREG:
2510 		case SIOCSIFNAME:
2511 			if (!capable(CAP_NET_ADMIN))
2512 				return -EPERM;
2513 			dev_load(ifr.ifr_name);
2514 			rtnl_lock();
2515 			ret = dev_ifsioc(&ifr, cmd);
2516 			rtnl_unlock();
2517 			if (!ret) {
2518 				if (colon)
2519 					*colon = ':';
2520 				if (copy_to_user(arg, &ifr,
2521 						 sizeof(struct ifreq)))
2522 					ret = -EFAULT;
2523 			}
2524 			return ret;
2525 
2526 		/*
2527 		 *	These ioctl calls:
2528 		 *	- require superuser power.
2529 		 *	- require strict serialization.
2530 		 *	- do not return a value
2531 		 */
2532 		case SIOCSIFFLAGS:
2533 		case SIOCSIFMETRIC:
2534 		case SIOCSIFMTU:
2535 		case SIOCSIFMAP:
2536 		case SIOCSIFHWADDR:
2537 		case SIOCSIFSLAVE:
2538 		case SIOCADDMULTI:
2539 		case SIOCDELMULTI:
2540 		case SIOCSIFHWBROADCAST:
2541 		case SIOCSIFTXQLEN:
2542 		case SIOCSMIIREG:
2543 		case SIOCBONDENSLAVE:
2544 		case SIOCBONDRELEASE:
2545 		case SIOCBONDSETHWADDR:
2546 		case SIOCBONDCHANGEACTIVE:
2547 		case SIOCBRADDIF:
2548 		case SIOCBRDELIF:
2549 			if (!capable(CAP_NET_ADMIN))
2550 				return -EPERM;
2551 			/* fall through */
2552 		case SIOCBONDSLAVEINFOQUERY:
2553 		case SIOCBONDINFOQUERY:
2554 			dev_load(ifr.ifr_name);
2555 			rtnl_lock();
2556 			ret = dev_ifsioc(&ifr, cmd);
2557 			rtnl_unlock();
2558 			return ret;
2559 
2560 		case SIOCGIFMEM:
2561 			/* Get the per device memory space. We can add this but
2562 			 * currently do not support it */
2563 		case SIOCSIFMEM:
2564 			/* Set the per device memory buffer space.
2565 			 * Not applicable in our case */
2566 		case SIOCSIFLINK:
2567 			return -EINVAL;
2568 
2569 		/*
2570 		 *	Unknown or private ioctl.
2571 		 */
2572 		default:
2573 			if (cmd == SIOCWANDEV ||
2574 			    (cmd >= SIOCDEVPRIVATE &&
2575 			     cmd <= SIOCDEVPRIVATE + 15)) {
2576 				dev_load(ifr.ifr_name);
2577 				rtnl_lock();
2578 				ret = dev_ifsioc(&ifr, cmd);
2579 				rtnl_unlock();
2580 				if (!ret && copy_to_user(arg, &ifr,
2581 							 sizeof(struct ifreq)))
2582 					ret = -EFAULT;
2583 				return ret;
2584 			}
2585 #ifdef WIRELESS_EXT
2586 			/* Take care of Wireless Extensions */
2587 			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2588 				/* If command is `set a parameter', or
2589 				 * `get the encoding parameters', check if
2590 				 * the user has the right to do it */
2591 				if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2592 					if (!capable(CAP_NET_ADMIN))
2593 						return -EPERM;
2594 				}
2595 				dev_load(ifr.ifr_name);
2596 				rtnl_lock();
2597 				/* Follow me in net/core/wireless.c */
2598 				ret = wireless_process_ioctl(&ifr, cmd);
2599 				rtnl_unlock();
2600 				if (IW_IS_GET(cmd) &&
2601 				    copy_to_user(arg, &ifr,
2602 					    	 sizeof(struct ifreq)))
2603 					ret = -EFAULT;
2604 				return ret;
2605 			}
2606 #endif	/* WIRELESS_EXT */
2607 			return -EINVAL;
2608 	}
2609 }
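
/*
 *	Illustrative only (not part of the build): the usual userspace
 *	read-modify-write pattern for the SIOCGIFFLAGS/SIOCSIFFLAGS pair
 *	dispatched above (the set leg needs CAP_NET_ADMIN).
 *
 *		#include <string.h>
 *		#include <unistd.h>
 *		#include <sys/ioctl.h>
 *		#include <sys/socket.h>
 *		#include <net/if.h>
 *
 *		int bring_up(const char *name)
 *		{
 *			struct ifreq ifr;
 *			int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *			if (fd < 0)
 *				return -1;
 *			memset(&ifr, 0, sizeof(ifr));
 *			strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *			if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {	// read
 *				ifr.ifr_flags |= IFF_UP;		// modify
 *				ioctl(fd, SIOCSIFFLAGS, &ifr);		// write
 *			}
 *			close(fd);
 *			return 0;
 *		}
 */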
2610 
2611 
2612 /**
2613  *	dev_new_index	-	allocate an ifindex
2614  *
2615  *	Returns a suitable unique value for a new device interface
2616  *	number.  The caller must hold the rtnl semaphore or the
2617  *	dev_base_lock to be sure it remains unique.
2618  */
2619 static int dev_new_index(void)
2620 {
2621 	static int ifindex;
2622 	for (;;) {
2623 		if (++ifindex <= 0)
2624 			ifindex = 1;
2625 		if (!__dev_get_by_index(ifindex))
2626 			return ifindex;
2627 	}
2628 }
2629 
2630 static int dev_boot_phase = 1;
2631 
2632 /* Delayed registration/unregistration */
2633 static DEFINE_SPINLOCK(net_todo_list_lock);
2634 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2635 
2636 static inline void net_set_todo(struct net_device *dev)
2637 {
2638 	spin_lock(&net_todo_list_lock);
2639 	list_add_tail(&dev->todo_list, &net_todo_list);
2640 	spin_unlock(&net_todo_list_lock);
2641 }
2642 
2643 /**
2644  *	register_netdevice	- register a network device
2645  *	@dev: device to register
2646  *
2647  *	Take a completed network device structure and add it to the kernel
2648  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2649  *	chain. 0 is returned on success. A negative errno code is returned
2650  *	on a failure to set up the device, or if the name is a duplicate.
2651  *
2652  *	Callers must hold the rtnl semaphore. You may want
2653  *	register_netdev() instead of this.
2654  *
2655  *	BUGS:
2656  *	The locking appears insufficient to guarantee two parallel registers
2657  *	will not get the same name.
2658  */
2659 
2660 int register_netdevice(struct net_device *dev)
2661 {
2662 	struct hlist_head *head;
2663 	struct hlist_node *p;
2664 	int ret;
2665 
2666 	BUG_ON(dev_boot_phase);
2667 	ASSERT_RTNL();
2668 
2669 	/* When net_device structs are persistent, this will be fatal. */
2670 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2671 
2672 	spin_lock_init(&dev->queue_lock);
2673 	spin_lock_init(&dev->xmit_lock);
2674 	dev->xmit_lock_owner = -1;
2675 #ifdef CONFIG_NET_CLS_ACT
2676 	spin_lock_init(&dev->ingress_lock);
2677 #endif
2678 
2679 	ret = alloc_divert_blk(dev);
2680 	if (ret)
2681 		goto out;
2682 
2683 	dev->iflink = -1;
2684 
2685 	/* Init, if this function is available */
2686 	if (dev->init) {
2687 		ret = dev->init(dev);
2688 		if (ret) {
2689 			if (ret > 0)
2690 				ret = -EIO;
2691 			goto out_err;
2692 		}
2693 	}
2694 
2695 	if (!dev_valid_name(dev->name)) {
2696 		ret = -EINVAL;
2697 		goto out_err;
2698 	}
2699 
2700 	dev->ifindex = dev_new_index();
2701 	if (dev->iflink == -1)
2702 		dev->iflink = dev->ifindex;
2703 
2704 	/* Check for existence of name */
2705 	head = dev_name_hash(dev->name);
2706 	hlist_for_each(p, head) {
2707 		struct net_device *d
2708 			= hlist_entry(p, struct net_device, name_hlist);
2709 		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2710 			ret = -EEXIST;
2711  			goto out_err;
2712 		}
2713  	}
2714 
2715 	/* Fix illegal SG+CSUM combinations. */
2716 	if ((dev->features & NETIF_F_SG) &&
2717 	    !(dev->features & (NETIF_F_IP_CSUM |
2718 			       NETIF_F_NO_CSUM |
2719 			       NETIF_F_HW_CSUM))) {
2720 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2721 		       dev->name);
2722 		dev->features &= ~NETIF_F_SG;
2723 	}
2724 
2725 	/* TSO requires that SG is present as well. */
2726 	if ((dev->features & NETIF_F_TSO) &&
2727 	    !(dev->features & NETIF_F_SG)) {
2728 		printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
2729 		       dev->name);
2730 		dev->features &= ~NETIF_F_TSO;
2731 	}
2732 	if (dev->features & NETIF_F_UFO) {
2733 		if (!(dev->features & NETIF_F_HW_CSUM)) {
2734 			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2735 					"NETIF_F_HW_CSUM feature.\n",
2736 							dev->name);
2737 			dev->features &= ~NETIF_F_UFO;
2738 		}
2739 		if (!(dev->features & NETIF_F_SG)) {
2740 			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2741 					"NETIF_F_SG feature.\n",
2742 					dev->name);
2743 			dev->features &= ~NETIF_F_UFO;
2744 		}
2745 	}
2746 
2747 	/*
2748 	 *	Install a nil rebuild_header routine; it should never be
2749 	 *	called and serves purely as a bug trap.
2750 	 */
2751 
2752 	if (!dev->rebuild_header)
2753 		dev->rebuild_header = default_rebuild_header;
2754 
2755 	/*
2756 	 *	Default initial state at registration is that the
2757 	 *	device is present.
2758 	 */
2759 
2760 	set_bit(__LINK_STATE_PRESENT, &dev->state);
2761 
2762 	dev->next = NULL;
2763 	dev_init_scheduler(dev);
2764 	write_lock_bh(&dev_base_lock);
2765 	*dev_tail = dev;
2766 	dev_tail = &dev->next;
2767 	hlist_add_head(&dev->name_hlist, head);
2768 	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2769 	dev_hold(dev);
2770 	dev->reg_state = NETREG_REGISTERING;
2771 	write_unlock_bh(&dev_base_lock);
2772 
2773 	/* Notify protocols that a new device appeared. */
2774 	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2775 
2776 	/* Finish registration after unlock */
2777 	net_set_todo(dev);
2778 	ret = 0;
2779 
2780 out:
2781 	return ret;
2782 out_err:
2783 	free_divert_blk(dev);
2784 	goto out;
2785 }
2786 
2787 /**
2788  *	register_netdev	- register a network device
2789  *	@dev: device to register
2790  *
2791  *	Take a completed network device structure and add it to the kernel
2792  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2793  *	chain. 0 is returned on success. A negative errno code is returned
2794  *	on a failure to set up the device, or if the name is a duplicate.
2795  *
2796  *	This is a wrapper around register_netdevice() that takes the rtnl semaphore
2797  *	and expands the device name if you passed a format string to
2798  *	alloc_netdev.
2799  */
2800 int register_netdev(struct net_device *dev)
2801 {
2802 	int err;
2803 
2804 	rtnl_lock();
2805 
2806 	/*
2807 	 * If the name is a format string the caller wants us to do a
2808 	 * name allocation.
2809 	 */
2810 	if (strchr(dev->name, '%')) {
2811 		err = dev_alloc_name(dev, dev->name);
2812 		if (err < 0)
2813 			goto out;
2814 	}
2815 
2816 	/*
2817 	 * Back compatibility hook. Kill this one in 2.5
2818 	 */
2819 	if (dev->name[0] == 0 || dev->name[0] == ' ') {
2820 		err = dev_alloc_name(dev, "eth%d");
2821 		if (err < 0)
2822 			goto out;
2823 	}
2824 
2825 	err = register_netdevice(dev);
2826 out:
2827 	rtnl_unlock();
2828 	return err;
2829 }
2830 EXPORT_SYMBOL(register_netdev);
2831 
2832 /*
2833  * netdev_wait_allrefs - wait until all references are gone.
2834  *
2835  * This is called when unregistering network devices.
2836  *
2837  * Any protocol or device that holds a reference should register
2838  * for netdevice notification, and cleanup and put back the
2839  * reference if they receive an UNREGISTER event.
2840  * We can get stuck here if buggy protocols don't correctly
2841  * call dev_put.
2842  */
2843 static void netdev_wait_allrefs(struct net_device *dev)
2844 {
2845 	unsigned long rebroadcast_time, warning_time;
2846 
2847 	rebroadcast_time = warning_time = jiffies;
2848 	while (atomic_read(&dev->refcnt) != 0) {
2849 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2850 			rtnl_shlock();
2851 
2852 			/* Rebroadcast unregister notification */
2853 			notifier_call_chain(&netdev_chain,
2854 					    NETDEV_UNREGISTER, dev);
2855 
2856 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2857 				     &dev->state)) {
2858 				/* We must not have linkwatch events
2859 				 * pending on unregister. If this
2860 				 * happens, we simply run the queue
2861 				 * unscheduled, resulting in a noop
2862 				 * for this device.
2863 				 */
2864 				linkwatch_run_queue();
2865 			}
2866 
2867 			rtnl_shunlock();
2868 
2869 			rebroadcast_time = jiffies;
2870 		}
2871 
2872 		msleep(250);
2873 
2874 		if (time_after(jiffies, warning_time + 10 * HZ)) {
2875 			printk(KERN_EMERG "unregister_netdevice: "
2876 			       "waiting for %s to become free. Usage "
2877 			       "count = %d\n",
2878 			       dev->name, atomic_read(&dev->refcnt));
2879 			warning_time = jiffies;
2880 		}
2881 	}
2882 }
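
#if 0
/*
 *	Illustrative sketch (disabled; the example_* names are hypothetical):
 *	the contract netdev_wait_allrefs() relies on. Any code holding a
 *	long-lived device reference should drop it on NETDEV_UNREGISTER, or
 *	the loop above will rebroadcast the notification forever.
 */
static struct net_device *example_held_dev;

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_held_dev) {
		example_held_dev = NULL;
		dev_put(dev);	/* release our reference */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};
/* registered once at init: register_netdevice_notifier(&example_netdev_notifier); */
#endif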
2883 
2884 /* The sequence is:
2885  *
2886  *	rtnl_lock();
2887  *	...
2888  *	register_netdevice(x1);
2889  *	register_netdevice(x2);
2890  *	...
2891  *	unregister_netdevice(y1);
2892  *	unregister_netdevice(y2);
2893  *      ...
2894  *	rtnl_unlock();
2895  *	free_netdev(y1);
2896  *	free_netdev(y2);
2897  *
2898  * We are invoked by rtnl_unlock() after it drops the semaphore.
2899  * This allows us to deal with problems:
2900  * 1) We can create/delete sysfs objects which invoke hotplug
2901  *    without deadlocking with linkwatch via keventd.
2902  * 2) Since we run with the RTNL semaphore not held, we can sleep
2903  *    safely in order to wait for the netdev refcnt to drop to zero.
2904  */
2905 static DECLARE_MUTEX(net_todo_run_mutex);
2906 void netdev_run_todo(void)
2907 {
2908 	struct list_head list = LIST_HEAD_INIT(list);
2909 	int err;
2910 
2911 
2912 	/* Need to guard against multiple CPUs getting out of order. */
2913 	down(&net_todo_run_mutex);
2914 
2915 	/* Not safe to do outside the semaphore.  We must not return
2916 	 * until all unregister events invoked by the local processor
2917 	 * have been completed (either by this todo run, or one on
2918 	 * another cpu).
2919 	 */
2920 	if (list_empty(&net_todo_list))
2921 		goto out;
2922 
2923 	/* Snapshot list, allow later requests */
2924 	spin_lock(&net_todo_list_lock);
2925 	list_splice_init(&net_todo_list, &list);
2926 	spin_unlock(&net_todo_list_lock);
2927 
2928 	while (!list_empty(&list)) {
2929 		struct net_device *dev
2930 			= list_entry(list.next, struct net_device, todo_list);
2931 		list_del(&dev->todo_list);
2932 
2933 		switch(dev->reg_state) {
2934 		case NETREG_REGISTERING:
2935 			err = netdev_register_sysfs(dev);
2936 			if (err)
2937 				printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
2938 				       dev->name, err);
2939 			dev->reg_state = NETREG_REGISTERED;
2940 			break;
2941 
2942 		case NETREG_UNREGISTERING:
2943 			netdev_unregister_sysfs(dev);
2944 			dev->reg_state = NETREG_UNREGISTERED;
2945 
2946 			netdev_wait_allrefs(dev);
2947 
2948 			/* paranoia */
2949 			BUG_ON(atomic_read(&dev->refcnt));
2950 			BUG_TRAP(!dev->ip_ptr);
2951 			BUG_TRAP(!dev->ip6_ptr);
2952 			BUG_TRAP(!dev->dn_ptr);
2953 
2954 
2955 			/* It must be the very last action,
2956 			 * after this 'dev' may point to freed up memory.
2957 			 */
2958 			if (dev->destructor)
2959 				dev->destructor(dev);
2960 			break;
2961 
2962 		default:
2963 			printk(KERN_ERR "network todo '%s' but state %d\n",
2964 			       dev->name, dev->reg_state);
2965 			break;
2966 		}
2967 	}
2968 
2969 out:
2970 	up(&net_todo_run_mutex);
2971 }
2972 
2973 /**
2974  *	alloc_netdev - allocate network device
2975  *	@sizeof_priv:	size of private data to allocate space for
2976  *	@name:		device name format string
2977  *	@setup:		callback to initialize device
2978  *
2979  *	Allocates a struct net_device with private data area for driver use
2980  *	and performs basic initialization.
2981  */
2982 struct net_device *alloc_netdev(int sizeof_priv, const char *name,
2983 		void (*setup)(struct net_device *))
2984 {
2985 	void *p;
2986 	struct net_device *dev;
2987 	int alloc_size;
2988 
2989 	/* ensure 32-byte alignment of both the device and private area */
2990 	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
2991 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
2992 
2993 	p = kmalloc(alloc_size, GFP_KERNEL);
2994 	if (!p) {
2995 		printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
2996 		return NULL;
2997 	}
2998 	memset(p, 0, alloc_size);
2999 
3000 	dev = (struct net_device *)
3001 		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3002 	dev->padded = (char *)dev - (char *)p;
3003 
3004 	if (sizeof_priv)
3005 		dev->priv = netdev_priv(dev);
3006 
3007 	setup(dev);
3008 	strcpy(dev->name, name);
3009 	return dev;
3010 }
3011 EXPORT_SYMBOL(alloc_netdev);
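
#if 0
/*
 *	Illustrative sketch (disabled; the example_* names are hypothetical):
 *	the usual driver lifecycle around alloc_netdev(). register_netdev()
 *	expands the "%d" in the name, and free_netdev() handles the
 *	never-registered error path (NETREG_UNINITIALIZED) as well.
 */
struct example_priv {
	int example_state;
};

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "example%d",
			   ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* takes the RTNL, expands the name */
	if (err)
		free_netdev(dev);	/* safe: still NETREG_UNINITIALIZED */
	return err;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* refs drained via the todo list */
	free_netdev(dev);
}
#endif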
3012 
3013 /**
3014  *	free_netdev - free network device
3015  *	@dev: device
3016  *
3017  *	This function does the last stage of destroying an allocated device
3018  * 	interface. The reference to the device object is released.
3019  *	If this is the last reference then it will be freed.
3020  */
3021 void free_netdev(struct net_device *dev)
3022 {
3023 #ifdef CONFIG_SYSFS
3024 	/*  Compatibility with error handling in drivers */
3025 	if (dev->reg_state == NETREG_UNINITIALIZED) {
3026 		kfree((char *)dev - dev->padded);
3027 		return;
3028 	}
3029 
3030 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3031 	dev->reg_state = NETREG_RELEASED;
3032 
3033 	/* will free via class release */
3034 	class_device_put(&dev->class_dev);
3035 #else
3036 	kfree((char *)dev - dev->padded);
3037 #endif
3038 }
3039 
3040 /* Synchronize with packet receive processing. */
3041 void synchronize_net(void)
3042 {
3043 	might_sleep();
3044 	synchronize_rcu();
3045 }
3046 
3047 /**
3048  *	unregister_netdevice - remove device from the kernel
3049  *	@dev: device
3050  *
3051  *	This function shuts down a device interface and removes it
3052  *	from the kernel tables. On success 0 is returned, on a failure
3053  *	a negative errno code is returned.
3054  *
3055  *	Callers must hold the rtnl semaphore.  You may want
3056  *	unregister_netdev() instead of this.
3057  */
3058 
3059 int unregister_netdevice(struct net_device *dev)
3060 {
3061 	struct net_device *d, **dp;
3062 
3063 	BUG_ON(dev_boot_phase);
3064 	ASSERT_RTNL();
3065 
3066 	/* Some devices call this without ever having registered, to unwind a failed initialization. */
3067 	if (dev->reg_state == NETREG_UNINITIALIZED) {
3068 		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3069 				  "was registered\n", dev->name, dev);
3070 		return -ENODEV;
3071 	}
3072 
3073 	BUG_ON(dev->reg_state != NETREG_REGISTERED);
3074 
3075 	/* If device is running, close it first. */
3076 	if (dev->flags & IFF_UP)
3077 		dev_close(dev);
3078 
3079 	/* And unlink it from device chain. */
3080 	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3081 		if (d == dev) {
3082 			write_lock_bh(&dev_base_lock);
3083 			hlist_del(&dev->name_hlist);
3084 			hlist_del(&dev->index_hlist);
3085 			if (dev_tail == &dev->next)
3086 				dev_tail = dp;
3087 			*dp = d->next;
3088 			write_unlock_bh(&dev_base_lock);
3089 			break;
3090 		}
3091 	}
3092 	if (!d) {
3093 		printk(KERN_ERR "unregister net_device: '%s' not found\n",
3094 		       dev->name);
3095 		return -ENODEV;
3096 	}
3097 
3098 	dev->reg_state = NETREG_UNREGISTERING;
3099 
3100 	synchronize_net();
3101 
3102 	/* Shutdown queueing discipline. */
3103 	dev_shutdown(dev);
3104 
3105 
3106 	/* Notify protocols that we are about to destroy
3107 	   this device. They should clean up all their state.
3108 	*/
3109 	notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3110 
3111 	/*
3112 	 *	Flush the multicast chain
3113 	 */
3114 	dev_mc_discard(dev);
3115 
3116 	if (dev->uninit)
3117 		dev->uninit(dev);
3118 
3119 	/* Notifier chain MUST detach us from master device. */
3120 	BUG_TRAP(!dev->master);
3121 
3122 	free_divert_blk(dev);
3123 
3124 	/* Finish processing unregister after unlock */
3125 	net_set_todo(dev);
3126 
3127 	synchronize_net();
3128 
3129 	dev_put(dev);
3130 	return 0;
3131 }
3132 
3133 /**
3134  *	unregister_netdev - remove device from the kernel
3135  *	@dev: device
3136  *
3137  *	This function shuts down a device interface and removes it
3138  *	from the kernel tables. On success 0 is returned, on a failure
3139  *	a negative errno code is returned.
3140  *
3141  *	This is just a wrapper for unregister_netdevice that takes
3142  *	the rtnl semaphore.  In general you want to use this and not
3143  *	unregister_netdevice.
3144  */
3145 void unregister_netdev(struct net_device *dev)
3146 {
3147 	rtnl_lock();
3148 	unregister_netdevice(dev);
3149 	rtnl_unlock();
3150 }
3151 
3152 EXPORT_SYMBOL(unregister_netdev);
3153 
3154 #ifdef CONFIG_HOTPLUG_CPU
3155 static int dev_cpu_callback(struct notifier_block *nfb,
3156 			    unsigned long action,
3157 			    void *ocpu)
3158 {
3159 	struct sk_buff **list_skb;
3160 	struct net_device **list_net;
3161 	struct sk_buff *skb;
3162 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
3163 	struct softnet_data *sd, *oldsd;
3164 
3165 	if (action != CPU_DEAD)
3166 		return NOTIFY_OK;
3167 
3168 	local_irq_disable();
3169 	cpu = smp_processor_id();
3170 	sd = &per_cpu(softnet_data, cpu);
3171 	oldsd = &per_cpu(softnet_data, oldcpu);
3172 
3173 	/* Find end of our completion_queue. */
3174 	list_skb = &sd->completion_queue;
3175 	while (*list_skb)
3176 		list_skb = &(*list_skb)->next;
3177 	/* Append completion queue from offline CPU. */
3178 	*list_skb = oldsd->completion_queue;
3179 	oldsd->completion_queue = NULL;
3180 
3181 	/* Find end of our output_queue. */
3182 	list_net = &sd->output_queue;
3183 	while (*list_net)
3184 		list_net = &(*list_net)->next_sched;
3185 	/* Append output queue from offline CPU. */
3186 	*list_net = oldsd->output_queue;
3187 	oldsd->output_queue = NULL;
3188 
3189 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3190 	local_irq_enable();
3191 
3192 	/* Process offline CPU's input_pkt_queue */
3193 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3194 		netif_rx(skb);
3195 
3196 	return NOTIFY_OK;
3197 }
3198 #endif /* CONFIG_HOTPLUG_CPU */
3199 
3200 
3201 /*
3202  *	Initialize the DEV module. At boot time this walks the device list and
3203  *	unhooks any devices that fail to initialise (normally hardware not
3204  *	present) and leaves us with a valid list of present and active devices.
3205  *
3206  */
3207 
3208 /*
3209  *       This is called single-threaded during boot, so no need
3210  *       to take the rtnl semaphore.
3211  */
3212 static int __init net_dev_init(void)
3213 {
3214 	int i, rc = -ENOMEM;
3215 
3216 	BUG_ON(!dev_boot_phase);
3217 
3218 	net_random_init();
3219 
3220 	if (dev_proc_init())
3221 		goto out;
3222 
3223 	if (netdev_sysfs_init())
3224 		goto out;
3225 
3226 	INIT_LIST_HEAD(&ptype_all);
3227 	for (i = 0; i < 16; i++)
3228 		INIT_LIST_HEAD(&ptype_base[i]);
3229 
3230 	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3231 		INIT_HLIST_HEAD(&dev_name_head[i]);
3232 
3233 	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3234 		INIT_HLIST_HEAD(&dev_index_head[i]);
3235 
3236 	/*
3237 	 *	Initialise the packet receive queues.
3238 	 */
3239 
3240 	for_each_cpu(i) {
3241 		struct softnet_data *queue;
3242 
3243 		queue = &per_cpu(softnet_data, i);
3244 		skb_queue_head_init(&queue->input_pkt_queue);
3245 		queue->completion_queue = NULL;
3246 		INIT_LIST_HEAD(&queue->poll_list);
3247 		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3248 		queue->backlog_dev.weight = weight_p;
3249 		queue->backlog_dev.poll = process_backlog;
3250 		atomic_set(&queue->backlog_dev.refcnt, 1);
3251 	}
3252 
3253 	dev_boot_phase = 0;
3254 
3255 	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3256 	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3257 
3258 	hotcpu_notifier(dev_cpu_callback, 0);
3259 	dst_init();
3260 	dev_mcast_init();
3261 	rc = 0;
3262 out:
3263 	return rc;
3264 }
3265 
3266 subsys_initcall(net_dev_init);
3267 
3268 EXPORT_SYMBOL(__dev_get_by_index);
3269 EXPORT_SYMBOL(__dev_get_by_name);
3270 EXPORT_SYMBOL(__dev_remove_pack);
3271 EXPORT_SYMBOL(__skb_linearize);
3272 EXPORT_SYMBOL(dev_valid_name);
3273 EXPORT_SYMBOL(dev_add_pack);
3274 EXPORT_SYMBOL(dev_alloc_name);
3275 EXPORT_SYMBOL(dev_close);
3276 EXPORT_SYMBOL(dev_get_by_flags);
3277 EXPORT_SYMBOL(dev_get_by_index);
3278 EXPORT_SYMBOL(dev_get_by_name);
3279 EXPORT_SYMBOL(dev_open);
3280 EXPORT_SYMBOL(dev_queue_xmit);
3281 EXPORT_SYMBOL(dev_remove_pack);
3282 EXPORT_SYMBOL(dev_set_allmulti);
3283 EXPORT_SYMBOL(dev_set_promiscuity);
3284 EXPORT_SYMBOL(dev_change_flags);
3285 EXPORT_SYMBOL(dev_set_mtu);
3286 EXPORT_SYMBOL(dev_set_mac_address);
3287 EXPORT_SYMBOL(free_netdev);
3288 EXPORT_SYMBOL(netdev_boot_setup_check);
3289 EXPORT_SYMBOL(netdev_set_master);
3290 EXPORT_SYMBOL(netdev_state_change);
3291 EXPORT_SYMBOL(netif_receive_skb);
3292 EXPORT_SYMBOL(netif_rx);
3293 EXPORT_SYMBOL(register_gifconf);
3294 EXPORT_SYMBOL(register_netdevice);
3295 EXPORT_SYMBOL(register_netdevice_notifier);
3296 EXPORT_SYMBOL(skb_checksum_help);
3297 EXPORT_SYMBOL(synchronize_net);
3298 EXPORT_SYMBOL(unregister_netdevice);
3299 EXPORT_SYMBOL(unregister_netdevice_notifier);
3300 EXPORT_SYMBOL(net_enable_timestamp);
3301 EXPORT_SYMBOL(net_disable_timestamp);
3302 EXPORT_SYMBOL(dev_get_flags);
3303 
3304 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3305 EXPORT_SYMBOL(br_handle_frame_hook);
3306 EXPORT_SYMBOL(br_fdb_get_hook);
3307 EXPORT_SYMBOL(br_fdb_put_hook);
3308 #endif
3309 
3310 #ifdef CONFIG_KMOD
3311 EXPORT_SYMBOL(dev_load);
3312 #endif
3313 
3314 EXPORT_PER_CPU_SYMBOL(softnet_data);
3315