xref: /linux/net/core/dev.c (revision 87c2ce3b9305b9b723faeedf6e32ef703ec9b33a)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/config.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/sched.h>
83 #include <linux/string.h>
84 #include <linux/mm.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/notifier.h>
93 #include <linux/skbuff.h>
94 #include <net/sock.h>
95 #include <linux/rtnetlink.h>
96 #include <linux/proc_fs.h>
97 #include <linux/seq_file.h>
98 #include <linux/stat.h>
99 #include <linux/if_bridge.h>
100 #include <linux/divert.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <linux/highmem.h>
105 #include <linux/init.h>
106 #include <linux/kmod.h>
107 #include <linux/module.h>
108 #include <linux/kallsyms.h>
109 #include <linux/netpoll.h>
110 #include <linux/rcupdate.h>
111 #include <linux/delay.h>
112 #ifdef CONFIG_NET_RADIO
113 #include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
114 #include <net/iw_handler.h>
115 #endif	/* CONFIG_NET_RADIO */
116 #include <asm/current.h>
117 
118 /*
119  *	The list of packet types we will receive (as opposed to discard)
120  *	and the routines to invoke.
121  *
122  *	Why 16? Because with 16 the only overlap we get on a hash of the
123  *	low nibble of the protocol value is RARP/SNAP/X.25.
124  *
125  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
126  *             sure which should go first, but I bet it won't make much
127  *             difference if we are running VLANs.  The good news is that
128  *             this protocol won't be in the list unless compiled in, so
129  *             the average user (w/out VLANs) will not be adversely affected.
130  *             --BLG
131  *
132  *		0800	IP
133  *		8100    802.1Q VLAN
134  *		0001	802.3
135  *		0002	AX.25
136  *		0004	802.2
137  *		8035	RARP
138  *		0005	SNAP
139  *		0805	X.25
140  *		0806	ARP
141  *		8137	IPX
142  *		0009	Localtalk
143  *		86DD	IPv6
144  */
145 
146 static DEFINE_SPINLOCK(ptype_lock);
147 static struct list_head ptype_base[16];	/* 16 way hashed list */
148 static struct list_head ptype_all;		/* Taps */
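/*
 * Example: the bucket for a handler is derived from the low nibble of the
 * protocol number, as dev_add_pack() below does with "ntohs(pt->type) & 15".
 * ETH_P_IP (0x0800) therefore hashes to bucket 0 and ETH_P_ARP (0x0806) to
 * bucket 6.
 */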
149 
150 /*
151  * The @dev_base list is protected by @dev_base_lock and the rtnl
152  * semaphore.
153  *
154  * Pure readers hold dev_base_lock for reading.
155  *
156  * Writers must hold the rtnl semaphore while they loop through the
157  * dev_base list, and hold dev_base_lock for writing when they do the
158  * actual updates.  This allows pure readers to access the list even
159  * while a writer is preparing to update it.
160  *
161  * To put it another way, dev_base_lock is held for writing only to
162  * protect against pure readers; the rtnl semaphore provides the
163  * protection against other writers.
164  *
165  * For example usages, see register_netdevice() and
166  * unregister_netdevice(), which must be called with the rtnl
167  * semaphore held.
168  */
169 struct net_device *dev_base;
170 static struct net_device **dev_tail = &dev_base;
171 DEFINE_RWLOCK(dev_base_lock);
172 
173 EXPORT_SYMBOL(dev_base);
174 EXPORT_SYMBOL(dev_base_lock);
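/*
 * A minimal sketch of the pure-reader pattern described above (assuming the
 * caller only needs the list to stay stable while walking it; the helper
 * named below is hypothetical):
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for (dev = dev_base; dev; dev = dev->next)
 *		inspect_device(dev);
 *	read_unlock(&dev_base_lock);
 *
 * Writers instead hold the rtnl semaphore and take dev_base_lock for writing
 * only around the actual list updates.
 */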
175 
176 #define NETDEV_HASHBITS	8
177 static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
178 static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
179 
180 static inline struct hlist_head *dev_name_hash(const char *name)
181 {
182 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
183 	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
184 }
185 
186 static inline struct hlist_head *dev_index_hash(int ifindex)
187 {
188 	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
189 }
190 
191 /*
192  *	Our notifier list
193  */
194 
195 static struct notifier_block *netdev_chain;
196 
197 /*
198  *	Device drivers call our routines to queue packets here. We empty the
199  *	queue in the local softnet handler.
200  */
201 DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
202 
203 #ifdef CONFIG_SYSFS
204 extern int netdev_sysfs_init(void);
205 extern int netdev_register_sysfs(struct net_device *);
206 extern void netdev_unregister_sysfs(struct net_device *);
207 #else
208 #define netdev_sysfs_init()	 	(0)
209 #define netdev_register_sysfs(dev)	(0)
210 #define	netdev_unregister_sysfs(dev)	do { } while(0)
211 #endif
212 
213 
214 /*******************************************************************************
215 
216 		Protocol management and registration routines
217 
218 *******************************************************************************/
219 
220 /*
221  *	For efficiency
222  */
223 
224 int netdev_nit;
225 
226 /*
227  *	Add a protocol ID to the list. Now that the input handler is
228  *	smarter we can dispense with all the messy stuff that used to be
229  *	here.
230  *
231  *	BEWARE!!! Protocol handlers that mangle input packets
232  *	MUST BE last in the hash buckets, and checking protocol handlers
233  *	MUST start from the promiscuous ptype_all chain in net_bh.
234  *	This is true today; do not change it.
235  *	Explanation: if a protocol handler that mangles packets were
236  *	first in the list, it could not sense that the packet is cloned
237  *	and should be copied-on-write, so it would change the clone and
238  *	subsequent readers would get a broken packet.
239  *							--ANK (980803)
240  */
241 
242 /**
243  *	dev_add_pack - add packet handler
244  *	@pt: packet type declaration
245  *
246  *	Add a protocol handler to the networking stack. The passed &packet_type
247  *	is linked into kernel lists and may not be freed until it has been
248  *	removed from the kernel lists.
249  *
250  *	This call does not sleep, therefore it cannot
251  *	guarantee that all CPUs that are in the middle of receiving packets
252  *	will see the new packet type (until the next received packet).
253  */
254 
255 void dev_add_pack(struct packet_type *pt)
256 {
257 	int hash;
258 
259 	spin_lock_bh(&ptype_lock);
260 	if (pt->type == htons(ETH_P_ALL)) {
261 		netdev_nit++;
262 		list_add_rcu(&pt->list, &ptype_all);
263 	} else {
264 		hash = ntohs(pt->type) & 15;
265 		list_add_rcu(&pt->list, &ptype_base[hash]);
266 	}
267 	spin_unlock_bh(&ptype_lock);
268 }
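/*
 * A minimal registration sketch (the handler name and the ethertype value
 * are only illustrative):
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		... process or drop the skb, e.g. kfree_skb(skb) ...
 *		return 0;
 *	}
 *
 *	static struct packet_type my_packet_type = {
 *		.type	= __constant_htons(0x88B5),
 *		.func	= my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *
 * The handler is matched against incoming packets in netif_receive_skb()
 * below; leaving .dev NULL means it sees traffic from every device.
 */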
269 
270 /**
271  *	__dev_remove_pack	 - remove packet handler
272  *	@pt: packet type declaration
273  *
274  *	Remove a protocol handler that was previously added to the kernel
275  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
276  *	from the kernel lists and can be freed or reused once this function
277  *	returns.
278  *
279  *      The packet type might still be in use by receivers
280  *	and must not be freed until after all the CPUs have gone
281  *	through a quiescent state.
282  */
283 void __dev_remove_pack(struct packet_type *pt)
284 {
285 	struct list_head *head;
286 	struct packet_type *pt1;
287 
288 	spin_lock_bh(&ptype_lock);
289 
290 	if (pt->type == htons(ETH_P_ALL)) {
291 		netdev_nit--;
292 		head = &ptype_all;
293 	} else
294 		head = &ptype_base[ntohs(pt->type) & 15];
295 
296 	list_for_each_entry(pt1, head, list) {
297 		if (pt == pt1) {
298 			list_del_rcu(&pt->list);
299 			goto out;
300 		}
301 	}
302 
303 	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
304 out:
305 	spin_unlock_bh(&ptype_lock);
306 }
307 /**
308  *	dev_remove_pack	 - remove packet handler
309  *	@pt: packet type declaration
310  *
311  *	Remove a protocol handler that was previously added to the kernel
312  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
313  *	from the kernel lists and can be freed or reused once this function
314  *	returns.
315  *
316  *	This call sleeps to guarantee that no CPU is looking at the packet
317  *	type after return.
318  */
319 void dev_remove_pack(struct packet_type *pt)
320 {
321 	__dev_remove_pack(pt);
322 
323 	synchronize_net();
324 }
325 
326 /******************************************************************************
327 
328 		      Device Boot-time Settings Routines
329 
330 *******************************************************************************/
331 
332 /* Boot time configuration table */
333 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
334 
335 /**
336  *	netdev_boot_setup_add	- add new setup entry
337  *	@name: name of the device
338  *	@map: configured settings for the device
339  *
340  *	Adds a new setup entry to the dev_boot_setup list.  The function
341  *	returns 0 on error and 1 on success.  This is a generic routine for
342  *	all netdevices.
343  */
344 static int netdev_boot_setup_add(char *name, struct ifmap *map)
345 {
346 	struct netdev_boot_setup *s;
347 	int i;
348 
349 	s = dev_boot_setup;
350 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
351 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
352 			memset(s[i].name, 0, sizeof(s[i].name));
353 			strcpy(s[i].name, name);
354 			memcpy(&s[i].map, map, sizeof(s[i].map));
355 			break;
356 		}
357 	}
358 
359 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
360 }
361 
362 /**
363  *	netdev_boot_setup_check	- check boot time settings
364  *	@dev: the netdevice
365  *
366  * 	Check boot time settings for the device.
367  *	Any settings found are applied to the device for use
368  *	later during device probing.
369  *	Returns 0 if no settings are found, 1 if they are.
370  */
371 int netdev_boot_setup_check(struct net_device *dev)
372 {
373 	struct netdev_boot_setup *s = dev_boot_setup;
374 	int i;
375 
376 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
377 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
378 		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
379 			dev->irq 	= s[i].map.irq;
380 			dev->base_addr 	= s[i].map.base_addr;
381 			dev->mem_start 	= s[i].map.mem_start;
382 			dev->mem_end 	= s[i].map.mem_end;
383 			return 1;
384 		}
385 	}
386 	return 0;
387 }
388 
389 
390 /**
391  *	netdev_boot_base	- get address from boot time settings
392  *	@prefix: prefix for network device
393  *	@unit: id for network device
394  *
395  * 	Check boot time settings for the base address of the device.
396  *	Any settings found are applied to the device for use
397  *	later during device probing.
398  *	Returns 0 if no settings are found.
399  */
400 unsigned long netdev_boot_base(const char *prefix, int unit)
401 {
402 	const struct netdev_boot_setup *s = dev_boot_setup;
403 	char name[IFNAMSIZ];
404 	int i;
405 
406 	sprintf(name, "%s%d", prefix, unit);
407 
408 	/*
409 	 * If the device is already registered then return a base of 1
410 	 * to indicate that this interface should not be probed
411 	 */
412 	if (__dev_get_by_name(name))
413 		return 1;
414 
415 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
416 		if (!strcmp(name, s[i].name))
417 			return s[i].map.base_addr;
418 	return 0;
419 }
420 
421 /*
422  * Saves at boot time configured settings for any netdevice.
423  */
424 int __init netdev_boot_setup(char *str)
425 {
426 	int ints[5];
427 	struct ifmap map;
428 
429 	str = get_options(str, ARRAY_SIZE(ints), ints);
430 	if (!str || !*str)
431 		return 0;
432 
433 	/* Save settings */
434 	memset(&map, 0, sizeof(map));
435 	if (ints[0] > 0)
436 		map.irq = ints[1];
437 	if (ints[0] > 1)
438 		map.base_addr = ints[2];
439 	if (ints[0] > 2)
440 		map.mem_start = ints[3];
441 	if (ints[0] > 3)
442 		map.mem_end = ints[4];
443 
444 	/* Add new entry to the list */
445 	return netdev_boot_setup_add(str, &map);
446 }
447 
448 __setup("netdev=", netdev_boot_setup);
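/*
 * Example: a boot command line of
 *
 *	netdev=5,0x240,0,0,eth0
 *
 * is parsed above as irq=5, base_addr=0x240, mem_start=0, mem_end=0, and the
 * trailing string "eth0" is stored as the entry name by
 * netdev_boot_setup_add().
 */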
449 
450 /*******************************************************************************
451 
452 			    Device Interface Subroutines
453 
454 *******************************************************************************/
455 
456 /**
457  *	__dev_get_by_name	- find a device by its name
458  *	@name: name to find
459  *
460  *	Find an interface by name. Must be called under RTNL semaphore
461  *	or @dev_base_lock. If the name is found a pointer to the device
462  *	is returned. If the name is not found then %NULL is returned. The
463  *	reference counters are not incremented so the caller must be
464  *	careful with locks.
465  */
466 
467 struct net_device *__dev_get_by_name(const char *name)
468 {
469 	struct hlist_node *p;
470 
471 	hlist_for_each(p, dev_name_hash(name)) {
472 		struct net_device *dev
473 			= hlist_entry(p, struct net_device, name_hlist);
474 		if (!strncmp(dev->name, name, IFNAMSIZ))
475 			return dev;
476 	}
477 	return NULL;
478 }
479 
480 /**
481  *	dev_get_by_name		- find a device by its name
482  *	@name: name to find
483  *
484  *	Find an interface by name. This can be called from any
485  *	context and does its own locking. The returned handle has
486  *	the usage count incremented and the caller must use dev_put() to
487  *	release it when it is no longer needed. %NULL is returned if no
488  *	matching device is found.
489  */
490 
491 struct net_device *dev_get_by_name(const char *name)
492 {
493 	struct net_device *dev;
494 
495 	read_lock(&dev_base_lock);
496 	dev = __dev_get_by_name(name);
497 	if (dev)
498 		dev_hold(dev);
499 	read_unlock(&dev_base_lock);
500 	return dev;
501 }
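/*
 * Typical usage (the interface name is illustrative):
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */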
502 
503 /**
504  *	__dev_get_by_index - find a device by its ifindex
505  *	@ifindex: index of device
506  *
507  *	Search for an interface by index. Returns a pointer to the device,
508  *	or %NULL if it is not found. The device has not
509  *	had its reference counter increased so the caller must be careful
510  *	about locking. The caller must hold either the RTNL semaphore
511  *	or @dev_base_lock.
512  */
513 
514 struct net_device *__dev_get_by_index(int ifindex)
515 {
516 	struct hlist_node *p;
517 
518 	hlist_for_each(p, dev_index_hash(ifindex)) {
519 		struct net_device *dev
520 			= hlist_entry(p, struct net_device, index_hlist);
521 		if (dev->ifindex == ifindex)
522 			return dev;
523 	}
524 	return NULL;
525 }
526 
527 
528 /**
529  *	dev_get_by_index - find a device by its ifindex
530  *	@ifindex: index of device
531  *
532  *	Search for an interface by index. Returns a pointer to the device,
533  *	or %NULL if it is not found. The device returned has
534  *	had a reference added and the pointer is safe until the user calls
535  *	dev_put to indicate they have finished with it.
536  */
537 
538 struct net_device *dev_get_by_index(int ifindex)
539 {
540 	struct net_device *dev;
541 
542 	read_lock(&dev_base_lock);
543 	dev = __dev_get_by_index(ifindex);
544 	if (dev)
545 		dev_hold(dev);
546 	read_unlock(&dev_base_lock);
547 	return dev;
548 }
549 
550 /**
551  *	dev_getbyhwaddr - find a device by its hardware address
552  *	@type: media type of device
553  *	@ha: hardware address
554  *
555  *	Search for an interface by MAC address. Returns a pointer to the
556  *	device, or %NULL if it is not found. The caller must hold the
557  *	rtnl semaphore. The returned device has not had its ref count increased
558  *	and the caller must therefore be careful about locking
559  *
560  *	BUGS:
561  *	If the API was consistent this would be __dev_get_by_hwaddr
562  */
563 
564 struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
565 {
566 	struct net_device *dev;
567 
568 	ASSERT_RTNL();
569 
570 	for (dev = dev_base; dev; dev = dev->next)
571 		if (dev->type == type &&
572 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
573 			break;
574 	return dev;
575 }
576 
577 EXPORT_SYMBOL(dev_getbyhwaddr);
578 
579 struct net_device *dev_getfirstbyhwtype(unsigned short type)
580 {
581 	struct net_device *dev;
582 
583 	rtnl_lock();
584 	for (dev = dev_base; dev; dev = dev->next) {
585 		if (dev->type == type) {
586 			dev_hold(dev);
587 			break;
588 		}
589 	}
590 	rtnl_unlock();
591 	return dev;
592 }
593 
594 EXPORT_SYMBOL(dev_getfirstbyhwtype);
595 
596 /**
597  *	dev_get_by_flags - find any device with given flags
598  *	@if_flags: IFF_* values
599  *	@mask: bitmask of bits in if_flags to check
600  *
601  *	Search for any interface with the given flags. Returns a pointer to
602  *	the device, or %NULL if none is found. The device returned has
603  *	had a reference added and the pointer is safe until the user calls
604  *	dev_put to indicate they have finished with it.
605  */
606 
607 struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
608 {
609 	struct net_device *dev;
610 
611 	read_lock(&dev_base_lock);
612 	for (dev = dev_base; dev != NULL; dev = dev->next) {
613 		if (((dev->flags ^ if_flags) & mask) == 0) {
614 			dev_hold(dev);
615 			break;
616 		}
617 	}
618 	read_unlock(&dev_base_lock);
619 	return dev;
620 }
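/*
 * Example: find any interface that is up and is not a loopback device,
 * then drop the reference again:
 *
 *	struct net_device *dev = dev_get_by_flags(IFF_UP, IFF_UP | IFF_LOOPBACK);
 *
 *	if (dev)
 *		dev_put(dev);
 */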
621 
622 /**
623  *	dev_valid_name - check if name is okay for network device
624  *	@name: name string
625  *
626  *	Network device names need to be valid file names
627  *	to allow sysfs to work.
628  */
629 int dev_valid_name(const char *name)
630 {
631 	return !(*name == '\0'
632 		 || !strcmp(name, ".")
633 		 || !strcmp(name, "..")
634 		 || strchr(name, '/'));
635 }
636 
637 /**
638  *	dev_alloc_name - allocate a name for a device
639  *	@dev: device
640  *	@name: name format string
641  *
642  *	Passed a format string - eg "lt%d" - it will try to find a suitable
643  *	id. Not efficient for many devices, but it is not called often. The caller
644  *	must hold the dev_base or rtnl lock while allocating the name and
645  *	adding the device in order to avoid duplicates. Returns the number
646  *	of the unit assigned or a negative errno code.
647  */
648 
649 int dev_alloc_name(struct net_device *dev, const char *name)
650 {
651 	int i = 0;
652 	char buf[IFNAMSIZ];
653 	const char *p;
654 	const int max_netdevices = 8*PAGE_SIZE;
655 	long *inuse;
656 	struct net_device *d;
657 
658 	p = strnchr(name, IFNAMSIZ-1, '%');
659 	if (p) {
660 		/*
661 		 * Verify the string as this thing may have come from
662 		 * the user.  There must be either one "%d" and no other "%"
663 		 * characters.
664 		 */
665 		if (p[1] != 'd' || strchr(p + 2, '%'))
666 			return -EINVAL;
667 
668 		/* Use one page as a bit array of possible slots */
669 		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
670 		if (!inuse)
671 			return -ENOMEM;
672 
673 		for (d = dev_base; d; d = d->next) {
674 			if (!sscanf(d->name, name, &i))
675 				continue;
676 			if (i < 0 || i >= max_netdevices)
677 				continue;
678 
679 			/*  avoid cases where sscanf is not exact inverse of printf */
680 			snprintf(buf, sizeof(buf), name, i);
681 			if (!strncmp(buf, d->name, IFNAMSIZ))
682 				set_bit(i, inuse);
683 		}
684 
685 		i = find_first_zero_bit(inuse, max_netdevices);
686 		free_page((unsigned long) inuse);
687 	}
688 
689 	snprintf(buf, sizeof(buf), name, i);
690 	if (!__dev_get_by_name(buf)) {
691 		strlcpy(dev->name, buf, IFNAMSIZ);
692 		return i;
693 	}
694 
695 	/* It is possible to run out of possible slots
696 	 * when the name is long and there isn't enough space left
697 	 * for the digits, or if all bits are used.
698 	 */
699 	return -ENFILE;
700 }
701 
702 
703 /**
704  *	dev_change_name - change name of a device
705  *	@dev: device
706  *	@newname: name (or format string) must be at least IFNAMSIZ
707  *
708  *	Change the name of a device. Format strings such as "eth%d"
709  *	can be passed for wildcarding.
710  */
711 int dev_change_name(struct net_device *dev, char *newname)
712 {
713 	int err = 0;
714 
715 	ASSERT_RTNL();
716 
717 	if (dev->flags & IFF_UP)
718 		return -EBUSY;
719 
720 	if (!dev_valid_name(newname))
721 		return -EINVAL;
722 
723 	if (strchr(newname, '%')) {
724 		err = dev_alloc_name(dev, newname);
725 		if (err < 0)
726 			return err;
727 		strcpy(newname, dev->name);
728 	}
729 	else if (__dev_get_by_name(newname))
730 		return -EEXIST;
731 	else
732 		strlcpy(dev->name, newname, IFNAMSIZ);
733 
734 	err = class_device_rename(&dev->class_dev, dev->name);
735 	if (!err) {
736 		hlist_del(&dev->name_hlist);
737 		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
738 		notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
739 	}
740 
741 	return err;
742 }
743 
744 /**
745  *	netdev_features_change - device changes features
746  *	@dev: device to cause notification
747  *
748  *	Called to indicate a device has changed features.
749  */
750 void netdev_features_change(struct net_device *dev)
751 {
752 	notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
753 }
754 EXPORT_SYMBOL(netdev_features_change);
755 
756 /**
757  *	netdev_state_change - device changes state
758  *	@dev: device to cause notification
759  *
760  *	Called to indicate a device has changed state. This function calls
761  *	the notifier chains for netdev_chain and sends a NEWLINK message
762  *	to the routing socket.
763  */
764 void netdev_state_change(struct net_device *dev)
765 {
766 	if (dev->flags & IFF_UP) {
767 		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
768 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
769 	}
770 }
771 
772 /**
773  *	dev_load 	- load a network module
774  *	@name: name of interface
775  *
776  *	If a network interface is not present and the process has suitable
777  *	privileges this function loads the module. If module loading is not
778  *	available in this kernel then it becomes a nop.
779  */
780 
781 void dev_load(const char *name)
782 {
783 	struct net_device *dev;
784 
785 	read_lock(&dev_base_lock);
786 	dev = __dev_get_by_name(name);
787 	read_unlock(&dev_base_lock);
788 
789 	if (!dev && capable(CAP_SYS_MODULE))
790 		request_module("%s", name);
791 }
792 
793 static int default_rebuild_header(struct sk_buff *skb)
794 {
795 	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
796 	       skb->dev ? skb->dev->name : "NULL!!!");
797 	kfree_skb(skb);
798 	return 1;
799 }
800 
801 
802 /**
803  *	dev_open	- prepare an interface for use.
804  *	@dev:	device to open
805  *
806  *	Takes a device from down to up state. The device's private open
807  *	function is invoked and then the multicast lists are loaded. Finally
808  *	the device is moved into the up state and a %NETDEV_UP message is
809  *	sent to the netdev notifier chain.
810  *
811  *	Calling this function on an active interface is a nop. On a failure
812  *	a negative errno code is returned.
813  */
814 int dev_open(struct net_device *dev)
815 {
816 	int ret = 0;
817 
818 	/*
819 	 *	Is it already up?
820 	 */
821 
822 	if (dev->flags & IFF_UP)
823 		return 0;
824 
825 	/*
826 	 *	Is it even present?
827 	 */
828 	if (!netif_device_present(dev))
829 		return -ENODEV;
830 
831 	/*
832 	 *	Call device private open method
833 	 */
834 	set_bit(__LINK_STATE_START, &dev->state);
835 	if (dev->open) {
836 		ret = dev->open(dev);
837 		if (ret)
838 			clear_bit(__LINK_STATE_START, &dev->state);
839 	}
840 
841  	/*
842 	 *	If it went open OK then:
843 	 */
844 
845 	if (!ret) {
846 		/*
847 		 *	Set the flags.
848 		 */
849 		dev->flags |= IFF_UP;
850 
851 		/*
852 		 *	Initialize multicasting status
853 		 */
854 		dev_mc_upload(dev);
855 
856 		/*
857 		 *	Wakeup transmit queue engine
858 		 */
859 		dev_activate(dev);
860 
861 		/*
862 		 *	... and announce new interface.
863 		 */
864 		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
865 	}
866 	return ret;
867 }
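/*
 * Sketch of a typical caller (dev_open() is normally reached via
 * dev_change_flags() with the rtnl semaphore already held):
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */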
868 
869 /**
870  *	dev_close - shutdown an interface.
871  *	@dev: device to shutdown
872  *
873  *	This function moves an active device into down state. A
874  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
875  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
876  *	chain.
877  */
878 int dev_close(struct net_device *dev)
879 {
880 	if (!(dev->flags & IFF_UP))
881 		return 0;
882 
883 	/*
884 	 *	Tell people we are going down, so that they can
885 	 *	prepare for it while the device is still operating.
886 	 */
887 	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
888 
889 	dev_deactivate(dev);
890 
891 	clear_bit(__LINK_STATE_START, &dev->state);
892 
893 	/* Synchronize with any scheduled poll. We cannot touch the poll list;
894 	 * it may even be on a different cpu. So just clear netif_running()
895 	 * and wait until the poll really happens. Actually, the best place
896 	 * for this is inside dev->stop() after the device has stopped its irq
897 	 * engine, but that requires more changes in the drivers. */
898 
899 	smp_mb__after_clear_bit(); /* Commit netif_running(). */
900 	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
901 		/* No hurry. */
902 		msleep(1);
903 	}
904 
905 	/*
906 	 *	Call the device specific close. This cannot fail
907 	 *	and is done only if the device is UP.
908 	 *
909 	 *	We allow it to be called even after a DETACH hot-plug
910 	 *	event.
911 	 */
912 	if (dev->stop)
913 		dev->stop(dev);
914 
915 	/*
916 	 *	Device is now down.
917 	 */
918 
919 	dev->flags &= ~IFF_UP;
920 
921 	/*
922 	 * Tell people we are down
923 	 */
924 	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
925 
926 	return 0;
927 }
928 
929 
930 /*
931  *	Device change register/unregister. These are not inline or static
932  *	as we export them to the world.
933  */
934 
935 /**
936  *	register_netdevice_notifier - register a network notifier block
937  *	@nb: notifier
938  *
939  *	Register a notifier to be called when network device events occur.
940  *	The notifier passed is linked into the kernel structures and must
941  *	not be reused until it has been unregistered. A negative errno code
942  *	is returned on a failure.
943  *
944  * 	When registered, all registration and up events are replayed
945  *	to the new notifier to allow it to have a race-free
946  *	view of the network device list.
947  */
948 
949 int register_netdevice_notifier(struct notifier_block *nb)
950 {
951 	struct net_device *dev;
952 	int err;
953 
954 	rtnl_lock();
955 	err = notifier_chain_register(&netdev_chain, nb);
956 	if (!err) {
957 		for (dev = dev_base; dev; dev = dev->next) {
958 			nb->notifier_call(nb, NETDEV_REGISTER, dev);
959 
960 			if (dev->flags & IFF_UP)
961 				nb->notifier_call(nb, NETDEV_UP, dev);
962 		}
963 	}
964 	rtnl_unlock();
965 	return err;
966 }
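/*
 * A registration sketch (the handler name is illustrative).  Thanks to the
 * replay above, the notifier sees NETDEV_REGISTER/NETDEV_UP for devices
 * that already exist:
 *
 *	static int my_netdev_event(struct notifier_block *this,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			... device is up ...
 *			break;
 *		case NETDEV_DOWN:
 *			... device went down ...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */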
967 
968 /**
969  *	unregister_netdevice_notifier - unregister a network notifier block
970  *	@nb: notifier
971  *
972  *	Unregister a notifier previously registered by
973  *	register_netdevice_notifier(). The notifier is unlinked from the
974  *	kernel structures and may then be reused. A negative errno code
975  *	is returned on a failure.
976  */
977 
978 int unregister_netdevice_notifier(struct notifier_block *nb)
979 {
980 	return notifier_chain_unregister(&netdev_chain, nb);
981 }
982 
983 /**
984  *	call_netdevice_notifiers - call all network notifier blocks
985  *      @val: value passed unmodified to notifier function
986  *      @v:   pointer passed unmodified to notifier function
987  *
988  *	Call all network notifier blocks.  Parameters and return value
989  *	are as for notifier_call_chain().
990  */
991 
992 int call_netdevice_notifiers(unsigned long val, void *v)
993 {
994 	return notifier_call_chain(&netdev_chain, val, v);
995 }
996 
997 /* When > 0 there are consumers of rx skb time stamps */
998 static atomic_t netstamp_needed = ATOMIC_INIT(0);
999 
1000 void net_enable_timestamp(void)
1001 {
1002 	atomic_inc(&netstamp_needed);
1003 }
1004 
1005 void net_disable_timestamp(void)
1006 {
1007 	atomic_dec(&netstamp_needed);
1008 }
1009 
1010 void __net_timestamp(struct sk_buff *skb)
1011 {
1012 	struct timeval tv;
1013 
1014 	do_gettimeofday(&tv);
1015 	skb_set_timestamp(skb, &tv);
1016 }
1017 EXPORT_SYMBOL(__net_timestamp);
1018 
1019 static inline void net_timestamp(struct sk_buff *skb)
1020 {
1021 	if (atomic_read(&netstamp_needed))
1022 		__net_timestamp(skb);
1023 	else {
1024 		skb->tstamp.off_sec = 0;
1025 		skb->tstamp.off_usec = 0;
1026 	}
1027 }
1028 
1029 /*
1030  *	Support routine. Sends outgoing frames to any network
1031  *	taps currently in use.
1032  */
1033 
1034 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1035 {
1036 	struct packet_type *ptype;
1037 
1038 	net_timestamp(skb);
1039 
1040 	rcu_read_lock();
1041 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1042 		/* Never send packets back to the socket
1043 		 * they originated from - MvS (miquels@drinkel.ow.org)
1044 		 */
1045 		if ((ptype->dev == dev || !ptype->dev) &&
1046 		    (ptype->af_packet_priv == NULL ||
1047 		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
1048 			struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1049 			if (!skb2)
1050 				break;
1051 
1052 			/* skb->nh should be correctly
1053 			   set by sender, so that the second statement is
1054 			   just protection against buggy protocols.
1055 			 */
1056 			skb2->mac.raw = skb2->data;
1057 
1058 			if (skb2->nh.raw < skb2->data ||
1059 			    skb2->nh.raw > skb2->tail) {
1060 				if (net_ratelimit())
1061 					printk(KERN_CRIT "protocol %04x is "
1062 					       "buggy, dev %s\n",
1063 					       skb2->protocol, dev->name);
1064 				skb2->nh.raw = skb2->data;
1065 			}
1066 
1067 			skb2->h.raw = skb2->nh.raw;
1068 			skb2->pkt_type = PACKET_OUTGOING;
1069 			ptype->func(skb2, skb->dev, ptype, skb->dev);
1070 		}
1071 	}
1072 	rcu_read_unlock();
1073 }
1074 
1075 /*
1076  * Invalidate hardware checksum when packet is to be mangled, and
1077  * complete checksum manually on outgoing path.
1078  */
1079 int skb_checksum_help(struct sk_buff *skb, int inward)
1080 {
1081 	unsigned int csum;
1082 	int ret = 0, offset = skb->h.raw - skb->data;
1083 
1084 	if (inward) {
1085 		skb->ip_summed = CHECKSUM_NONE;
1086 		goto out;
1087 	}
1088 
1089 	if (skb_cloned(skb)) {
1090 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1091 		if (ret)
1092 			goto out;
1093 	}
1094 
1095 	BUG_ON(offset > (int)skb->len);
1096 	csum = skb_checksum(skb, offset, skb->len-offset, 0);
1097 
1098 	offset = skb->tail - skb->h.raw;
1099 	BUG_ON(offset <= 0);
1100 	BUG_ON(skb->csum + 2 > offset);
1101 
1102 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
1103 	skb->ip_summed = CHECKSUM_NONE;
1104 out:
1105 	return ret;
1106 }
1107 
1108 /* Take action when hardware reception checksum errors are detected. */
1109 #ifdef CONFIG_BUG
1110 void netdev_rx_csum_fault(struct net_device *dev)
1111 {
1112 	if (net_ratelimit()) {
1113 		printk(KERN_ERR "%s: hw csum failure.\n",
1114 			dev ? dev->name : "<unknown>");
1115 		dump_stack();
1116 	}
1117 }
1118 EXPORT_SYMBOL(netdev_rx_csum_fault);
1119 #endif
1120 
1121 #ifdef CONFIG_HIGHMEM
1122 /* Actually, we should eliminate this check as soon as we know that:
1123  * 1. an IOMMU is present and can map all of memory, or
1124  * 2. no high memory really exists on this machine.
1125  */
1126 
1127 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1128 {
1129 	int i;
1130 
1131 	if (dev->features & NETIF_F_HIGHDMA)
1132 		return 0;
1133 
1134 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1135 		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1136 			return 1;
1137 
1138 	return 0;
1139 }
1140 #else
1141 #define illegal_highdma(dev, skb)	(0)
1142 #endif
1143 
1144 /* Keep head the same: replace data */
1145 int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
1146 {
1147 	unsigned int size;
1148 	u8 *data;
1149 	long offset;
1150 	struct skb_shared_info *ninfo;
1151 	int headerlen = skb->data - skb->head;
1152 	int expand = (skb->tail + skb->data_len) - skb->end;
1153 
1154 	if (skb_shared(skb))
1155 		BUG();
1156 
1157 	if (expand <= 0)
1158 		expand = 0;
1159 
1160 	size = skb->end - skb->head + expand;
1161 	size = SKB_DATA_ALIGN(size);
1162 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
1163 	if (!data)
1164 		return -ENOMEM;
1165 
1166 	/* Copy entire thing */
1167 	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
1168 		BUG();
1169 
1170 	/* Set up shinfo */
1171 	ninfo = (struct skb_shared_info*)(data + size);
1172 	atomic_set(&ninfo->dataref, 1);
1173 	ninfo->tso_size = skb_shinfo(skb)->tso_size;
1174 	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
1175 	ninfo->nr_frags = 0;
1176 	ninfo->frag_list = NULL;
1177 
1178 	/* Offset between the two in bytes */
1179 	offset = data - skb->head;
1180 
1181 	/* Free old data. */
1182 	skb_release_data(skb);
1183 
1184 	skb->head = data;
1185 	skb->end  = data + size;
1186 
1187 	/* Set up new pointers */
1188 	skb->h.raw   += offset;
1189 	skb->nh.raw  += offset;
1190 	skb->mac.raw += offset;
1191 	skb->tail    += offset;
1192 	skb->data    += offset;
1193 
1194 	/* We are no longer a clone, even if we were. */
1195 	skb->cloned    = 0;
1196 
1197 	skb->tail     += skb->data_len;
1198 	skb->data_len  = 0;
1199 	return 0;
1200 }
1201 
1202 #define HARD_TX_LOCK(dev, cpu) {			\
1203 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
1204 		spin_lock(&dev->xmit_lock);		\
1205 		dev->xmit_lock_owner = cpu;		\
1206 	}						\
1207 }
1208 
1209 #define HARD_TX_UNLOCK(dev) {				\
1210 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
1211 		dev->xmit_lock_owner = -1;		\
1212 		spin_unlock(&dev->xmit_lock);		\
1213 	}						\
1214 }
1215 
1216 /**
1217  *	dev_queue_xmit - transmit a buffer
1218  *	@skb: buffer to transmit
1219  *
1220  *	Queue a buffer for transmission to a network device. The caller must
1221  *	have set the device and priority and built the buffer before calling
1222  *	this function. The function can be called from an interrupt.
1223  *
1224  *	A negative errno code is returned on a failure. A success does not
1225  *	guarantee the frame will be transmitted as it may be dropped due
1226  *	to congestion or traffic shaping.
1227  *
1228  * -----------------------------------------------------------------------------------
1229  *      I notice this method can also return errors from the queue disciplines,
1230  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
1231  *      be positive.
1232  *
1233  *      Regardless of the return value, the skb is consumed, so it is currently
1234  *      difficult to retry a send to this method.  (You can bump the ref count
1235  *      before sending to hold a reference for retry if you are careful.)
1236  *
1237  *      When calling this method, interrupts MUST be enabled.  This is because
1238  *      the BH enable code must have IRQs enabled so that it will not deadlock.
1239  *          --BLG
1240  */
1241 
1242 int dev_queue_xmit(struct sk_buff *skb)
1243 {
1244 	struct net_device *dev = skb->dev;
1245 	struct Qdisc *q;
1246 	int rc = -ENOMEM;
1247 
1248 	if (skb_shinfo(skb)->frag_list &&
1249 	    !(dev->features & NETIF_F_FRAGLIST) &&
1250 	    __skb_linearize(skb, GFP_ATOMIC))
1251 		goto out_kfree_skb;
1252 
1253 	/* Fragmented skb is linearized if device does not support SG,
1254 	 * or if at least one of fragments is in highmem and device
1255 	 * does not support DMA from it.
1256 	 */
1257 	if (skb_shinfo(skb)->nr_frags &&
1258 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1259 	    __skb_linearize(skb, GFP_ATOMIC))
1260 		goto out_kfree_skb;
1261 
1262 	/* If packet is not checksummed and device does not support
1263 	 * checksumming for this protocol, complete checksumming here.
1264 	 */
1265 	if (skb->ip_summed == CHECKSUM_HW &&
1266 	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
1267 	     (!(dev->features & NETIF_F_IP_CSUM) ||
1268 	      skb->protocol != htons(ETH_P_IP))))
1269 	      	if (skb_checksum_help(skb, 0))
1270 	      		goto out_kfree_skb;
1271 
1272 	spin_lock_prefetch(&dev->queue_lock);
1273 
1274 	/* Disable soft irqs for various locks below. Also
1275 	 * stops preemption for RCU.
1276 	 */
1277 	local_bh_disable();
1278 
1279 	/* Updates of qdisc are serialized by queue_lock.
1280 	 * The struct Qdisc which is pointed to by qdisc is now a
1281 	 * rcu structure - it may be accessed without acquiring
1282 	 * a lock (but the structure may be stale.) The freeing of the
1283 	 * qdisc will be deferred until it's known that there are no
1284 	 * more references to it.
1285 	 *
1286 	 * If the qdisc has an enqueue function, we still need to
1287 	 * hold the queue_lock before calling it, since queue_lock
1288 	 * also serializes access to the device queue.
1289 	 */
1290 
1291 	q = rcu_dereference(dev->qdisc);
1292 #ifdef CONFIG_NET_CLS_ACT
1293 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1294 #endif
1295 	if (q->enqueue) {
1296 		/* Grab device queue */
1297 		spin_lock(&dev->queue_lock);
1298 
1299 		rc = q->enqueue(skb, q);
1300 
1301 		qdisc_run(dev);
1302 
1303 		spin_unlock(&dev->queue_lock);
1304 		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1305 		goto out;
1306 	}
1307 
1308 	/* The device has no queue. Common case for software devices:
1309 	   loopback, all sorts of tunnels...
1310 
1311 	   Really, it is unlikely that xmit_lock protection is necessary here
1312 	   (f.e. loopback and IP tunnels are clean, ignoring statistics
1313 	   counters.)
1314 	   However, it is possible that they rely on the protection
1315 	   we provide here.
1316 
1317 	   So check this and take the lock. It is not prone to deadlocks.
1318 	   Alternatively, shoot the noqueue qdisc - it is even simpler 8)
1319 	 */
1320 	if (dev->flags & IFF_UP) {
1321 		int cpu = smp_processor_id(); /* ok because BHs are off */
1322 
1323 		if (dev->xmit_lock_owner != cpu) {
1324 
1325 			HARD_TX_LOCK(dev, cpu);
1326 
1327 			if (!netif_queue_stopped(dev)) {
1328 				if (netdev_nit)
1329 					dev_queue_xmit_nit(skb, dev);
1330 
1331 				rc = 0;
1332 				if (!dev->hard_start_xmit(skb, dev)) {
1333 					HARD_TX_UNLOCK(dev);
1334 					goto out;
1335 				}
1336 			}
1337 			HARD_TX_UNLOCK(dev);
1338 			if (net_ratelimit())
1339 				printk(KERN_CRIT "Virtual device %s asks to "
1340 				       "queue packet!\n", dev->name);
1341 		} else {
1342 			/* Recursion is detected! It is possible,
1343 			 * unfortunately */
1344 			if (net_ratelimit())
1345 				printk(KERN_CRIT "Dead loop on virtual device "
1346 				       "%s, fix it urgently!\n", dev->name);
1347 		}
1348 	}
1349 
1350 	rc = -ENETDOWN;
1351 	local_bh_enable();
1352 
1353 out_kfree_skb:
1354 	kfree_skb(skb);
1355 	return rc;
1356 out:
1357 	local_bh_enable();
1358 	return rc;
1359 }
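/*
 * Minimal transmit sketch (assumes the caller has built a complete frame in
 * skb and holds a reference to dev):
 *
 *	skb->dev = dev;
 *	skb->priority = 0;
 *	rc = dev_queue_xmit(skb);
 *
 * The skb is consumed whatever rc says, so it must not be touched afterwards.
 */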
1360 
1361 
1362 /*=======================================================================
1363 			Receiver routines
1364   =======================================================================*/
1365 
1366 int netdev_max_backlog = 1000;
1367 int netdev_budget = 300;
1368 int weight_p = 64;            /* old backlog weight */
1369 
1370 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1371 
1372 
1373 /**
1374  *	netif_rx	-	post buffer to the network code
1375  *	@skb: buffer to post
1376  *
1377  *	This function receives a packet from a device driver and queues it for
1378  *	the upper (protocol) levels to process.  It always succeeds. The buffer
1379  *	may be dropped during processing for congestion control or by the
1380  *	protocol layers.
1381  *
1382  *	return values:
1383  *	NET_RX_SUCCESS	(no congestion)
1384  *	NET_RX_CN_LOW   (low congestion)
1385  *	NET_RX_CN_MOD   (moderate congestion)
1386  *	NET_RX_CN_HIGH  (high congestion)
1387  *	NET_RX_DROP     (packet was dropped)
1388  *
1389  */
1390 
1391 int netif_rx(struct sk_buff *skb)
1392 {
1393 	struct softnet_data *queue;
1394 	unsigned long flags;
1395 
1396 	/* if netpoll wants it, pretend we never saw it */
1397 	if (netpoll_rx(skb))
1398 		return NET_RX_DROP;
1399 
1400 	if (!skb->tstamp.off_sec)
1401 		net_timestamp(skb);
1402 
1403 	/*
1404 	 * The code is rearranged so that the path is shortest
1405 	 * when the CPU is congested but still operating.
1406 	 */
1407 	local_irq_save(flags);
1408 	queue = &__get_cpu_var(softnet_data);
1409 
1410 	__get_cpu_var(netdev_rx_stat).total++;
1411 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1412 		if (queue->input_pkt_queue.qlen) {
1413 enqueue:
1414 			dev_hold(skb->dev);
1415 			__skb_queue_tail(&queue->input_pkt_queue, skb);
1416 			local_irq_restore(flags);
1417 			return NET_RX_SUCCESS;
1418 		}
1419 
1420 		netif_rx_schedule(&queue->backlog_dev);
1421 		goto enqueue;
1422 	}
1423 
1424 	__get_cpu_var(netdev_rx_stat).dropped++;
1425 	local_irq_restore(flags);
1426 
1427 	kfree_skb(skb);
1428 	return NET_RX_DROP;
1429 }
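/*
 * Typical non-NAPI driver receive path (a sketch): once the frame has been
 * copied into an skb, the driver hands it to the stack from its interrupt
 * handler:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */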
1430 
1431 int netif_rx_ni(struct sk_buff *skb)
1432 {
1433 	int err;
1434 
1435 	preempt_disable();
1436 	err = netif_rx(skb);
1437 	if (local_softirq_pending())
1438 		do_softirq();
1439 	preempt_enable();
1440 
1441 	return err;
1442 }
1443 
1444 EXPORT_SYMBOL(netif_rx_ni);
1445 
1446 static inline struct net_device *skb_bond(struct sk_buff *skb)
1447 {
1448 	struct net_device *dev = skb->dev;
1449 
1450 	if (dev->master)
1451 		skb->dev = dev->master;
1452 
1453 	return dev;
1454 }
1455 
1456 static void net_tx_action(struct softirq_action *h)
1457 {
1458 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
1459 
1460 	if (sd->completion_queue) {
1461 		struct sk_buff *clist;
1462 
1463 		local_irq_disable();
1464 		clist = sd->completion_queue;
1465 		sd->completion_queue = NULL;
1466 		local_irq_enable();
1467 
1468 		while (clist) {
1469 			struct sk_buff *skb = clist;
1470 			clist = clist->next;
1471 
1472 			BUG_TRAP(!atomic_read(&skb->users));
1473 			__kfree_skb(skb);
1474 		}
1475 	}
1476 
1477 	if (sd->output_queue) {
1478 		struct net_device *head;
1479 
1480 		local_irq_disable();
1481 		head = sd->output_queue;
1482 		sd->output_queue = NULL;
1483 		local_irq_enable();
1484 
1485 		while (head) {
1486 			struct net_device *dev = head;
1487 			head = head->next_sched;
1488 
1489 			smp_mb__before_clear_bit();
1490 			clear_bit(__LINK_STATE_SCHED, &dev->state);
1491 
1492 			if (spin_trylock(&dev->queue_lock)) {
1493 				qdisc_run(dev);
1494 				spin_unlock(&dev->queue_lock);
1495 			} else {
1496 				netif_schedule(dev);
1497 			}
1498 		}
1499 	}
1500 }
1501 
1502 static __inline__ int deliver_skb(struct sk_buff *skb,
1503 				  struct packet_type *pt_prev,
1504 				  struct net_device *orig_dev)
1505 {
1506 	atomic_inc(&skb->users);
1507 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1508 }
1509 
1510 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1511 int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
1512 struct net_bridge;
1513 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1514 						unsigned char *addr);
1515 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
1516 
1517 static __inline__ int handle_bridge(struct sk_buff **pskb,
1518 				    struct packet_type **pt_prev, int *ret,
1519 				    struct net_device *orig_dev)
1520 {
1521 	struct net_bridge_port *port;
1522 
1523 	if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
1524 	    (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
1525 		return 0;
1526 
1527 	if (*pt_prev) {
1528 		*ret = deliver_skb(*pskb, *pt_prev, orig_dev);
1529 		*pt_prev = NULL;
1530 	}
1531 
1532 	return br_handle_frame_hook(port, pskb);
1533 }
1534 #else
1535 #define handle_bridge(skb, pt_prev, ret, orig_dev)	(0)
1536 #endif
1537 
1538 #ifdef CONFIG_NET_CLS_ACT
1539 /* TODO: Maybe we should just force sch_ingress to be compiled in
1540  * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
1541  * instructions - a compare and 2 extra stores - when it is not built
1542  * but CONFIG_NET_CLS_ACT is.
1543  * NOTE: This doesn't remove any functionality; if you don't have
1544  * the ingress scheduler, you just can't add policies on ingress.
1545  *
1546  */
1547 static int ing_filter(struct sk_buff *skb)
1548 {
1549 	struct Qdisc *q;
1550 	struct net_device *dev = skb->dev;
1551 	int result = TC_ACT_OK;
1552 
1553 	if (dev->qdisc_ingress) {
1554 		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1555 		if (MAX_RED_LOOP < ttl++) {
1556 			printk("Redir loop detected Dropping packet (%s->%s)\n",
1557 				skb->input_dev->name, skb->dev->name);
1558 			return TC_ACT_SHOT;
1559 		}
1560 
1561 		skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1562 
1563 		skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1564 
1565 		spin_lock(&dev->ingress_lock);
1566 		if ((q = dev->qdisc_ingress) != NULL)
1567 			result = q->enqueue(skb, q);
1568 		spin_unlock(&dev->ingress_lock);
1569 
1570 	}
1571 
1572 	return result;
1573 }
1574 #endif
1575 
1576 int netif_receive_skb(struct sk_buff *skb)
1577 {
1578 	struct packet_type *ptype, *pt_prev;
1579 	struct net_device *orig_dev;
1580 	int ret = NET_RX_DROP;
1581 	unsigned short type;
1582 
1583 	/* if we've gotten here through NAPI, check netpoll */
1584 	if (skb->dev->poll && netpoll_rx(skb))
1585 		return NET_RX_DROP;
1586 
1587 	if (!skb->tstamp.off_sec)
1588 		net_timestamp(skb);
1589 
1590 	if (!skb->input_dev)
1591 		skb->input_dev = skb->dev;
1592 
1593 	orig_dev = skb_bond(skb);
1594 
1595 	__get_cpu_var(netdev_rx_stat).total++;
1596 
1597 	skb->h.raw = skb->nh.raw = skb->data;
1598 	skb->mac_len = skb->nh.raw - skb->mac.raw;
1599 
1600 	pt_prev = NULL;
1601 
1602 	rcu_read_lock();
1603 
1604 #ifdef CONFIG_NET_CLS_ACT
1605 	if (skb->tc_verd & TC_NCLS) {
1606 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
1607 		goto ncls;
1608 	}
1609 #endif
1610 
1611 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
1612 		if (!ptype->dev || ptype->dev == skb->dev) {
1613 			if (pt_prev)
1614 				ret = deliver_skb(skb, pt_prev, orig_dev);
1615 			pt_prev = ptype;
1616 		}
1617 	}
1618 
1619 #ifdef CONFIG_NET_CLS_ACT
1620 	if (pt_prev) {
1621 		ret = deliver_skb(skb, pt_prev, orig_dev);
1622 		pt_prev = NULL; /* no one else should process this after */
1623 	} else {
1624 		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1625 	}
1626 
1627 	ret = ing_filter(skb);
1628 
1629 	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
1630 		kfree_skb(skb);
1631 		goto out;
1632 	}
1633 
1634 	skb->tc_verd = 0;
1635 ncls:
1636 #endif
1637 
1638 	handle_diverter(skb);
1639 
1640 	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
1641 		goto out;
1642 
1643 	type = skb->protocol;
1644 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
1645 		if (ptype->type == type &&
1646 		    (!ptype->dev || ptype->dev == skb->dev)) {
1647 			if (pt_prev)
1648 				ret = deliver_skb(skb, pt_prev, orig_dev);
1649 			pt_prev = ptype;
1650 		}
1651 	}
1652 
1653 	if (pt_prev) {
1654 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1655 	} else {
1656 		kfree_skb(skb);
1657 		/* Jamal, now you will not be able to escape explaining
1658 		 * to me how you were going to use this. :-)
1659 		 */
1660 		ret = NET_RX_DROP;
1661 	}
1662 
1663 out:
1664 	rcu_read_unlock();
1665 	return ret;
1666 }
1667 
1668 static int process_backlog(struct net_device *backlog_dev, int *budget)
1669 {
1670 	int work = 0;
1671 	int quota = min(backlog_dev->quota, *budget);
1672 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
1673 	unsigned long start_time = jiffies;
1674 
1675 	backlog_dev->weight = weight_p;
1676 	for (;;) {
1677 		struct sk_buff *skb;
1678 		struct net_device *dev;
1679 
1680 		local_irq_disable();
1681 		skb = __skb_dequeue(&queue->input_pkt_queue);
1682 		if (!skb)
1683 			goto job_done;
1684 		local_irq_enable();
1685 
1686 		dev = skb->dev;
1687 
1688 		netif_receive_skb(skb);
1689 
1690 		dev_put(dev);
1691 
1692 		work++;
1693 
1694 		if (work >= quota || jiffies - start_time > 1)
1695 			break;
1696 
1697 	}
1698 
1699 	backlog_dev->quota -= work;
1700 	*budget -= work;
1701 	return -1;
1702 
1703 job_done:
1704 	backlog_dev->quota -= work;
1705 	*budget -= work;
1706 
1707 	list_del(&backlog_dev->poll_list);
1708 	smp_mb__before_clear_bit();
1709 	netif_poll_enable(backlog_dev);
1710 
1711 	local_irq_enable();
1712 	return 0;
1713 }
1714 
1715 static void net_rx_action(struct softirq_action *h)
1716 {
1717 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
1718 	unsigned long start_time = jiffies;
1719 	int budget = netdev_budget;
1720 	void *have;
1721 
1722 	local_irq_disable();
1723 
1724 	while (!list_empty(&queue->poll_list)) {
1725 		struct net_device *dev;
1726 
1727 		if (budget <= 0 || jiffies - start_time > 1)
1728 			goto softnet_break;
1729 
1730 		local_irq_enable();
1731 
1732 		dev = list_entry(queue->poll_list.next,
1733 				 struct net_device, poll_list);
1734 		have = netpoll_poll_lock(dev);
1735 
1736 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
1737 			netpoll_poll_unlock(have);
1738 			local_irq_disable();
1739 			list_del(&dev->poll_list);
1740 			list_add_tail(&dev->poll_list, &queue->poll_list);
1741 			if (dev->quota < 0)
1742 				dev->quota += dev->weight;
1743 			else
1744 				dev->quota = dev->weight;
1745 		} else {
1746 			netpoll_poll_unlock(have);
1747 			dev_put(dev);
1748 			local_irq_disable();
1749 		}
1750 	}
1751 out:
1752 	local_irq_enable();
1753 	return;
1754 
1755 softnet_break:
1756 	__get_cpu_var(netdev_rx_stat).time_squeeze++;
1757 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
1758 	goto out;
1759 }
1760 
1761 static gifconf_func_t * gifconf_list [NPROTO];
1762 
1763 /**
1764  *	register_gifconf	-	register a SIOCGIF handler
1765  *	@family: Address family
1766  *	@gifconf: Function handler
1767  *
1768  *	Register protocol dependent address dumping routines. The handler
1769  *	that is passed must not be freed or reused until it has been replaced
1770  *	by another handler.
1771  */
1772 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1773 {
1774 	if (family >= NPROTO)
1775 		return -EINVAL;
1776 	gifconf_list[family] = gifconf;
1777 	return 0;
1778 }
1779 
1780 
1781 /*
1782  *	Map an interface index to its name (SIOCGIFNAME)
1783  */
1784 
1785 /*
1786  *	We need this ioctl for efficient implementation of the
1787  *	if_indextoname() function required by the IPv6 API.  Without
1788  *	it, we would have to search all the interfaces to find a
1789  *	match.  --pb
1790  */
1791 
1792 static int dev_ifname(struct ifreq __user *arg)
1793 {
1794 	struct net_device *dev;
1795 	struct ifreq ifr;
1796 
1797 	/*
1798 	 *	Fetch the caller's info block.
1799 	 */
1800 
1801 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1802 		return -EFAULT;
1803 
1804 	read_lock(&dev_base_lock);
1805 	dev = __dev_get_by_index(ifr.ifr_ifindex);
1806 	if (!dev) {
1807 		read_unlock(&dev_base_lock);
1808 		return -ENODEV;
1809 	}
1810 
1811 	strcpy(ifr.ifr_name, dev->name);
1812 	read_unlock(&dev_base_lock);
1813 
1814 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1815 		return -EFAULT;
1816 	return 0;
1817 }
1818 
1819 /*
1820  *	Perform a SIOCGIFCONF call. This structure will change
1821  *	size eventually, and there is nothing I can do about it.
1822  *	Thus we will need a 'compatibility mode'.
1823  */
1824 
1825 static int dev_ifconf(char __user *arg)
1826 {
1827 	struct ifconf ifc;
1828 	struct net_device *dev;
1829 	char __user *pos;
1830 	int len;
1831 	int total;
1832 	int i;
1833 
1834 	/*
1835 	 *	Fetch the caller's info block.
1836 	 */
1837 
1838 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
1839 		return -EFAULT;
1840 
1841 	pos = ifc.ifc_buf;
1842 	len = ifc.ifc_len;
1843 
1844 	/*
1845 	 *	Loop over the interfaces, and write an info block for each.
1846 	 */
1847 
1848 	total = 0;
1849 	for (dev = dev_base; dev; dev = dev->next) {
1850 		for (i = 0; i < NPROTO; i++) {
1851 			if (gifconf_list[i]) {
1852 				int done;
1853 				if (!pos)
1854 					done = gifconf_list[i](dev, NULL, 0);
1855 				else
1856 					done = gifconf_list[i](dev, pos + total,
1857 							       len - total);
1858 				if (done < 0)
1859 					return -EFAULT;
1860 				total += done;
1861 			}
1862 		}
1863   	}
1864 
1865 	/*
1866 	 *	All done.  Write the updated control block back to the caller.
1867 	 */
1868 	ifc.ifc_len = total;
1869 
1870 	/*
1871 	 * 	Both BSD and Solaris return 0 here, so we do too.
1872 	 */
1873 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
1874 }
1875 
1876 #ifdef CONFIG_PROC_FS
1877 /*
1878  *	This is invoked by the /proc filesystem handler to display a device
1879  *	in detail.
1880  */
1881 static __inline__ struct net_device *dev_get_idx(loff_t pos)
1882 {
1883 	struct net_device *dev;
1884 	loff_t i;
1885 
1886 	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
1887 
1888 	return i == pos ? dev : NULL;
1889 }
1890 
1891 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
1892 {
1893 	read_lock(&dev_base_lock);
1894 	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
1895 }
1896 
1897 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1898 {
1899 	++*pos;
1900 	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
1901 }
1902 
1903 void dev_seq_stop(struct seq_file *seq, void *v)
1904 {
1905 	read_unlock(&dev_base_lock);
1906 }
1907 
1908 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
1909 {
1910 	if (dev->get_stats) {
1911 		struct net_device_stats *stats = dev->get_stats(dev);
1912 
1913 		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
1914 				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
1915 			   dev->name, stats->rx_bytes, stats->rx_packets,
1916 			   stats->rx_errors,
1917 			   stats->rx_dropped + stats->rx_missed_errors,
1918 			   stats->rx_fifo_errors,
1919 			   stats->rx_length_errors + stats->rx_over_errors +
1920 			     stats->rx_crc_errors + stats->rx_frame_errors,
1921 			   stats->rx_compressed, stats->multicast,
1922 			   stats->tx_bytes, stats->tx_packets,
1923 			   stats->tx_errors, stats->tx_dropped,
1924 			   stats->tx_fifo_errors, stats->collisions,
1925 			   stats->tx_carrier_errors +
1926 			     stats->tx_aborted_errors +
1927 			     stats->tx_window_errors +
1928 			     stats->tx_heartbeat_errors,
1929 			   stats->tx_compressed);
1930 	} else
1931 		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
1932 }
1933 
1934 /*
1935  *	Called from the PROCfs module. This now uses the new arbitrary sized
1936  *	/proc/net interface to create /proc/net/dev
1937  */
1938 static int dev_seq_show(struct seq_file *seq, void *v)
1939 {
1940 	if (v == SEQ_START_TOKEN)
1941 		seq_puts(seq, "Inter-|   Receive                            "
1942 			      "                    |  Transmit\n"
1943 			      " face |bytes    packets errs drop fifo frame "
1944 			      "compressed multicast|bytes    packets errs "
1945 			      "drop fifo colls carrier compressed\n");
1946 	else
1947 		dev_seq_printf_stats(seq, v);
1948 	return 0;
1949 }
1950 
1951 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
1952 {
1953 	struct netif_rx_stats *rc = NULL;
1954 
1955 	while (*pos < NR_CPUS)
1956 	       	if (cpu_online(*pos)) {
1957 			rc = &per_cpu(netdev_rx_stat, *pos);
1958 			break;
1959 		} else
1960 			++*pos;
1961 	return rc;
1962 }
1963 
1964 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
1965 {
1966 	return softnet_get_online(pos);
1967 }
1968 
1969 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1970 {
1971 	++*pos;
1972 	return softnet_get_online(pos);
1973 }
1974 
1975 static void softnet_seq_stop(struct seq_file *seq, void *v)
1976 {
1977 }
1978 
1979 static int softnet_seq_show(struct seq_file *seq, void *v)
1980 {
1981 	struct netif_rx_stats *s = v;
1982 
1983 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1984 		   s->total, s->dropped, s->time_squeeze, 0,
1985 		   0, 0, 0, 0, /* was fastroute */
1986 		   s->cpu_collision );
1987 	return 0;
1988 }
1989 
1990 static struct seq_operations dev_seq_ops = {
1991 	.start = dev_seq_start,
1992 	.next  = dev_seq_next,
1993 	.stop  = dev_seq_stop,
1994 	.show  = dev_seq_show,
1995 };
1996 
1997 static int dev_seq_open(struct inode *inode, struct file *file)
1998 {
1999 	return seq_open(file, &dev_seq_ops);
2000 }
2001 
2002 static struct file_operations dev_seq_fops = {
2003 	.owner	 = THIS_MODULE,
2004 	.open    = dev_seq_open,
2005 	.read    = seq_read,
2006 	.llseek  = seq_lseek,
2007 	.release = seq_release,
2008 };
2009 
2010 static struct seq_operations softnet_seq_ops = {
2011 	.start = softnet_seq_start,
2012 	.next  = softnet_seq_next,
2013 	.stop  = softnet_seq_stop,
2014 	.show  = softnet_seq_show,
2015 };
2016 
2017 static int softnet_seq_open(struct inode *inode, struct file *file)
2018 {
2019 	return seq_open(file, &softnet_seq_ops);
2020 }
2021 
2022 static struct file_operations softnet_seq_fops = {
2023 	.owner	 = THIS_MODULE,
2024 	.open    = softnet_seq_open,
2025 	.read    = seq_read,
2026 	.llseek  = seq_lseek,
2027 	.release = seq_release,
2028 };
2029 
2030 #ifdef WIRELESS_EXT
2031 extern int wireless_proc_init(void);
2032 #else
2033 #define wireless_proc_init() 0
2034 #endif
2035 
2036 static int __init dev_proc_init(void)
2037 {
2038 	int rc = -ENOMEM;
2039 
2040 	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
2041 		goto out;
2042 	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
2043 		goto out_dev;
2044 	if (wireless_proc_init())
2045 		goto out_softnet;
2046 	rc = 0;
2047 out:
2048 	return rc;
2049 out_softnet:
2050 	proc_net_remove("softnet_stat");
2051 out_dev:
2052 	proc_net_remove("dev");
2053 	goto out;
2054 }
2055 #else
2056 #define dev_proc_init() 0
2057 #endif	/* CONFIG_PROC_FS */
2058 
2059 
2060 /**
2061  *	netdev_set_master	-	set up master/slave pair
2062  *	@slave: slave device
2063  *	@master: new master device
2064  *
2065  *	Changes the master device of the slave. Pass %NULL to break the
2066  *	bonding. The caller must hold the RTNL semaphore. On a failure
2067  *	a negative errno code is returned. On success the reference counts
2068  *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2069  *	function returns zero.
2070  */
2071 int netdev_set_master(struct net_device *slave, struct net_device *master)
2072 {
2073 	struct net_device *old = slave->master;
2074 
2075 	ASSERT_RTNL();
2076 
2077 	if (master) {
2078 		if (old)
2079 			return -EBUSY;
2080 		dev_hold(master);
2081 	}
2082 
2083 	slave->master = master;
2084 
2085 	synchronize_net();
2086 
2087 	if (old)
2088 		dev_put(old);
2089 
2090 	if (master)
2091 		slave->flags |= IFF_SLAVE;
2092 	else
2093 		slave->flags &= ~IFF_SLAVE;
2094 
2095 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2096 	return 0;
2097 }
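/*
 * Illustrative sketch (not part of dev.c) of how a bonding-style driver
 * might use netdev_set_master().  "bond_dev" and "slave_dev" are assumed
 * to be valid, held net_devices; error handling is trimmed.
 */
#if 0	/* example only */
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave_dev, bond_dev);	/* pair them up */
	rtnl_unlock();
	return err;
}

static void example_release(struct net_device *slave_dev)
{
	rtnl_lock();
	netdev_set_master(slave_dev, NULL);		/* break the pairing */
	rtnl_unlock();
}
#endif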
2098 
2099 /**
2100  *	dev_set_promiscuity	- update promiscuity count on a device
2101  *	@dev: device
2102  *	@inc: modifier
2103  *
2104  *	Add or remove promiscuity from a device. While the count in the device
2105  *	remains above zero the interface remains promiscuous. Once it hits zero
2106  *	the device reverts back to normal filtering operation. A negative inc
2107  *	value is used to drop promiscuity on the device.
2108  */
2109 void dev_set_promiscuity(struct net_device *dev, int inc)
2110 {
2111 	unsigned short old_flags = dev->flags;
2112 
2113 	if ((dev->promiscuity += inc) == 0)
2114 		dev->flags &= ~IFF_PROMISC;
2115 	else
2116 		dev->flags |= IFF_PROMISC;
2117 	if (dev->flags != old_flags) {
2118 		dev_mc_upload(dev);
2119 		printk(KERN_INFO "device %s %s promiscuous mode\n",
2120 		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2121 		       					       "left");
2122 	}
2123 }
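/*
 * Illustrative sketch (not part of dev.c): a capture-style user of the
 * promiscuity count.  Holding the RTNL around the calls is an assumption
 * matching the in-kernel users of this interface.
 */
#if 0	/* example only */
static void example_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* count 0 -> 1: enter promisc mode */
	rtnl_unlock();
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* count back to 0: leave promisc mode */
	rtnl_unlock();
}
#endif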
2124 
2125 /**
2126  *	dev_set_allmulti	- update allmulti count on a device
2127  *	@dev: device
2128  *	@inc: modifier
2129  *
2130  *	Add or remove reception of all multicast frames to a device. While the
2131  *	count in the device remains above zero the interface remains listening
2132  *	to all multicast frames. Once it hits zero the device reverts back to normal
2133  *	filtering operation. A negative @inc value is used to drop the counter
2134  *	when releasing a resource needing all multicasts.
2135  */
2136 
2137 void dev_set_allmulti(struct net_device *dev, int inc)
2138 {
2139 	unsigned short old_flags = dev->flags;
2140 
2141 	dev->flags |= IFF_ALLMULTI;
2142 	if ((dev->allmulti += inc) == 0)
2143 		dev->flags &= ~IFF_ALLMULTI;
2144 	if (dev->flags ^ old_flags)
2145 		dev_mc_upload(dev);
2146 }
2147 
2148 unsigned dev_get_flags(const struct net_device *dev)
2149 {
2150 	unsigned flags;
2151 
2152 	flags = (dev->flags & ~(IFF_PROMISC |
2153 				IFF_ALLMULTI |
2154 				IFF_RUNNING)) |
2155 		(dev->gflags & (IFF_PROMISC |
2156 				IFF_ALLMULTI));
2157 
2158 	if (netif_running(dev) && netif_carrier_ok(dev))
2159 		flags |= IFF_RUNNING;
2160 
2161 	return flags;
2162 }
2163 
2164 int dev_change_flags(struct net_device *dev, unsigned flags)
2165 {
2166 	int ret;
2167 	int old_flags = dev->flags;
2168 
2169 	/*
2170 	 *	Set the flags on our device.
2171 	 */
2172 
2173 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2174 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2175 			       IFF_AUTOMEDIA)) |
2176 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2177 				    IFF_ALLMULTI));
2178 
2179 	/*
2180 	 *	Load in the correct multicast list now the flags have changed.
2181 	 */
2182 
2183 	dev_mc_upload(dev);
2184 
2185 	/*
2186 	 *	Have we downed the interface? We handle IFF_UP ourselves
2187 	 *	according to user attempts to set it, rather than blindly
2188 	 *	setting it.
2189 	 */
2190 
2191 	ret = 0;
2192 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
2193 		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2194 
2195 		if (!ret)
2196 			dev_mc_upload(dev);
2197 	}
2198 
2199 	if (dev->flags & IFF_UP &&
2200 	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2201 					  IFF_VOLATILE)))
2202 		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
2203 
2204 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
2205 		int inc = (flags & IFF_PROMISC) ? +1 : -1;
2206 		dev->gflags ^= IFF_PROMISC;
2207 		dev_set_promiscuity(dev, inc);
2208 	}
2209 
2210 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
2211 	   is important. Some (broken) drivers set IFF_PROMISC when
2212 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
2213 	 */
2214 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
2215 		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
2216 		dev->gflags ^= IFF_ALLMULTI;
2217 		dev_set_allmulti(dev, inc);
2218 	}
2219 
2220 	if (old_flags ^ dev->flags)
2221 		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
2222 
2223 	return ret;
2224 }
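/*
 * Illustrative sketch (not part of dev.c): bringing an interface up by
 * name via dev_change_flags().  The name is an assumption supplied by the
 * caller; the RTNL must be held across the lookup and the flag change.
 */
#if 0	/* example only */
static int example_bring_up(const char *name)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(name);
	if (dev)
		err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif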
2225 
2226 int dev_set_mtu(struct net_device *dev, int new_mtu)
2227 {
2228 	int err;
2229 
2230 	if (new_mtu == dev->mtu)
2231 		return 0;
2232 
2233 	/*	MTU must be positive.	 */
2234 	if (new_mtu < 0)
2235 		return -EINVAL;
2236 
2237 	if (!netif_device_present(dev))
2238 		return -ENODEV;
2239 
2240 	err = 0;
2241 	if (dev->change_mtu)
2242 		err = dev->change_mtu(dev, new_mtu);
2243 	else
2244 		dev->mtu = new_mtu;
2245 	if (!err && dev->flags & IFF_UP)
2246 		notifier_call_chain(&netdev_chain,
2247 				    NETDEV_CHANGEMTU, dev);
2248 	return err;
2249 }
2250 
2251 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2252 {
2253 	int err;
2254 
2255 	if (!dev->set_mac_address)
2256 		return -EOPNOTSUPP;
2257 	if (sa->sa_family != dev->type)
2258 		return -EINVAL;
2259 	if (!netif_device_present(dev))
2260 		return -ENODEV;
2261 	err = dev->set_mac_address(dev, sa);
2262 	if (!err)
2263 		notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
2264 	return err;
2265 }
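/*
 * Illustrative sketch (not part of dev.c) of the two setters above.  "dev"
 * is assumed valid and the RTNL held by the caller, as in the SIOCSIFMTU /
 * SIOCSIFHWADDR paths; "new_addr" is a buffer of dev->addr_len bytes and
 * the 1500 byte MTU assumes an Ethernet-like device.
 */
#if 0	/* example only */
static int example_reconfigure(struct net_device *dev, const void *new_addr)
{
	struct sockaddr sa;
	int err;

	err = dev_set_mtu(dev, 1500);		/* assumption: Ethernet-sized MTU */
	if (err)
		return err;

	sa.sa_family = dev->type;		/* must match, e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, new_addr, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif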
2266 
2267 /*
2268  *	Perform the SIOCxIFxxx calls.
2269  */
2270 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2271 {
2272 	int err;
2273 	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2274 
2275 	if (!dev)
2276 		return -ENODEV;
2277 
2278 	switch (cmd) {
2279 		case SIOCGIFFLAGS:	/* Get interface flags */
2280 			ifr->ifr_flags = dev_get_flags(dev);
2281 			return 0;
2282 
2283 		case SIOCSIFFLAGS:	/* Set interface flags */
2284 			return dev_change_flags(dev, ifr->ifr_flags);
2285 
2286 		case SIOCGIFMETRIC:	/* Get the metric on the interface
2287 					   (currently unused) */
2288 			ifr->ifr_metric = 0;
2289 			return 0;
2290 
2291 		case SIOCSIFMETRIC:	/* Set the metric on the interface
2292 					   (currently unused) */
2293 			return -EOPNOTSUPP;
2294 
2295 		case SIOCGIFMTU:	/* Get the MTU of a device */
2296 			ifr->ifr_mtu = dev->mtu;
2297 			return 0;
2298 
2299 		case SIOCSIFMTU:	/* Set the MTU of a device */
2300 			return dev_set_mtu(dev, ifr->ifr_mtu);
2301 
2302 		case SIOCGIFHWADDR:
2303 			if (!dev->addr_len)
2304 				memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
2305 			else
2306 				memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2307 				       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2308 			ifr->ifr_hwaddr.sa_family = dev->type;
2309 			return 0;
2310 
2311 		case SIOCSIFHWADDR:
2312 			return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
2313 
2314 		case SIOCSIFHWBROADCAST:
2315 			if (ifr->ifr_hwaddr.sa_family != dev->type)
2316 				return -EINVAL;
2317 			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2318 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2319 			notifier_call_chain(&netdev_chain,
2320 					    NETDEV_CHANGEADDR, dev);
2321 			return 0;
2322 
2323 		case SIOCGIFMAP:
2324 			ifr->ifr_map.mem_start = dev->mem_start;
2325 			ifr->ifr_map.mem_end   = dev->mem_end;
2326 			ifr->ifr_map.base_addr = dev->base_addr;
2327 			ifr->ifr_map.irq       = dev->irq;
2328 			ifr->ifr_map.dma       = dev->dma;
2329 			ifr->ifr_map.port      = dev->if_port;
2330 			return 0;
2331 
2332 		case SIOCSIFMAP:
2333 			if (dev->set_config) {
2334 				if (!netif_device_present(dev))
2335 					return -ENODEV;
2336 				return dev->set_config(dev, &ifr->ifr_map);
2337 			}
2338 			return -EOPNOTSUPP;
2339 
2340 		case SIOCADDMULTI:
2341 			if (!dev->set_multicast_list ||
2342 			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2343 				return -EINVAL;
2344 			if (!netif_device_present(dev))
2345 				return -ENODEV;
2346 			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2347 					  dev->addr_len, 1);
2348 
2349 		case SIOCDELMULTI:
2350 			if (!dev->set_multicast_list ||
2351 			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2352 				return -EINVAL;
2353 			if (!netif_device_present(dev))
2354 				return -ENODEV;
2355 			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2356 					     dev->addr_len, 1);
2357 
2358 		case SIOCGIFINDEX:
2359 			ifr->ifr_ifindex = dev->ifindex;
2360 			return 0;
2361 
2362 		case SIOCGIFTXQLEN:
2363 			ifr->ifr_qlen = dev->tx_queue_len;
2364 			return 0;
2365 
2366 		case SIOCSIFTXQLEN:
2367 			if (ifr->ifr_qlen < 0)
2368 				return -EINVAL;
2369 			dev->tx_queue_len = ifr->ifr_qlen;
2370 			return 0;
2371 
2372 		case SIOCSIFNAME:
2373 			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2374 			return dev_change_name(dev, ifr->ifr_newname);
2375 
2376 		/*
2377 		 *	Unknown or private ioctl
2378 		 */
2379 
2380 		default:
2381 			if ((cmd >= SIOCDEVPRIVATE &&
2382 			    cmd <= SIOCDEVPRIVATE + 15) ||
2383 			    cmd == SIOCBONDENSLAVE ||
2384 			    cmd == SIOCBONDRELEASE ||
2385 			    cmd == SIOCBONDSETHWADDR ||
2386 			    cmd == SIOCBONDSLAVEINFOQUERY ||
2387 			    cmd == SIOCBONDINFOQUERY ||
2388 			    cmd == SIOCBONDCHANGEACTIVE ||
2389 			    cmd == SIOCGMIIPHY ||
2390 			    cmd == SIOCGMIIREG ||
2391 			    cmd == SIOCSMIIREG ||
2392 			    cmd == SIOCBRADDIF ||
2393 			    cmd == SIOCBRDELIF ||
2394 			    cmd == SIOCWANDEV) {
2395 				err = -EOPNOTSUPP;
2396 				if (dev->do_ioctl) {
2397 					if (netif_device_present(dev))
2398 						err = dev->do_ioctl(dev, ifr,
2399 								    cmd);
2400 					else
2401 						err = -ENODEV;
2402 				}
2403 			} else
2404 				err = -EINVAL;
2405 
2406 	}
2407 	return err;
2408 }
2409 
2410 /*
2411  *	This function handles all "interface"-type I/O control requests. The actual
2412  *	'doing' part of this is dev_ifsioc above.
2413  */
2414 
2415 /**
2416  *	dev_ioctl	-	network device ioctl
2417  *	@cmd: command to issue
2418  *	@arg: pointer to a struct ifreq in user space
2419  *
2420  *	Issue ioctl functions to devices. This is normally called by the
2421  *	user space syscall interfaces but can sometimes be useful for
2422  *	other purposes. The return value is the return from the syscall if
2423  *	positive or a negative errno code on error.
2424  */
2425 
2426 int dev_ioctl(unsigned int cmd, void __user *arg)
2427 {
2428 	struct ifreq ifr;
2429 	int ret;
2430 	char *colon;
2431 
2432 	/* One special case: SIOCGIFCONF takes ifconf argument
2433 	   and requires shared lock, because it sleeps writing
2434 	   to user space.
2435 	 */
2436 
2437 	if (cmd == SIOCGIFCONF) {
2438 		rtnl_shlock();
2439 		ret = dev_ifconf((char __user *) arg);
2440 		rtnl_shunlock();
2441 		return ret;
2442 	}
2443 	if (cmd == SIOCGIFNAME)
2444 		return dev_ifname((struct ifreq __user *)arg);
2445 
2446 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2447 		return -EFAULT;
2448 
2449 	ifr.ifr_name[IFNAMSIZ-1] = 0;
2450 
2451 	colon = strchr(ifr.ifr_name, ':');
2452 	if (colon)
2453 		*colon = 0;
2454 
2455 	/*
2456 	 *	See which interface the caller is talking about.
2457 	 */
2458 
2459 	switch (cmd) {
2460 		/*
2461 		 *	These ioctl calls:
2462 		 *	- can be done by all.
2463 		 *	- atomic and do not require locking.
2464 		 *	- return a value
2465 		 */
2466 		case SIOCGIFFLAGS:
2467 		case SIOCGIFMETRIC:
2468 		case SIOCGIFMTU:
2469 		case SIOCGIFHWADDR:
2470 		case SIOCGIFSLAVE:
2471 		case SIOCGIFMAP:
2472 		case SIOCGIFINDEX:
2473 		case SIOCGIFTXQLEN:
2474 			dev_load(ifr.ifr_name);
2475 			read_lock(&dev_base_lock);
2476 			ret = dev_ifsioc(&ifr, cmd);
2477 			read_unlock(&dev_base_lock);
2478 			if (!ret) {
2479 				if (colon)
2480 					*colon = ':';
2481 				if (copy_to_user(arg, &ifr,
2482 						 sizeof(struct ifreq)))
2483 					ret = -EFAULT;
2484 			}
2485 			return ret;
2486 
2487 		case SIOCETHTOOL:
2488 			dev_load(ifr.ifr_name);
2489 			rtnl_lock();
2490 			ret = dev_ethtool(&ifr);
2491 			rtnl_unlock();
2492 			if (!ret) {
2493 				if (colon)
2494 					*colon = ':';
2495 				if (copy_to_user(arg, &ifr,
2496 						 sizeof(struct ifreq)))
2497 					ret = -EFAULT;
2498 			}
2499 			return ret;
2500 
2501 		/*
2502 		 *	These ioctl calls:
2503 		 *	- require superuser power.
2504 		 *	- require strict serialization.
2505 		 *	- return a value
2506 		 */
2507 		case SIOCGMIIPHY:
2508 		case SIOCGMIIREG:
2509 		case SIOCSIFNAME:
2510 			if (!capable(CAP_NET_ADMIN))
2511 				return -EPERM;
2512 			dev_load(ifr.ifr_name);
2513 			rtnl_lock();
2514 			ret = dev_ifsioc(&ifr, cmd);
2515 			rtnl_unlock();
2516 			if (!ret) {
2517 				if (colon)
2518 					*colon = ':';
2519 				if (copy_to_user(arg, &ifr,
2520 						 sizeof(struct ifreq)))
2521 					ret = -EFAULT;
2522 			}
2523 			return ret;
2524 
2525 		/*
2526 		 *	These ioctl calls:
2527 		 *	- require superuser power.
2528 		 *	- require strict serialization.
2529 		 *	- do not return a value
2530 		 */
2531 		case SIOCSIFFLAGS:
2532 		case SIOCSIFMETRIC:
2533 		case SIOCSIFMTU:
2534 		case SIOCSIFMAP:
2535 		case SIOCSIFHWADDR:
2536 		case SIOCSIFSLAVE:
2537 		case SIOCADDMULTI:
2538 		case SIOCDELMULTI:
2539 		case SIOCSIFHWBROADCAST:
2540 		case SIOCSIFTXQLEN:
2541 		case SIOCSMIIREG:
2542 		case SIOCBONDENSLAVE:
2543 		case SIOCBONDRELEASE:
2544 		case SIOCBONDSETHWADDR:
2545 		case SIOCBONDSLAVEINFOQUERY:
2546 		case SIOCBONDINFOQUERY:
2547 		case SIOCBONDCHANGEACTIVE:
2548 		case SIOCBRADDIF:
2549 		case SIOCBRDELIF:
2550 			if (!capable(CAP_NET_ADMIN))
2551 				return -EPERM;
2552 			dev_load(ifr.ifr_name);
2553 			rtnl_lock();
2554 			ret = dev_ifsioc(&ifr, cmd);
2555 			rtnl_unlock();
2556 			return ret;
2557 
2558 		case SIOCGIFMEM:
2559 			/* Get the per device memory space. We can add this but
2560 			 * currently do not support it */
2561 		case SIOCSIFMEM:
2562 			/* Set the per device memory buffer space.
2563 			 * Not applicable in our case */
2564 		case SIOCSIFLINK:
2565 			return -EINVAL;
2566 
2567 		/*
2568 		 *	Unknown or private ioctl.
2569 		 */
2570 		default:
2571 			if (cmd == SIOCWANDEV ||
2572 			    (cmd >= SIOCDEVPRIVATE &&
2573 			     cmd <= SIOCDEVPRIVATE + 15)) {
2574 				dev_load(ifr.ifr_name);
2575 				rtnl_lock();
2576 				ret = dev_ifsioc(&ifr, cmd);
2577 				rtnl_unlock();
2578 				if (!ret && copy_to_user(arg, &ifr,
2579 							 sizeof(struct ifreq)))
2580 					ret = -EFAULT;
2581 				return ret;
2582 			}
2583 #ifdef WIRELESS_EXT
2584 			/* Take care of Wireless Extensions */
2585 			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2586 				/* If command is `set a parameter', or
2587 				 * `get the encoding parameters', check if
2588 				 * the user has the right to do it */
2589 				if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2590 					if (!capable(CAP_NET_ADMIN))
2591 						return -EPERM;
2592 				}
2593 				dev_load(ifr.ifr_name);
2594 				rtnl_lock();
2595 				/* Follow me in net/core/wireless.c */
2596 				ret = wireless_process_ioctl(&ifr, cmd);
2597 				rtnl_unlock();
2598 				if (IW_IS_GET(cmd) &&
2599 				    copy_to_user(arg, &ifr,
2600 					    	 sizeof(struct ifreq)))
2601 					ret = -EFAULT;
2602 				return ret;
2603 			}
2604 #endif	/* WIRELESS_EXT */
2605 			return -EINVAL;
2606 	}
2607 }
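/*
 * Illustrative userspace sketch (not part of dev.c) of the most common
 * path through dev_ioctl(): read the flags with SIOCGIFFLAGS, then set
 * IFF_UP with SIOCSIFFLAGS.  The name "eth0" is an assumption, and the
 * set side needs CAP_NET_ADMIN, as enforced above.
 */
#if 0	/* example only, never compiled into the kernel */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumption: eth0 exists */
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
		ifr.ifr_flags |= IFF_UP;
		ioctl(fd, SIOCSIFFLAGS, &ifr);
	}
	close(fd);
	return 0;
}
#endif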
2608 
2609 
2610 /**
2611  *	dev_new_index	-	allocate an ifindex
2612  *
2613  *	Returns a suitable unique value for a new device interface
2614  *	number.  The caller must hold the rtnl semaphore or the
2615  *	dev_base_lock to be sure it remains unique.
2616  */
2617 static int dev_new_index(void)
2618 {
2619 	static int ifindex;
2620 	for (;;) {
2621 		if (++ifindex <= 0)
2622 			ifindex = 1;
2623 		if (!__dev_get_by_index(ifindex))
2624 			return ifindex;
2625 	}
2626 }
2627 
2628 static int dev_boot_phase = 1;
2629 
2630 /* Delayed registration/unregisteration */
2631 static DEFINE_SPINLOCK(net_todo_list_lock);
2632 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2633 
2634 static inline void net_set_todo(struct net_device *dev)
2635 {
2636 	spin_lock(&net_todo_list_lock);
2637 	list_add_tail(&dev->todo_list, &net_todo_list);
2638 	spin_unlock(&net_todo_list_lock);
2639 }
2640 
2641 /**
2642  *	register_netdevice	- register a network device
2643  *	@dev: device to register
2644  *
2645  *	Take a completed network device structure and add it to the kernel
2646  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2647  *	chain. 0 is returned on success. A negative errno code is returned
2648  *	on a failure to set up the device, or if the name is a duplicate.
2649  *
2650  *	Callers must hold the rtnl semaphore. You may want
2651  *	register_netdev() instead of this.
2652  *
2653  *	BUGS:
2654  *	The locking appears insufficient to guarantee two parallel registers
2655  *	will not get the same name.
2656  */
2657 
2658 int register_netdevice(struct net_device *dev)
2659 {
2660 	struct hlist_head *head;
2661 	struct hlist_node *p;
2662 	int ret;
2663 
2664 	BUG_ON(dev_boot_phase);
2665 	ASSERT_RTNL();
2666 
2667 	/* When net_device's are persistent, this will be fatal. */
2668 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2669 
2670 	spin_lock_init(&dev->queue_lock);
2671 	spin_lock_init(&dev->xmit_lock);
2672 	dev->xmit_lock_owner = -1;
2673 #ifdef CONFIG_NET_CLS_ACT
2674 	spin_lock_init(&dev->ingress_lock);
2675 #endif
2676 
2677 	ret = alloc_divert_blk(dev);
2678 	if (ret)
2679 		goto out;
2680 
2681 	dev->iflink = -1;
2682 
2683 	/* Init, if this function is available */
2684 	if (dev->init) {
2685 		ret = dev->init(dev);
2686 		if (ret) {
2687 			if (ret > 0)
2688 				ret = -EIO;
2689 			goto out_err;
2690 		}
2691 	}
2692 
2693 	if (!dev_valid_name(dev->name)) {
2694 		ret = -EINVAL;
2695 		goto out_err;
2696 	}
2697 
2698 	dev->ifindex = dev_new_index();
2699 	if (dev->iflink == -1)
2700 		dev->iflink = dev->ifindex;
2701 
2702 	/* Check for existence of name */
2703 	head = dev_name_hash(dev->name);
2704 	hlist_for_each(p, head) {
2705 		struct net_device *d
2706 			= hlist_entry(p, struct net_device, name_hlist);
2707 		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2708 			ret = -EEXIST;
2709  			goto out_err;
2710 		}
2711  	}
2712 
2713 	/* Fix illegal SG+CSUM combinations. */
2714 	if ((dev->features & NETIF_F_SG) &&
2715 	    !(dev->features & (NETIF_F_IP_CSUM |
2716 			       NETIF_F_NO_CSUM |
2717 			       NETIF_F_HW_CSUM))) {
2718 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2719 		       dev->name);
2720 		dev->features &= ~NETIF_F_SG;
2721 	}
2722 
2723 	/* TSO requires that SG is present as well. */
2724 	if ((dev->features & NETIF_F_TSO) &&
2725 	    !(dev->features & NETIF_F_SG)) {
2726 		printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
2727 		       dev->name);
2728 		dev->features &= ~NETIF_F_TSO;
2729 	}
2730 	if (dev->features & NETIF_F_UFO) {
2731 		if (!(dev->features & NETIF_F_HW_CSUM)) {
2732 			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2733 					"NETIF_F_HW_CSUM feature.\n",
2734 							dev->name);
2735 			dev->features &= ~NETIF_F_UFO;
2736 		}
2737 		if (!(dev->features & NETIF_F_SG)) {
2738 			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2739 					"NETIF_F_SG feature.\n",
2740 					dev->name);
2741 			dev->features &= ~NETIF_F_UFO;
2742 		}
2743 	}
2744 
2745 	/*
2746 	 *	Install a nil rebuild_header routine; it should never be
2747 	 *	called and serves only as a bug trap.
2748 	 */
2749 
2750 	if (!dev->rebuild_header)
2751 		dev->rebuild_header = default_rebuild_header;
2752 
2753 	/*
2754 	 *	Default initial state at registry is that the
2755 	 *	device is present.
2756 	 */
2757 
2758 	set_bit(__LINK_STATE_PRESENT, &dev->state);
2759 
2760 	dev->next = NULL;
2761 	dev_init_scheduler(dev);
2762 	write_lock_bh(&dev_base_lock);
2763 	*dev_tail = dev;
2764 	dev_tail = &dev->next;
2765 	hlist_add_head(&dev->name_hlist, head);
2766 	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2767 	dev_hold(dev);
2768 	dev->reg_state = NETREG_REGISTERING;
2769 	write_unlock_bh(&dev_base_lock);
2770 
2771 	/* Notify protocols, that a new device appeared. */
2772 	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2773 
2774 	/* Finish registration after unlock */
2775 	net_set_todo(dev);
2776 	ret = 0;
2777 
2778 out:
2779 	return ret;
2780 out_err:
2781 	free_divert_blk(dev);
2782 	goto out;
2783 }
2784 
2785 /**
2786  *	register_netdev	- register a network device
2787  *	@dev: device to register
2788  *
2789  *	Take a completed network device structure and add it to the kernel
2790  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2791  *	chain. 0 is returned on success. A negative errno code is returned
2792  *	on a failure to set up the device, or if the name is a duplicate.
2793  *
2794  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
2795  *	and expands the device name if you passed a format string to
2796  *	alloc_netdev.
2797  */
2798 int register_netdev(struct net_device *dev)
2799 {
2800 	int err;
2801 
2802 	rtnl_lock();
2803 
2804 	/*
2805 	 * If the name is a format string the caller wants us to do a
2806 	 * name allocation.
2807 	 */
2808 	if (strchr(dev->name, '%')) {
2809 		err = dev_alloc_name(dev, dev->name);
2810 		if (err < 0)
2811 			goto out;
2812 	}
2813 
2814 	/*
2815 	 * Back compatibility hook. Kill this one in 2.5
2816 	 */
2817 	if (dev->name[0] == 0 || dev->name[0] == ' ') {
2818 		err = dev_alloc_name(dev, "eth%d");
2819 		if (err < 0)
2820 			goto out;
2821 	}
2822 
2823 	err = register_netdevice(dev);
2824 out:
2825 	rtnl_unlock();
2826 	return err;
2827 }
2828 EXPORT_SYMBOL(register_netdev);
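/*
 * Illustrative sketch (not part of dev.c) of the usual driver life cycle
 * around register_netdev()/unregister_netdev().  The private struct, the
 * "myeth%d" format name and example_setup() are assumptions for the sketch.
 */
#if 0	/* example only */
struct example_priv {
	int link_up;
};

static struct net_device *example_dev;

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);			/* assumption: Ethernet-like device */
}

static int __init example_init(void)
{
	int err;

	example_dev = alloc_netdev(sizeof(struct example_priv), "myeth%d",
				   example_setup);
	if (!example_dev)
		return -ENOMEM;
	err = register_netdev(example_dev);	/* takes the RTNL, expands "%d" */
	if (err)
		free_netdev(example_dev);
	return err;
}

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);		/* takes the RTNL itself */
	free_netdev(example_dev);		/* only after unregistration completes */
}
#endif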
2829 
2830 /*
2831  * netdev_wait_allrefs - wait until all references are gone.
2832  *
2833  * This is called when unregistering network devices.
2834  *
2835  * Any protocol or device that holds a reference should register
2836  * for netdevice notification, and cleanup and put back the
2837  * reference if they receive an UNREGISTER event.
2838  * We can get stuck here if buggy protocols don't correctly
2839  * call dev_put.
2840  */
2841 static void netdev_wait_allrefs(struct net_device *dev)
2842 {
2843 	unsigned long rebroadcast_time, warning_time;
2844 
2845 	rebroadcast_time = warning_time = jiffies;
2846 	while (atomic_read(&dev->refcnt) != 0) {
2847 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2848 			rtnl_shlock();
2849 
2850 			/* Rebroadcast unregister notification */
2851 			notifier_call_chain(&netdev_chain,
2852 					    NETDEV_UNREGISTER, dev);
2853 
2854 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2855 				     &dev->state)) {
2856 				/* We must not have linkwatch events
2857 				 * pending on unregister. If this
2858 				 * happens, we simply run the queue
2859 				 * unscheduled, resulting in a noop
2860 				 * for this device.
2861 				 */
2862 				linkwatch_run_queue();
2863 			}
2864 
2865 			rtnl_shunlock();
2866 
2867 			rebroadcast_time = jiffies;
2868 		}
2869 
2870 		msleep(250);
2871 
2872 		if (time_after(jiffies, warning_time + 10 * HZ)) {
2873 			printk(KERN_EMERG "unregister_netdevice: "
2874 			       "waiting for %s to become free. Usage "
2875 			       "count = %d\n",
2876 			       dev->name, atomic_read(&dev->refcnt));
2877 			warning_time = jiffies;
2878 		}
2879 	}
2880 }
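/*
 * Illustrative sketch (not part of dev.c) of the protocol-side contract
 * described above: anything holding a long-lived reference registers a
 * netdevice notifier (via register_netdevice_notifier()) and drops the
 * reference on NETDEV_UNREGISTER, so this wait loop can finish.
 * "example_cached_dev" is an assumed per-protocol pointer.
 */
#if 0	/* example only */
static struct net_device *example_cached_dev;

static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);		/* release our reference */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};
#endif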
2881 
2882 /* The sequence is:
2883  *
2884  *	rtnl_lock();
2885  *	...
2886  *	register_netdevice(x1);
2887  *	register_netdevice(x2);
2888  *	...
2889  *	unregister_netdevice(y1);
2890  *	unregister_netdevice(y2);
2891  *      ...
2892  *	rtnl_unlock();
2893  *	free_netdev(y1);
2894  *	free_netdev(y2);
2895  *
2896  * We are invoked by rtnl_unlock() after it drops the semaphore.
2897  * This allows us to deal with problems:
2898  * 1) We can create/delete sysfs objects which invoke hotplug
2899  *    without deadlocking with linkwatch via keventd.
2900  * 2) Since we run with the RTNL semaphore not held, we can sleep
2901  *    safely in order to wait for the netdev refcnt to drop to zero.
2902  */
2903 static DECLARE_MUTEX(net_todo_run_mutex);
2904 void netdev_run_todo(void)
2905 {
2906 	struct list_head list = LIST_HEAD_INIT(list);
2907 	int err;
2908 
2909 
2910 	/* Need to guard against multiple cpu's getting out of order. */
2911 	down(&net_todo_run_mutex);
2912 
2913 	/* Not safe to do outside the semaphore.  We must not return
2914 	 * until all unregister events invoked by the local processor
2915 	 * have been completed (either by this todo run, or one on
2916 	 * another cpu).
2917 	 */
2918 	if (list_empty(&net_todo_list))
2919 		goto out;
2920 
2921 	/* Snapshot list, allow later requests */
2922 	spin_lock(&net_todo_list_lock);
2923 	list_splice_init(&net_todo_list, &list);
2924 	spin_unlock(&net_todo_list_lock);
2925 
2926 	while (!list_empty(&list)) {
2927 		struct net_device *dev
2928 			= list_entry(list.next, struct net_device, todo_list);
2929 		list_del(&dev->todo_list);
2930 
2931 		switch(dev->reg_state) {
2932 		case NETREG_REGISTERING:
2933 			err = netdev_register_sysfs(dev);
2934 			if (err)
2935 				printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
2936 				       dev->name, err);
2937 			dev->reg_state = NETREG_REGISTERED;
2938 			break;
2939 
2940 		case NETREG_UNREGISTERING:
2941 			netdev_unregister_sysfs(dev);
2942 			dev->reg_state = NETREG_UNREGISTERED;
2943 
2944 			netdev_wait_allrefs(dev);
2945 
2946 			/* paranoia */
2947 			BUG_ON(atomic_read(&dev->refcnt));
2948 			BUG_TRAP(!dev->ip_ptr);
2949 			BUG_TRAP(!dev->ip6_ptr);
2950 			BUG_TRAP(!dev->dn_ptr);
2951 
2952 
2953 			/* It must be the very last action,
2954 			 * after this 'dev' may point to freed up memory.
2955 			 */
2956 			if (dev->destructor)
2957 				dev->destructor(dev);
2958 			break;
2959 
2960 		default:
2961 			printk(KERN_ERR "network todo '%s' but state %d\n",
2962 			       dev->name, dev->reg_state);
2963 			break;
2964 		}
2965 	}
2966 
2967 out:
2968 	up(&net_todo_run_mutex);
2969 }
2970 
2971 /**
2972  *	alloc_netdev - allocate network device
2973  *	@sizeof_priv:	size of private data to allocate space for
2974  *	@name:		device name format string
2975  *	@setup:		callback to initialize device
2976  *
2977  *	Allocates a struct net_device with private data area for driver use
2978  *	and performs basic initialization.
2979  */
2980 struct net_device *alloc_netdev(int sizeof_priv, const char *name,
2981 		void (*setup)(struct net_device *))
2982 {
2983 	void *p;
2984 	struct net_device *dev;
2985 	int alloc_size;
2986 
2987 	/* ensure 32-byte alignment of both the device and private area */
2988 	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
2989 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
2990 
2991 	p = kmalloc(alloc_size, GFP_KERNEL);
2992 	if (!p) {
2993 		printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
2994 		return NULL;
2995 	}
2996 	memset(p, 0, alloc_size);
2997 
2998 	dev = (struct net_device *)
2999 		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3000 	dev->padded = (char *)dev - (char *)p;
3001 
3002 	if (sizeof_priv)
3003 		dev->priv = netdev_priv(dev);
3004 
3005 	setup(dev);
3006 	strcpy(dev->name, name);
3007 	return dev;
3008 }
3009 EXPORT_SYMBOL(alloc_netdev);
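/*
 * Illustrative sketch (not part of dev.c): the private area reserved by the
 * sizeof_priv argument above is reached through netdev_priv() (or the
 * dev->priv alias set here).  "struct example_priv" and the transmit hook
 * are assumptions for the sketch; a real driver would queue the frame.
 */
#if 0	/* example only */
struct example_priv {
	unsigned long tx_count;
};

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->tx_count++;		/* per-device driver state lives here */
	dev_kfree_skb(skb);		/* sketch: drop instead of transmitting */
	return 0;
}
#endif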
3010 
3011 /**
3012  *	free_netdev - free network device
3013  *	@dev: device
3014  *
3015  *	This function does the last stage of destroying an allocated device
3016  * 	interface. The reference to the device object is released.
3017  *	If this is the last reference then it will be freed.
3018  */
3019 void free_netdev(struct net_device *dev)
3020 {
3021 #ifdef CONFIG_SYSFS
3022 	/*  Compatibility with error handling in drivers */
3023 	if (dev->reg_state == NETREG_UNINITIALIZED) {
3024 		kfree((char *)dev - dev->padded);
3025 		return;
3026 	}
3027 
3028 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3029 	dev->reg_state = NETREG_RELEASED;
3030 
3031 	/* will free via class release */
3032 	class_device_put(&dev->class_dev);
3033 #else
3034 	kfree((char *)dev - dev->padded);
3035 #endif
3036 }
3037 
3038 /* Synchronize with packet receive processing. */
3039 void synchronize_net(void)
3040 {
3041 	might_sleep();
3042 	synchronize_rcu();
3043 }
3044 
3045 /**
3046  *	unregister_netdevice - remove device from the kernel
3047  *	@dev: device
3048  *
3049  *	This function shuts down a device interface and removes it
3050  *	from the kernel tables. On success 0 is returned, on a failure
3051  *	a negative errno code is returned.
3052  *
3053  *	Callers must hold the rtnl semaphore.  You may want
3054  *	unregister_netdev() instead of this.
3055  */
3056 
3057 int unregister_netdevice(struct net_device *dev)
3058 {
3059 	struct net_device *d, **dp;
3060 
3061 	BUG_ON(dev_boot_phase);
3062 	ASSERT_RTNL();
3063 
3064 	/* Some devices call this without ever registering, to unwind failed initialization. */
3065 	if (dev->reg_state == NETREG_UNINITIALIZED) {
3066 		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3067 				  "was registered\n", dev->name, dev);
3068 		return -ENODEV;
3069 	}
3070 
3071 	BUG_ON(dev->reg_state != NETREG_REGISTERED);
3072 
3073 	/* If device is running, close it first. */
3074 	if (dev->flags & IFF_UP)
3075 		dev_close(dev);
3076 
3077 	/* And unlink it from device chain. */
3078 	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3079 		if (d == dev) {
3080 			write_lock_bh(&dev_base_lock);
3081 			hlist_del(&dev->name_hlist);
3082 			hlist_del(&dev->index_hlist);
3083 			if (dev_tail == &dev->next)
3084 				dev_tail = dp;
3085 			*dp = d->next;
3086 			write_unlock_bh(&dev_base_lock);
3087 			break;
3088 		}
3089 	}
3090 	if (!d) {
3091 		printk(KERN_ERR "unregister net_device: '%s' not found\n",
3092 		       dev->name);
3093 		return -ENODEV;
3094 	}
3095 
3096 	dev->reg_state = NETREG_UNREGISTERING;
3097 
3098 	synchronize_net();
3099 
3100 	/* Shutdown queueing discipline. */
3101 	dev_shutdown(dev);
3102 
3103 
3104 	/* Notify protocols, that we are about to destroy
3105 	   this device. They should clean all the things.
3106 	*/
3107 	notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3108 
3109 	/*
3110 	 *	Flush the multicast chain
3111 	 */
3112 	dev_mc_discard(dev);
3113 
3114 	if (dev->uninit)
3115 		dev->uninit(dev);
3116 
3117 	/* Notifier chain MUST detach us from master device. */
3118 	BUG_TRAP(!dev->master);
3119 
3120 	free_divert_blk(dev);
3121 
3122 	/* Finish processing unregister after unlock */
3123 	net_set_todo(dev);
3124 
3125 	synchronize_net();
3126 
3127 	dev_put(dev);
3128 	return 0;
3129 }
3130 
3131 /**
3132  *	unregister_netdev - remove device from the kernel
3133  *	@dev: device
3134  *
3135  *	This function shuts down a device interface and removes it
3136  *	from the kernel tables. On success 0 is returned, on a failure
3137  *	a negative errno code is returned.
3138  *
3139  *	This is just a wrapper for unregister_netdevice that takes
3140  *	the rtnl semaphore.  In general you want to use this and not
3141  *	unregister_netdevice.
3142  */
3143 void unregister_netdev(struct net_device *dev)
3144 {
3145 	rtnl_lock();
3146 	unregister_netdevice(dev);
3147 	rtnl_unlock();
3148 }
3149 
3150 EXPORT_SYMBOL(unregister_netdev);
3151 
3152 #ifdef CONFIG_HOTPLUG_CPU
3153 static int dev_cpu_callback(struct notifier_block *nfb,
3154 			    unsigned long action,
3155 			    void *ocpu)
3156 {
3157 	struct sk_buff **list_skb;
3158 	struct net_device **list_net;
3159 	struct sk_buff *skb;
3160 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
3161 	struct softnet_data *sd, *oldsd;
3162 
3163 	if (action != CPU_DEAD)
3164 		return NOTIFY_OK;
3165 
3166 	local_irq_disable();
3167 	cpu = smp_processor_id();
3168 	sd = &per_cpu(softnet_data, cpu);
3169 	oldsd = &per_cpu(softnet_data, oldcpu);
3170 
3171 	/* Find end of our completion_queue. */
3172 	list_skb = &sd->completion_queue;
3173 	while (*list_skb)
3174 		list_skb = &(*list_skb)->next;
3175 	/* Append completion queue from offline CPU. */
3176 	*list_skb = oldsd->completion_queue;
3177 	oldsd->completion_queue = NULL;
3178 
3179 	/* Find end of our output_queue. */
3180 	list_net = &sd->output_queue;
3181 	while (*list_net)
3182 		list_net = &(*list_net)->next_sched;
3183 	/* Append output queue from offline CPU. */
3184 	*list_net = oldsd->output_queue;
3185 	oldsd->output_queue = NULL;
3186 
3187 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3188 	local_irq_enable();
3189 
3190 	/* Process offline CPU's input_pkt_queue */
3191 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3192 		netif_rx(skb);
3193 
3194 	return NOTIFY_OK;
3195 }
3196 #endif /* CONFIG_HOTPLUG_CPU */
3197 
3198 
3199 /*
3200  *	Initialize the DEV module. At boot time this walks the device list and
3201  *	unhooks any devices that fail to initialise (normally hardware not
3202  *	present) and leaves us with a valid list of present and active devices.
3203  *
3204  */
3205 
3206 /*
3207  *       This is called single threaded during boot, so no need
3208  *       to take the rtnl semaphore.
3209  */
3210 static int __init net_dev_init(void)
3211 {
3212 	int i, rc = -ENOMEM;
3213 
3214 	BUG_ON(!dev_boot_phase);
3215 
3216 	net_random_init();
3217 
3218 	if (dev_proc_init())
3219 		goto out;
3220 
3221 	if (netdev_sysfs_init())
3222 		goto out;
3223 
3224 	INIT_LIST_HEAD(&ptype_all);
3225 	for (i = 0; i < 16; i++)
3226 		INIT_LIST_HEAD(&ptype_base[i]);
3227 
3228 	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3229 		INIT_HLIST_HEAD(&dev_name_head[i]);
3230 
3231 	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3232 		INIT_HLIST_HEAD(&dev_index_head[i]);
3233 
3234 	/*
3235 	 *	Initialise the packet receive queues.
3236 	 */
3237 
3238 	for (i = 0; i < NR_CPUS; i++) {
3239 		struct softnet_data *queue;
3240 
3241 		queue = &per_cpu(softnet_data, i);
3242 		skb_queue_head_init(&queue->input_pkt_queue);
3243 		queue->completion_queue = NULL;
3244 		INIT_LIST_HEAD(&queue->poll_list);
3245 		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3246 		queue->backlog_dev.weight = weight_p;
3247 		queue->backlog_dev.poll = process_backlog;
3248 		atomic_set(&queue->backlog_dev.refcnt, 1);
3249 	}
3250 
3251 	dev_boot_phase = 0;
3252 
3253 	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3254 	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3255 
3256 	hotcpu_notifier(dev_cpu_callback, 0);
3257 	dst_init();
3258 	dev_mcast_init();
3259 	rc = 0;
3260 out:
3261 	return rc;
3262 }
3263 
3264 subsys_initcall(net_dev_init);
3265 
3266 EXPORT_SYMBOL(__dev_get_by_index);
3267 EXPORT_SYMBOL(__dev_get_by_name);
3268 EXPORT_SYMBOL(__dev_remove_pack);
3269 EXPORT_SYMBOL(__skb_linearize);
3270 EXPORT_SYMBOL(dev_valid_name);
3271 EXPORT_SYMBOL(dev_add_pack);
3272 EXPORT_SYMBOL(dev_alloc_name);
3273 EXPORT_SYMBOL(dev_close);
3274 EXPORT_SYMBOL(dev_get_by_flags);
3275 EXPORT_SYMBOL(dev_get_by_index);
3276 EXPORT_SYMBOL(dev_get_by_name);
3277 EXPORT_SYMBOL(dev_open);
3278 EXPORT_SYMBOL(dev_queue_xmit);
3279 EXPORT_SYMBOL(dev_remove_pack);
3280 EXPORT_SYMBOL(dev_set_allmulti);
3281 EXPORT_SYMBOL(dev_set_promiscuity);
3282 EXPORT_SYMBOL(dev_change_flags);
3283 EXPORT_SYMBOL(dev_set_mtu);
3284 EXPORT_SYMBOL(dev_set_mac_address);
3285 EXPORT_SYMBOL(free_netdev);
3286 EXPORT_SYMBOL(netdev_boot_setup_check);
3287 EXPORT_SYMBOL(netdev_set_master);
3288 EXPORT_SYMBOL(netdev_state_change);
3289 EXPORT_SYMBOL(netif_receive_skb);
3290 EXPORT_SYMBOL(netif_rx);
3291 EXPORT_SYMBOL(register_gifconf);
3292 EXPORT_SYMBOL(register_netdevice);
3293 EXPORT_SYMBOL(register_netdevice_notifier);
3294 EXPORT_SYMBOL(skb_checksum_help);
3295 EXPORT_SYMBOL(synchronize_net);
3296 EXPORT_SYMBOL(unregister_netdevice);
3297 EXPORT_SYMBOL(unregister_netdevice_notifier);
3298 EXPORT_SYMBOL(net_enable_timestamp);
3299 EXPORT_SYMBOL(net_disable_timestamp);
3300 EXPORT_SYMBOL(dev_get_flags);
3301 
3302 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3303 EXPORT_SYMBOL(br_handle_frame_hook);
3304 EXPORT_SYMBOL(br_fdb_get_hook);
3305 EXPORT_SYMBOL(br_fdb_put_hook);
3306 #endif
3307 
3308 #ifdef CONFIG_KMOD
3309 EXPORT_SYMBOL(dev_load);
3310 #endif
3311 
3312 EXPORT_PER_CPU_SYMBOL(softnet_data);
3313