xref: /linux/net/core/neighbour.c (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #include <linux/config.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/sched.h>
24 #include <linux/netdevice.h>
25 #include <linux/proc_fs.h>
26 #ifdef CONFIG_SYSCTL
27 #include <linux/sysctl.h>
28 #endif
29 #include <linux/times.h>
30 #include <net/neighbour.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 
37 #define NEIGH_DEBUG 1
38 
39 #define NEIGH_PRINTK(x...) printk(x)
40 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
41 #define NEIGH_PRINTK0 NEIGH_PRINTK
42 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
43 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
44 
45 #if NEIGH_DEBUG >= 1
46 #undef NEIGH_PRINTK1
47 #define NEIGH_PRINTK1 NEIGH_PRINTK
48 #endif
49 #if NEIGH_DEBUG >= 2
50 #undef NEIGH_PRINTK2
51 #define NEIGH_PRINTK2 NEIGH_PRINTK
52 #endif
53 
54 #define PNEIGH_HASHMASK		0xF
55 
56 static void neigh_timer_handler(unsigned long arg);
57 #ifdef CONFIG_ARPD
58 static void neigh_app_notify(struct neighbour *n);
59 #endif
60 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62 
63 static struct neigh_table *neigh_tables;
64 static struct file_operations neigh_stat_seq_fops;
65 
66 /*
67    Neighbour hash table buckets are protected by the rwlock tbl->lock.
68 
69    - All scans of and updates to hash buckets MUST be made under this lock.
70    - NOTHING clever should be done under this lock: no callbacks
71      to protocol backends, no attempts to send anything to the network.
72      Doing so will result in deadlocks if the backend/driver wants to
73      use the neighbour cache.
74    - If an entry requires some non-trivial action, increase
75      its reference count and release the table lock.
76 
77    Neighbour entries are protected:
78    - by their reference count.
79    - by the rwlock neigh->lock.
80 
81    The reference count prevents destruction.
82 
83    neigh->lock mainly serializes the link-layer address data and its validity state.
84    However, the same lock is also used to protect other entry fields:
85     - the timer
86     - the resolution queue
87 
88    Again, nothing clever shall be done under neigh->lock;
89    the most complicated operation we allow there is dev->hard_header.
90    It is assumed that dev->hard_header is simple and does
91    not call back into the neighbour tables.
92 
93    The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting
94    the list of neighbour tables.  This list is used only in process context.
95  */
96 
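/*
 * Illustrative sketch of the locking rule above (not part of the
 * original file): take a reference while the table lock is held, drop
 * the lock, and only then perform non-trivial work.  find_some_entry()
 * and do_heavy_work() are hypothetical placeholders.
 */
#if 0
	struct neighbour *n;

	read_lock_bh(&tbl->lock);
	n = find_some_entry(tbl);	/* hypothetical: walk a hash bucket */
	if (n)
		neigh_hold(n);		/* pin the entry before unlocking */
	read_unlock_bh(&tbl->lock);

	if (n) {
		do_heavy_work(n);	/* hypothetical: may call back into drivers */
		neigh_release(n);
	}
#endif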
97 static DEFINE_RWLOCK(neigh_tbl_lock);
98 
99 static int neigh_blackhole(struct sk_buff *skb)
100 {
101 	kfree_skb(skb);
102 	return -ENETDOWN;
103 }
104 
105 /*
106  * The result is a random value in the interval (1/2)*base ... (3/2)*base.
107  * This corresponds to the default IPv6 settings and is not overridable,
108  * because it is a genuinely reasonable choice.
109  */
110 
111 unsigned long neigh_rand_reach_time(unsigned long base)
112 {
113 	return (base ? (net_random() % base) + (base >> 1) : 0);
114 }
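/*
 * Worked example (illustrative, not in the original source): for
 * base = 30 * HZ the value above is uniformly distributed in
 * [15 * HZ, 45 * HZ), i.e. (1/2)*base ... (3/2)*base.
 */
#if 0
	unsigned long t = neigh_rand_reach_time(30 * HZ);	/* 15*HZ <= t < 45*HZ */
#endif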
115 
116 
117 static int neigh_forced_gc(struct neigh_table *tbl)
118 {
119 	int shrunk = 0;
120 	int i;
121 
122 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
123 
124 	write_lock_bh(&tbl->lock);
125 	for (i = 0; i <= tbl->hash_mask; i++) {
126 		struct neighbour *n, **np;
127 
128 		np = &tbl->hash_buckets[i];
129 		while ((n = *np) != NULL) {
130 			/* A neighbour record may be discarded if:
131 			 * - nobody refers to it, and
132 			 * - it is not permanent.
133 			 */
134 			write_lock(&n->lock);
135 			if (atomic_read(&n->refcnt) == 1 &&
136 			    !(n->nud_state & NUD_PERMANENT)) {
137 				*np	= n->next;
138 				n->dead = 1;
139 				shrunk	= 1;
140 				write_unlock(&n->lock);
141 				neigh_release(n);
142 				continue;
143 			}
144 			write_unlock(&n->lock);
145 			np = &n->next;
146 		}
147 	}
148 
149 	tbl->last_flush = jiffies;
150 
151 	write_unlock_bh(&tbl->lock);
152 
153 	return shrunk;
154 }
155 
156 static int neigh_del_timer(struct neighbour *n)
157 {
158 	if ((n->nud_state & NUD_IN_TIMER) &&
159 	    del_timer(&n->timer)) {
160 		neigh_release(n);
161 		return 1;
162 	}
163 	return 0;
164 }
165 
166 static void pneigh_queue_purge(struct sk_buff_head *list)
167 {
168 	struct sk_buff *skb;
169 
170 	while ((skb = skb_dequeue(list)) != NULL) {
171 		dev_put(skb->dev);
172 		kfree_skb(skb);
173 	}
174 }
175 
176 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
177 {
178 	int i;
179 
180 	write_lock_bh(&tbl->lock);
181 
182 	for (i = 0; i <= tbl->hash_mask; i++) {
183 		struct neighbour *n, **np;
184 
185 		np = &tbl->hash_buckets[i];
186 		while ((n = *np) != NULL) {
187 			if (dev && n->dev != dev) {
188 				np = &n->next;
189 				continue;
190 			}
191 			*np = n->next;
192 			write_lock_bh(&n->lock);
193 			n->dead = 1;
194 			neigh_del_timer(n);
195 			write_unlock_bh(&n->lock);
196 			neigh_release(n);
197 		}
198 	}
199 
200 	write_unlock_bh(&tbl->lock);
201 }
202 
203 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
204 {
205 	int i;
206 
207 	write_lock_bh(&tbl->lock);
208 
209 	for (i = 0; i <= tbl->hash_mask; i++) {
210 		struct neighbour *n, **np = &tbl->hash_buckets[i];
211 
212 		while ((n = *np) != NULL) {
213 			if (dev && n->dev != dev) {
214 				np = &n->next;
215 				continue;
216 			}
217 			*np = n->next;
218 			write_lock(&n->lock);
219 			neigh_del_timer(n);
220 			n->dead = 1;
221 
222 			if (atomic_read(&n->refcnt) != 1) {
223 				/* The most unpleasant situation:
224 				   we must destroy the neighbour entry,
225 				   but someone still uses it.
226 
227 				   The destroy will be delayed until
228 				   the last user releases us, but
229 				   we must kill the timers etc. and move
230 				   the entry to a safe state.
231 				 */
232 				skb_queue_purge(&n->arp_queue);
233 				n->output = neigh_blackhole;
234 				if (n->nud_state & NUD_VALID)
235 					n->nud_state = NUD_NOARP;
236 				else
237 					n->nud_state = NUD_NONE;
238 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
239 			}
240 			write_unlock(&n->lock);
241 			neigh_release(n);
242 		}
243 	}
244 
245 	pneigh_ifdown(tbl, dev);
246 	write_unlock_bh(&tbl->lock);
247 
248 	del_timer_sync(&tbl->proxy_timer);
249 	pneigh_queue_purge(&tbl->proxy_queue);
250 	return 0;
251 }
252 
253 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
254 {
255 	struct neighbour *n = NULL;
256 	unsigned long now = jiffies;
257 	int entries;
258 
259 	entries = atomic_inc_return(&tbl->entries) - 1;
260 	if (entries >= tbl->gc_thresh3 ||
261 	    (entries >= tbl->gc_thresh2 &&
262 	     time_after(now, tbl->last_flush + 5 * HZ))) {
263 		if (!neigh_forced_gc(tbl) &&
264 		    entries >= tbl->gc_thresh3)
265 			goto out_entries;
266 	}
267 
268 	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
269 	if (!n)
270 		goto out_entries;
271 
272 	memset(n, 0, tbl->entry_size);
273 
274 	skb_queue_head_init(&n->arp_queue);
275 	rwlock_init(&n->lock);
276 	n->updated	  = n->used = now;
277 	n->nud_state	  = NUD_NONE;
278 	n->output	  = neigh_blackhole;
279 	n->parms	  = neigh_parms_clone(&tbl->parms);
280 	init_timer(&n->timer);
281 	n->timer.function = neigh_timer_handler;
282 	n->timer.data	  = (unsigned long)n;
283 
284 	NEIGH_CACHE_STAT_INC(tbl, allocs);
285 	n->tbl		  = tbl;
286 	atomic_set(&n->refcnt, 1);
287 	n->dead		  = 1;
288 out:
289 	return n;
290 
291 out_entries:
292 	atomic_dec(&tbl->entries);
293 	goto out;
294 }
295 
296 static struct neighbour **neigh_hash_alloc(unsigned int entries)
297 {
298 	unsigned long size = entries * sizeof(struct neighbour *);
299 	struct neighbour **ret;
300 
301 	if (size <= PAGE_SIZE) {
302 		ret = kmalloc(size, GFP_ATOMIC);
303 	} else {
304 		ret = (struct neighbour **)
305 			__get_free_pages(GFP_ATOMIC, get_order(size));
306 	}
307 	if (ret)
308 		memset(ret, 0, size);
309 
310 	return ret;
311 }
312 
313 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 {
315 	unsigned long size = entries * sizeof(struct neighbour *);
316 
317 	if (size <= PAGE_SIZE)
318 		kfree(hash);
319 	else
320 		free_pages((unsigned long)hash, get_order(size));
321 }
322 
323 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
324 {
325 	struct neighbour **new_hash, **old_hash;
326 	unsigned int i, new_hash_mask, old_entries;
327 
328 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
329 
330 	BUG_ON(new_entries & (new_entries - 1));
331 	new_hash = neigh_hash_alloc(new_entries);
332 	if (!new_hash)
333 		return;
334 
335 	old_entries = tbl->hash_mask + 1;
336 	new_hash_mask = new_entries - 1;
337 	old_hash = tbl->hash_buckets;
338 
339 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
340 	for (i = 0; i < old_entries; i++) {
341 		struct neighbour *n, *next;
342 
343 		for (n = old_hash[i]; n; n = next) {
344 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
345 
346 			hash_val &= new_hash_mask;
347 			next = n->next;
348 
349 			n->next = new_hash[hash_val];
350 			new_hash[hash_val] = n;
351 		}
352 	}
353 	tbl->hash_buckets = new_hash;
354 	tbl->hash_mask = new_hash_mask;
355 
356 	neigh_hash_free(old_hash, old_entries);
357 }
358 
359 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
360 			       struct net_device *dev)
361 {
362 	struct neighbour *n;
363 	int key_len = tbl->key_len;
364 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
365 
366 	NEIGH_CACHE_STAT_INC(tbl, lookups);
367 
368 	read_lock_bh(&tbl->lock);
369 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
370 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
371 			neigh_hold(n);
372 			NEIGH_CACHE_STAT_INC(tbl, hits);
373 			break;
374 		}
375 	}
376 	read_unlock_bh(&tbl->lock);
377 	return n;
378 }
379 
380 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
381 {
382 	struct neighbour *n;
383 	int key_len = tbl->key_len;
384 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
385 
386 	NEIGH_CACHE_STAT_INC(tbl, lookups);
387 
388 	read_lock_bh(&tbl->lock);
389 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
390 		if (!memcmp(n->primary_key, pkey, key_len)) {
391 			neigh_hold(n);
392 			NEIGH_CACHE_STAT_INC(tbl, hits);
393 			break;
394 		}
395 	}
396 	read_unlock_bh(&tbl->lock);
397 	return n;
398 }
399 
400 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
401 			       struct net_device *dev)
402 {
403 	u32 hash_val;
404 	int key_len = tbl->key_len;
405 	int error;
406 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
407 
408 	if (!n) {
409 		rc = ERR_PTR(-ENOBUFS);
410 		goto out;
411 	}
412 
413 	memcpy(n->primary_key, pkey, key_len);
414 	n->dev = dev;
415 	dev_hold(dev);
416 
417 	/* Protocol specific setup. */
418 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
419 		rc = ERR_PTR(error);
420 		goto out_neigh_release;
421 	}
422 
423 	/* Device specific setup. */
424 	if (n->parms->neigh_setup &&
425 	    (error = n->parms->neigh_setup(n)) < 0) {
426 		rc = ERR_PTR(error);
427 		goto out_neigh_release;
428 	}
429 
430 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
431 
432 	write_lock_bh(&tbl->lock);
433 
434 	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
435 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
436 
437 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
438 
439 	if (n->parms->dead) {
440 		rc = ERR_PTR(-EINVAL);
441 		goto out_tbl_unlock;
442 	}
443 
444 	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
445 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
446 			neigh_hold(n1);
447 			rc = n1;
448 			goto out_tbl_unlock;
449 		}
450 	}
451 
452 	n->next = tbl->hash_buckets[hash_val];
453 	tbl->hash_buckets[hash_val] = n;
454 	n->dead = 0;
455 	neigh_hold(n);
456 	write_unlock_bh(&tbl->lock);
457 	NEIGH_PRINTK2("neigh %p is created.\n", n);
458 	rc = n;
459 out:
460 	return rc;
461 out_tbl_unlock:
462 	write_unlock_bh(&tbl->lock);
463 out_neigh_release:
464 	neigh_release(n);
465 	goto out;
466 }
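/*
 * Illustrative usage sketch (not part of the original file): the
 * typical lookup-or-create sequence a protocol performs, using the
 * __neigh_lookup_errno() helper which wraps neigh_lookup() and
 * neigh_create().  "key" and "dev" are hypothetical caller values.
 */
#if 0
	struct neighbour *n = __neigh_lookup_errno(tbl, key, dev);

	if (IS_ERR(n))
		return PTR_ERR(n);	/* e.g. -ENOBUFS from neigh_alloc() */
	/* ... use n under its own locking rules ... */
	neigh_release(n);		/* drop the reference taken above */
#endif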
467 
468 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
469 				    struct net_device *dev, int creat)
470 {
471 	struct pneigh_entry *n;
472 	int key_len = tbl->key_len;
473 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
474 
475 	hash_val ^= (hash_val >> 16);
476 	hash_val ^= hash_val >> 8;
477 	hash_val ^= hash_val >> 4;
478 	hash_val &= PNEIGH_HASHMASK;
479 
480 	read_lock_bh(&tbl->lock);
481 
482 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
483 		if (!memcmp(n->key, pkey, key_len) &&
484 		    (n->dev == dev || !n->dev)) {
485 			read_unlock_bh(&tbl->lock);
486 			goto out;
487 		}
488 	}
489 	read_unlock_bh(&tbl->lock);
490 	n = NULL;
491 	if (!creat)
492 		goto out;
493 
494 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
495 	if (!n)
496 		goto out;
497 
498 	memcpy(n->key, pkey, key_len);
499 	n->dev = dev;
500 	if (dev)
501 		dev_hold(dev);
502 
503 	if (tbl->pconstructor && tbl->pconstructor(n)) {
504 		if (dev)
505 			dev_put(dev);
506 		kfree(n);
507 		n = NULL;
508 		goto out;
509 	}
510 
511 	write_lock_bh(&tbl->lock);
512 	n->next = tbl->phash_buckets[hash_val];
513 	tbl->phash_buckets[hash_val] = n;
514 	write_unlock_bh(&tbl->lock);
515 out:
516 	return n;
517 }
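/*
 * Illustrative fragment (not in the original file): creating a proxy
 * entry, as the RTM_NEWNEIGH/NTF_PROXY path later in this file does.
 * A non-zero "creat" argument makes pneigh_lookup() allocate on a miss;
 * "key" is a hypothetical caller-supplied value.
 */
#if 0
	if (!pneigh_lookup(tbl, key, dev, 1))
		return -ENOBUFS;
#endif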
518 
519 
520 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
521 		  struct net_device *dev)
522 {
523 	struct pneigh_entry *n, **np;
524 	int key_len = tbl->key_len;
525 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
526 
527 	hash_val ^= (hash_val >> 16);
528 	hash_val ^= hash_val >> 8;
529 	hash_val ^= hash_val >> 4;
530 	hash_val &= PNEIGH_HASHMASK;
531 
532 	write_lock_bh(&tbl->lock);
533 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
534 	     np = &n->next) {
535 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
536 			*np = n->next;
537 			write_unlock_bh(&tbl->lock);
538 			if (tbl->pdestructor)
539 				tbl->pdestructor(n);
540 			if (n->dev)
541 				dev_put(n->dev);
542 			kfree(n);
543 			return 0;
544 		}
545 	}
546 	write_unlock_bh(&tbl->lock);
547 	return -ENOENT;
548 }
549 
550 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
551 {
552 	struct pneigh_entry *n, **np;
553 	u32 h;
554 
555 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
556 		np = &tbl->phash_buckets[h];
557 		while ((n = *np) != NULL) {
558 			if (!dev || n->dev == dev) {
559 				*np = n->next;
560 				if (tbl->pdestructor)
561 					tbl->pdestructor(n);
562 				if (n->dev)
563 					dev_put(n->dev);
564 				kfree(n);
565 				continue;
566 			}
567 			np = &n->next;
568 		}
569 	}
570 	return -ENOENT;
571 }
572 
573 
574 /*
575  *	The neighbour must already be unlinked from the table;
576  *	it will be freed here.
577  */
578 void neigh_destroy(struct neighbour *neigh)
579 {
580 	struct hh_cache *hh;
581 
582 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
583 
584 	if (!neigh->dead) {
585 		printk(KERN_WARNING
586 		       "Destroying alive neighbour %p\n", neigh);
587 		dump_stack();
588 		return;
589 	}
590 
591 	if (neigh_del_timer(neigh))
592 		printk(KERN_WARNING "Impossible event.\n");
593 
594 	while ((hh = neigh->hh) != NULL) {
595 		neigh->hh = hh->hh_next;
596 		hh->hh_next = NULL;
597 		write_lock_bh(&hh->hh_lock);
598 		hh->hh_output = neigh_blackhole;
599 		write_unlock_bh(&hh->hh_lock);
600 		if (atomic_dec_and_test(&hh->hh_refcnt))
601 			kfree(hh);
602 	}
603 
604 	if (neigh->ops && neigh->ops->destructor)
605 		(neigh->ops->destructor)(neigh);
606 
607 	skb_queue_purge(&neigh->arp_queue);
608 
609 	dev_put(neigh->dev);
610 	neigh_parms_put(neigh->parms);
611 
612 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
613 
614 	atomic_dec(&neigh->tbl->entries);
615 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
616 }
617 
618 /* The neighbour state is suspect;
619    disable the fast path.
620 
621    Called with the neighbour write-locked.
622  */
623 static void neigh_suspect(struct neighbour *neigh)
624 {
625 	struct hh_cache *hh;
626 
627 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
628 
629 	neigh->output = neigh->ops->output;
630 
631 	for (hh = neigh->hh; hh; hh = hh->hh_next)
632 		hh->hh_output = neigh->ops->output;
633 }
634 
635 /* The neighbour state is OK;
636    enable the fast path.
637 
638    Called with the neighbour write-locked.
639  */
640 static void neigh_connect(struct neighbour *neigh)
641 {
642 	struct hh_cache *hh;
643 
644 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
645 
646 	neigh->output = neigh->ops->connected_output;
647 
648 	for (hh = neigh->hh; hh; hh = hh->hh_next)
649 		hh->hh_output = neigh->ops->hh_output;
650 }
651 
652 static void neigh_periodic_timer(unsigned long arg)
653 {
654 	struct neigh_table *tbl = (struct neigh_table *)arg;
655 	struct neighbour *n, **np;
656 	unsigned long expire, now = jiffies;
657 
658 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
659 
660 	write_lock(&tbl->lock);
661 
662 	/*
663 	 *	Periodically recompute ReachableTime from the random function.
664 	 */
665 
666 	if (time_after(now, tbl->last_rand + 300 * HZ)) {
667 		struct neigh_parms *p;
668 		tbl->last_rand = now;
669 		for (p = &tbl->parms; p; p = p->next)
670 			p->reachable_time =
671 				neigh_rand_reach_time(p->base_reachable_time);
672 	}
673 
674 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
675 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
676 
677 	while ((n = *np) != NULL) {
678 		unsigned int state;
679 
680 		write_lock(&n->lock);
681 
682 		state = n->nud_state;
683 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
684 			write_unlock(&n->lock);
685 			goto next_elt;
686 		}
687 
688 		if (time_before(n->used, n->confirmed))
689 			n->used = n->confirmed;
690 
691 		if (atomic_read(&n->refcnt) == 1 &&
692 		    (state == NUD_FAILED ||
693 		     time_after(now, n->used + n->parms->gc_staletime))) {
694 			*np = n->next;
695 			n->dead = 1;
696 			write_unlock(&n->lock);
697 			neigh_release(n);
698 			continue;
699 		}
700 		write_unlock(&n->lock);
701 
702 next_elt:
703 		np = &n->next;
704 	}
705 
706 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
707 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
708 	 * base_reachable_time.
709 	 */
710 	expire = tbl->parms.base_reachable_time >> 1;
711 	expire /= (tbl->hash_mask + 1);
712 	if (!expire)
713 		expire = 1;
714 
715 	mod_timer(&tbl->gc_timer, now + expire);
716 
717 	write_unlock(&tbl->lock);
718 }
719 
720 static __inline__ int neigh_max_probes(struct neighbour *n)
721 {
722 	struct neigh_parms *p = n->parms;
723 	return (n->nud_state & NUD_PROBE ?
724 		p->ucast_probes :
725 		p->ucast_probes + p->app_probes + p->mcast_probes);
726 }
727 
728 
729 /* Called when a timer expires for a neighbour entry. */
730 
731 static void neigh_timer_handler(unsigned long arg)
732 {
733 	unsigned long now, next;
734 	struct neighbour *neigh = (struct neighbour *)arg;
735 	unsigned state;
736 	int notify = 0;
737 
738 	write_lock(&neigh->lock);
739 
740 	state = neigh->nud_state;
741 	now = jiffies;
742 	next = now + HZ;
743 
744 	if (!(state & NUD_IN_TIMER)) {
745 #ifndef CONFIG_SMP
746 		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
747 #endif
748 		goto out;
749 	}
750 
751 	if (state & NUD_REACHABLE) {
752 		if (time_before_eq(now,
753 				   neigh->confirmed + neigh->parms->reachable_time)) {
754 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
755 			next = neigh->confirmed + neigh->parms->reachable_time;
756 		} else if (time_before_eq(now,
757 					  neigh->used + neigh->parms->delay_probe_time)) {
758 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
759 			neigh->nud_state = NUD_DELAY;
760 			neigh_suspect(neigh);
761 			next = now + neigh->parms->delay_probe_time;
762 		} else {
763 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
764 			neigh->nud_state = NUD_STALE;
765 			neigh_suspect(neigh);
766 		}
767 	} else if (state & NUD_DELAY) {
768 		if (time_before_eq(now,
769 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
770 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
771 			neigh->nud_state = NUD_REACHABLE;
772 			neigh_connect(neigh);
773 			next = neigh->confirmed + neigh->parms->reachable_time;
774 		} else {
775 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
776 			neigh->nud_state = NUD_PROBE;
777 			atomic_set(&neigh->probes, 0);
778 			next = now + neigh->parms->retrans_time;
779 		}
780 	} else {
781 		/* NUD_PROBE|NUD_INCOMPLETE */
782 		next = now + neigh->parms->retrans_time;
783 	}
784 
785 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
786 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
787 		struct sk_buff *skb;
788 
789 		neigh->nud_state = NUD_FAILED;
790 		notify = 1;
791 		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
792 		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
793 
794 		/* This is a delicate spot: error_report is a very complicated
795 		   routine.  In particular, it can hit this same neighbour entry!
796 
797 		   So we try to be careful and avoid a dead loop. --ANK
798 		 */
799 		while (neigh->nud_state == NUD_FAILED &&
800 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
801 			write_unlock(&neigh->lock);
802 			neigh->ops->error_report(neigh, skb);
803 			write_lock(&neigh->lock);
804 		}
805 		skb_queue_purge(&neigh->arp_queue);
806 	}
807 
808 	if (neigh->nud_state & NUD_IN_TIMER) {
809 		neigh_hold(neigh);
810 		if (time_before(next, jiffies + HZ/2))
811 			next = jiffies + HZ/2;
812 		neigh->timer.expires = next;
813 		add_timer(&neigh->timer);
814 	}
815 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
816 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
817 		/* keep skb alive even if arp_queue overflows */
818 		if (skb)
819 			skb_get(skb);
820 		write_unlock(&neigh->lock);
821 		neigh->ops->solicit(neigh, skb);
822 		atomic_inc(&neigh->probes);
823 		if (skb)
824 			kfree_skb(skb);
825 	} else {
826 out:
827 		write_unlock(&neigh->lock);
828 	}
829 
830 #ifdef CONFIG_ARPD
831 	if (notify && neigh->parms->app_probes)
832 		neigh_app_notify(neigh);
833 #endif
834 	neigh_release(neigh);
835 }
836 
837 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
838 {
839 	int rc;
840 	unsigned long now;
841 
842 	write_lock_bh(&neigh->lock);
843 
844 	rc = 0;
845 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
846 		goto out_unlock_bh;
847 
848 	now = jiffies;
849 
850 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
851 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
852 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
853 			neigh->nud_state     = NUD_INCOMPLETE;
854 			neigh_hold(neigh);
855 			neigh->timer.expires = now + 1;
856 			add_timer(&neigh->timer);
857 		} else {
858 			neigh->nud_state = NUD_FAILED;
859 			write_unlock_bh(&neigh->lock);
860 
861 			if (skb)
862 				kfree_skb(skb);
863 			return 1;
864 		}
865 	} else if (neigh->nud_state & NUD_STALE) {
866 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
867 		neigh_hold(neigh);
868 		neigh->nud_state = NUD_DELAY;
869 		neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
870 		add_timer(&neigh->timer);
871 	}
872 
873 	if (neigh->nud_state == NUD_INCOMPLETE) {
874 		if (skb) {
875 			if (skb_queue_len(&neigh->arp_queue) >=
876 			    neigh->parms->queue_len) {
877 				struct sk_buff *buff;
878 				buff = neigh->arp_queue.next;
879 				__skb_unlink(buff, &neigh->arp_queue);
880 				kfree_skb(buff);
881 			}
882 			__skb_queue_tail(&neigh->arp_queue, skb);
883 		}
884 		rc = 1;
885 	}
886 out_unlock_bh:
887 	write_unlock_bh(&neigh->lock);
888 	return rc;
889 }
890 
891 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
892 {
893 	struct hh_cache *hh;
894 	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
895 		neigh->dev->header_cache_update;
896 
897 	if (update) {
898 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
899 			write_lock_bh(&hh->hh_lock);
900 			update(hh, neigh->dev, neigh->ha);
901 			write_unlock_bh(&hh->hh_lock);
902 		}
903 	}
904 }
905 
906 
907 
908 /* Generic update routine.
909    -- lladdr is the new lladdr, or NULL if it is not supplied.
910    -- new    is the new state.
911    -- flags
912 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
913 				if it is different.
914 	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
915 				lladdr as suspect instead of overriding it
916 				if it is different.
917 				It also allows retaining the current state
918 				if the lladdr is unchanged.
919 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
920 
921 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
922 				NTF_ROUTER flag.
923 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
924 				be a router.
925 
926    The caller MUST hold a reference count on the entry.
927  */
928 
929 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
930 		 u32 flags)
931 {
932 	u8 old;
933 	int err;
934 #ifdef CONFIG_ARPD
935 	int notify = 0;
936 #endif
937 	struct net_device *dev;
938 	int update_isrouter = 0;
939 
940 	write_lock_bh(&neigh->lock);
941 
942 	dev    = neigh->dev;
943 	old    = neigh->nud_state;
944 	err    = -EPERM;
945 
946 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
947 	    (old & (NUD_NOARP | NUD_PERMANENT)))
948 		goto out;
949 
950 	if (!(new & NUD_VALID)) {
951 		neigh_del_timer(neigh);
952 		if (old & NUD_CONNECTED)
953 			neigh_suspect(neigh);
954 		neigh->nud_state = new;
955 		err = 0;
956 #ifdef CONFIG_ARPD
957 		notify = old & NUD_VALID;
958 #endif
959 		goto out;
960 	}
961 
962 	/* Compare the new lladdr with the cached one. */
963 	if (!dev->addr_len) {
964 		/* First case: device needs no address. */
965 		lladdr = neigh->ha;
966 	} else if (lladdr) {
967 		/* The second case: something is already cached
968 		   and a new address is proposed:
969 		   - compare the new and old addresses
970 		   - if they differ, check the override flag
971 		 */
972 		if ((old & NUD_VALID) &&
973 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
974 			lladdr = neigh->ha;
975 	} else {
976 		/* No address is supplied; if we know something,
977 		   use it, otherwise discard the request.
978 		 */
979 		err = -EINVAL;
980 		if (!(old & NUD_VALID))
981 			goto out;
982 		lladdr = neigh->ha;
983 	}
984 
985 	if (new & NUD_CONNECTED)
986 		neigh->confirmed = jiffies;
987 	neigh->updated = jiffies;
988 
989 	/* If the entry was valid and the address has not changed,
990 	   do not change the entry state if the new one is STALE.
991 	 */
992 	err = 0;
993 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
994 	if (old & NUD_VALID) {
995 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
996 			update_isrouter = 0;
997 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
998 			    (old & NUD_CONNECTED)) {
999 				lladdr = neigh->ha;
1000 				new = NUD_STALE;
1001 			} else
1002 				goto out;
1003 		} else {
1004 			if (lladdr == neigh->ha && new == NUD_STALE &&
1005 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1006 			     (old & NUD_CONNECTED))
1007 			    )
1008 				new = old;
1009 		}
1010 	}
1011 
1012 	if (new != old) {
1013 		neigh_del_timer(neigh);
1014 		if (new & NUD_IN_TIMER) {
1015 			neigh_hold(neigh);
1016 			neigh->timer.expires = jiffies +
1017 						((new & NUD_REACHABLE) ?
1018 						 neigh->parms->reachable_time : 0);
1019 			add_timer(&neigh->timer);
1020 		}
1021 		neigh->nud_state = new;
1022 	}
1023 
1024 	if (lladdr != neigh->ha) {
1025 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1026 		neigh_update_hhs(neigh);
1027 		if (!(new & NUD_CONNECTED))
1028 			neigh->confirmed = jiffies -
1029 				      (neigh->parms->base_reachable_time << 1);
1030 #ifdef CONFIG_ARPD
1031 		notify = 1;
1032 #endif
1033 	}
1034 	if (new == old)
1035 		goto out;
1036 	if (new & NUD_CONNECTED)
1037 		neigh_connect(neigh);
1038 	else
1039 		neigh_suspect(neigh);
1040 	if (!(old & NUD_VALID)) {
1041 		struct sk_buff *skb;
1042 
1043 		/* Again: avoid a dead loop if something went wrong */
1044 
1045 		while (neigh->nud_state & NUD_VALID &&
1046 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1047 			struct neighbour *n1 = neigh;
1048 			write_unlock_bh(&neigh->lock);
1049 			/* On shaper/eql skb->dst->neighbour != neigh :( */
1050 			if (skb->dst && skb->dst->neighbour)
1051 				n1 = skb->dst->neighbour;
1052 			n1->output(skb);
1053 			write_lock_bh(&neigh->lock);
1054 		}
1055 		skb_queue_purge(&neigh->arp_queue);
1056 	}
1057 out:
1058 	if (update_isrouter) {
1059 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1060 			(neigh->flags | NTF_ROUTER) :
1061 			(neigh->flags & ~NTF_ROUTER);
1062 	}
1063 	write_unlock_bh(&neigh->lock);
1064 #ifdef CONFIG_ARPD
1065 	if (notify && neigh->parms->app_probes)
1066 		neigh_app_notify(neigh);
1067 #endif
1068 	return err;
1069 }
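/*
 * Illustrative fragment (not part of the original file): how a
 * protocol might confirm a neighbour from a received reply.  "lladdr"
 * is a hypothetical link-layer address taken from the packet.
 */
#if 0
	/* A reply proves reachability: install lladdr and move to
	 * NUD_REACHABLE, overriding a different cached address.
	 */
	neigh_update(n, lladdr, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE);
#endif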
1070 
1071 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1072 				 u8 *lladdr, void *saddr,
1073 				 struct net_device *dev)
1074 {
1075 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1076 						 lladdr || !dev->addr_len);
1077 	if (neigh)
1078 		neigh_update(neigh, lladdr, NUD_STALE,
1079 			     NEIGH_UPDATE_F_OVERRIDE);
1080 	return neigh;
1081 }
1082 
1083 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1084 			  u16 protocol)
1085 {
1086 	struct hh_cache	*hh;
1087 	struct net_device *dev = dst->dev;
1088 
1089 	for (hh = n->hh; hh; hh = hh->hh_next)
1090 		if (hh->hh_type == protocol)
1091 			break;
1092 
1093 	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1094 		memset(hh, 0, sizeof(struct hh_cache));
1095 		rwlock_init(&hh->hh_lock);
1096 		hh->hh_type = protocol;
1097 		atomic_set(&hh->hh_refcnt, 0);
1098 		hh->hh_next = NULL;
1099 		if (dev->hard_header_cache(n, hh)) {
1100 			kfree(hh);
1101 			hh = NULL;
1102 		} else {
1103 			atomic_inc(&hh->hh_refcnt);
1104 			hh->hh_next = n->hh;
1105 			n->hh	    = hh;
1106 			if (n->nud_state & NUD_CONNECTED)
1107 				hh->hh_output = n->ops->hh_output;
1108 			else
1109 				hh->hh_output = n->ops->output;
1110 		}
1111 	}
1112 	if (hh)	{
1113 		atomic_inc(&hh->hh_refcnt);
1114 		dst->hh = hh;
1115 	}
1116 }
1117 
1118 /* This function can be used in contexts where only the old dev_queue_xmit
1119    worked, e.g. if you want to override the normal output path (eql, shaper)
1120    but resolution has not been done yet.
1121  */
1122 
1123 int neigh_compat_output(struct sk_buff *skb)
1124 {
1125 	struct net_device *dev = skb->dev;
1126 
1127 	__skb_pull(skb, skb->nh.raw - skb->data);
1128 
1129 	if (dev->hard_header &&
1130 	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1131 		    	     skb->len) < 0 &&
1132 	    dev->rebuild_header(skb))
1133 		return 0;
1134 
1135 	return dev_queue_xmit(skb);
1136 }
1137 
1138 /* Slow and careful. */
1139 
1140 int neigh_resolve_output(struct sk_buff *skb)
1141 {
1142 	struct dst_entry *dst = skb->dst;
1143 	struct neighbour *neigh;
1144 	int rc = 0;
1145 
1146 	if (!dst || !(neigh = dst->neighbour))
1147 		goto discard;
1148 
1149 	__skb_pull(skb, skb->nh.raw - skb->data);
1150 
1151 	if (!neigh_event_send(neigh, skb)) {
1152 		int err;
1153 		struct net_device *dev = neigh->dev;
1154 		if (dev->hard_header_cache && !dst->hh) {
1155 			write_lock_bh(&neigh->lock);
1156 			if (!dst->hh)
1157 				neigh_hh_init(neigh, dst, dst->ops->protocol);
1158 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1159 					       neigh->ha, NULL, skb->len);
1160 			write_unlock_bh(&neigh->lock);
1161 		} else {
1162 			read_lock_bh(&neigh->lock);
1163 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1164 					       neigh->ha, NULL, skb->len);
1165 			read_unlock_bh(&neigh->lock);
1166 		}
1167 		if (err >= 0)
1168 			rc = neigh->ops->queue_xmit(skb);
1169 		else
1170 			goto out_kfree_skb;
1171 	}
1172 out:
1173 	return rc;
1174 discard:
1175 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1176 		      dst, dst ? dst->neighbour : NULL);
1177 out_kfree_skb:
1178 	rc = -EINVAL;
1179 	kfree_skb(skb);
1180 	goto out;
1181 }
1182 
1183 /* As fast as possible without hh cache */
1184 
1185 int neigh_connected_output(struct sk_buff *skb)
1186 {
1187 	int err;
1188 	struct dst_entry *dst = skb->dst;
1189 	struct neighbour *neigh = dst->neighbour;
1190 	struct net_device *dev = neigh->dev;
1191 
1192 	__skb_pull(skb, skb->nh.raw - skb->data);
1193 
1194 	read_lock_bh(&neigh->lock);
1195 	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1196 			       neigh->ha, NULL, skb->len);
1197 	read_unlock_bh(&neigh->lock);
1198 	if (err >= 0)
1199 		err = neigh->ops->queue_xmit(skb);
1200 	else {
1201 		err = -EINVAL;
1202 		kfree_skb(skb);
1203 	}
1204 	return err;
1205 }
1206 
1207 static void neigh_proxy_process(unsigned long arg)
1208 {
1209 	struct neigh_table *tbl = (struct neigh_table *)arg;
1210 	long sched_next = 0;
1211 	unsigned long now = jiffies;
1212 	struct sk_buff *skb;
1213 
1214 	spin_lock(&tbl->proxy_queue.lock);
1215 
1216 	skb = tbl->proxy_queue.next;
1217 
1218 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1219 		struct sk_buff *back = skb;
1220 		long tdif = NEIGH_CB(back)->sched_next - now;
1221 
1222 		skb = skb->next;
1223 		if (tdif <= 0) {
1224 			struct net_device *dev = back->dev;
1225 			__skb_unlink(back, &tbl->proxy_queue);
1226 			if (tbl->proxy_redo && netif_running(dev))
1227 				tbl->proxy_redo(back);
1228 			else
1229 				kfree_skb(back);
1230 
1231 			dev_put(dev);
1232 		} else if (!sched_next || tdif < sched_next)
1233 			sched_next = tdif;
1234 	}
1235 	del_timer(&tbl->proxy_timer);
1236 	if (sched_next)
1237 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1238 	spin_unlock(&tbl->proxy_queue.lock);
1239 }
1240 
1241 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1242 		    struct sk_buff *skb)
1243 {
1244 	unsigned long now = jiffies;
1245 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
1246 
1247 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1248 		kfree_skb(skb);
1249 		return;
1250 	}
1251 
1252 	NEIGH_CB(skb)->sched_next = sched_next;
1253 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1254 
1255 	spin_lock(&tbl->proxy_queue.lock);
1256 	if (del_timer(&tbl->proxy_timer)) {
1257 		if (time_before(tbl->proxy_timer.expires, sched_next))
1258 			sched_next = tbl->proxy_timer.expires;
1259 	}
1260 	dst_release(skb->dst);
1261 	skb->dst = NULL;
1262 	dev_hold(skb->dev);
1263 	__skb_queue_tail(&tbl->proxy_queue, skb);
1264 	mod_timer(&tbl->proxy_timer, sched_next);
1265 	spin_unlock(&tbl->proxy_queue.lock);
1266 }
1267 
1268 
1269 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1270 				      struct neigh_table *tbl)
1271 {
1272 	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1273 
1274 	if (p) {
1275 		memcpy(p, &tbl->parms, sizeof(*p));
1276 		p->tbl		  = tbl;
1277 		atomic_set(&p->refcnt, 1);
1278 		INIT_RCU_HEAD(&p->rcu_head);
1279 		p->reachable_time =
1280 				neigh_rand_reach_time(p->base_reachable_time);
1281 		if (dev) {
1282 			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1283 				kfree(p);
1284 				return NULL;
1285 			}
1286 
1287 			dev_hold(dev);
1288 			p->dev = dev;
1289 		}
1290 		p->sysctl_table = NULL;
1291 		write_lock_bh(&tbl->lock);
1292 		p->next		= tbl->parms.next;
1293 		tbl->parms.next = p;
1294 		write_unlock_bh(&tbl->lock);
1295 	}
1296 	return p;
1297 }
1298 
1299 static void neigh_rcu_free_parms(struct rcu_head *head)
1300 {
1301 	struct neigh_parms *parms =
1302 		container_of(head, struct neigh_parms, rcu_head);
1303 
1304 	neigh_parms_put(parms);
1305 }
1306 
1307 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1308 {
1309 	struct neigh_parms **p;
1310 
1311 	if (!parms || parms == &tbl->parms)
1312 		return;
1313 	write_lock_bh(&tbl->lock);
1314 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1315 		if (*p == parms) {
1316 			*p = parms->next;
1317 			parms->dead = 1;
1318 			write_unlock_bh(&tbl->lock);
1319 			if (parms->dev)
1320 				dev_put(parms->dev);
1321 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1322 			return;
1323 		}
1324 	}
1325 	write_unlock_bh(&tbl->lock);
1326 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1327 }
1328 
1329 void neigh_parms_destroy(struct neigh_parms *parms)
1330 {
1331 	kfree(parms);
1332 }
1333 
1334 
1335 void neigh_table_init(struct neigh_table *tbl)
1336 {
1337 	unsigned long now = jiffies;
1338 	unsigned long phsize;
1339 
1340 	atomic_set(&tbl->parms.refcnt, 1);
1341 	INIT_RCU_HEAD(&tbl->parms.rcu_head);
1342 	tbl->parms.reachable_time =
1343 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1344 
1345 	if (!tbl->kmem_cachep)
1346 		tbl->kmem_cachep = kmem_cache_create(tbl->id,
1347 						     tbl->entry_size,
1348 						     0, SLAB_HWCACHE_ALIGN,
1349 						     NULL, NULL);
1350 
1351 	if (!tbl->kmem_cachep)
1352 		panic("cannot create neighbour cache");
1353 
1354 	tbl->stats = alloc_percpu(struct neigh_statistics);
1355 	if (!tbl->stats)
1356 		panic("cannot create neighbour cache statistics");
1357 
1358 #ifdef CONFIG_PROC_FS
1359 	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1360 	if (!tbl->pde)
1361 		panic("cannot create neighbour proc dir entry");
1362 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
1363 	tbl->pde->data = tbl;
1364 #endif
1365 
1366 	tbl->hash_mask = 1;
1367 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1368 
1369 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1370 	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
1371 
1372 	if (!tbl->hash_buckets || !tbl->phash_buckets)
1373 		panic("cannot allocate neighbour cache hashes");
1374 
1375 	memset(tbl->phash_buckets, 0, phsize);
1376 
1377 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1378 
1379 	rwlock_init(&tbl->lock);
1380 	init_timer(&tbl->gc_timer);
1381 	tbl->gc_timer.data     = (unsigned long)tbl;
1382 	tbl->gc_timer.function = neigh_periodic_timer;
1383 	tbl->gc_timer.expires  = now + 1;
1384 	add_timer(&tbl->gc_timer);
1385 
1386 	init_timer(&tbl->proxy_timer);
1387 	tbl->proxy_timer.data	  = (unsigned long)tbl;
1388 	tbl->proxy_timer.function = neigh_proxy_process;
1389 	skb_queue_head_init(&tbl->proxy_queue);
1390 
1391 	tbl->last_flush = now;
1392 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1393 	write_lock(&neigh_tbl_lock);
1394 	tbl->next	= neigh_tables;
1395 	neigh_tables	= tbl;
1396 	write_unlock(&neigh_tbl_lock);
1397 }
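/*
 * Illustrative sketch (not part of this file): a protocol registers
 * its table by filling in the mandatory fields and calling
 * neigh_table_init().  All values below are hypothetical; see the ARP
 * code for a real example.
 */
#if 0
static struct neigh_table my_tbl = {
	.family		= AF_INET,
	.entry_size	= sizeof(struct neighbour) + 4,	/* + key_len */
	.key_len	= 4,
	.hash		= my_hash,		/* hypothetical hash function */
	.constructor	= my_constructor,	/* hypothetical constructor */
	.id		= "my_cache",
	/* .parms, gc thresholds, intervals, ... */
};

	neigh_table_init(&my_tbl);
#endif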
1398 
1399 int neigh_table_clear(struct neigh_table *tbl)
1400 {
1401 	struct neigh_table **tp;
1402 
1403 	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1404 	del_timer_sync(&tbl->gc_timer);
1405 	del_timer_sync(&tbl->proxy_timer);
1406 	pneigh_queue_purge(&tbl->proxy_queue);
1407 	neigh_ifdown(tbl, NULL);
1408 	if (atomic_read(&tbl->entries))
1409 		printk(KERN_CRIT "neighbour leakage\n");
1410 	write_lock(&neigh_tbl_lock);
1411 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1412 		if (*tp == tbl) {
1413 			*tp = tbl->next;
1414 			break;
1415 		}
1416 	}
1417 	write_unlock(&neigh_tbl_lock);
1418 
1419 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1420 	tbl->hash_buckets = NULL;
1421 
1422 	kfree(tbl->phash_buckets);
1423 	tbl->phash_buckets = NULL;
1424 
1425 	return 0;
1426 }
1427 
1428 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1429 {
1430 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1431 	struct rtattr **nda = arg;
1432 	struct neigh_table *tbl;
1433 	struct net_device *dev = NULL;
1434 	int err = -ENODEV;
1435 
1436 	if (ndm->ndm_ifindex &&
1437 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1438 		goto out;
1439 
1440 	read_lock(&neigh_tbl_lock);
1441 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1442 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1443 		struct neighbour *n;
1444 
1445 		if (tbl->family != ndm->ndm_family)
1446 			continue;
1447 		read_unlock(&neigh_tbl_lock);
1448 
1449 		err = -EINVAL;
1450 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1451 			goto out_dev_put;
1452 
1453 		if (ndm->ndm_flags & NTF_PROXY) {
1454 			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
1455 			goto out_dev_put;
1456 		}
1457 
1458 		if (!dev)
1459 			goto out;
1460 
1461 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1462 		if (n) {
1463 			err = neigh_update(n, NULL, NUD_FAILED,
1464 					   NEIGH_UPDATE_F_OVERRIDE|
1465 					   NEIGH_UPDATE_F_ADMIN);
1466 			neigh_release(n);
1467 		}
1468 		goto out_dev_put;
1469 	}
1470 	read_unlock(&neigh_tbl_lock);
1471 	err = -EADDRNOTAVAIL;
1472 out_dev_put:
1473 	if (dev)
1474 		dev_put(dev);
1475 out:
1476 	return err;
1477 }
1478 
1479 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1480 {
1481 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1482 	struct rtattr **nda = arg;
1483 	struct neigh_table *tbl;
1484 	struct net_device *dev = NULL;
1485 	int err = -ENODEV;
1486 
1487 	if (ndm->ndm_ifindex &&
1488 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1489 		goto out;
1490 
1491 	read_lock(&neigh_tbl_lock);
1492 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1493 		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
1494 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1495 		int override = 1;
1496 		struct neighbour *n;
1497 
1498 		if (tbl->family != ndm->ndm_family)
1499 			continue;
1500 		read_unlock(&neigh_tbl_lock);
1501 
1502 		err = -EINVAL;
1503 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1504 			goto out_dev_put;
1505 
1506 		if (ndm->ndm_flags & NTF_PROXY) {
1507 			err = -ENOBUFS;
1508 			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
1509 				err = 0;
1510 			goto out_dev_put;
1511 		}
1512 
1513 		err = -EINVAL;
1514 		if (!dev)
1515 			goto out;
1516 		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
1517 			goto out_dev_put;
1518 
1519 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1520 		if (n) {
1521 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1522 				err = -EEXIST;
1523 				neigh_release(n);
1524 				goto out_dev_put;
1525 			}
1526 
1527 			override = nlh->nlmsg_flags & NLM_F_REPLACE;
1528 		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1529 			err = -ENOENT;
1530 			goto out_dev_put;
1531 		} else {
1532 			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
1533 			if (IS_ERR(n)) {
1534 				err = PTR_ERR(n);
1535 				goto out_dev_put;
1536 			}
1537 		}
1538 
1539 		err = neigh_update(n,
1540 				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
1541 				   ndm->ndm_state,
1542 				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
1543 				   NEIGH_UPDATE_F_ADMIN);
1544 
1545 		neigh_release(n);
1546 		goto out_dev_put;
1547 	}
1548 
1549 	read_unlock(&neigh_tbl_lock);
1550 	err = -EADDRNOTAVAIL;
1551 out_dev_put:
1552 	if (dev)
1553 		dev_put(dev);
1554 out:
1555 	return err;
1556 }
1557 
1558 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1559 {
1560 	struct rtattr *nest = NULL;
1561 
1562 	nest = RTA_NEST(skb, NDTA_PARMS);
1563 
1564 	if (parms->dev)
1565 		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1566 
1567 	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1568 	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1569 	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1570 	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1571 	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1572 	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1573 	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1574 	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1575 		      parms->base_reachable_time);
1576 	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1577 	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1578 	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1579 	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1580 	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1581 	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1582 
1583 	return RTA_NEST_END(skb, nest);
1584 
1585 rtattr_failure:
1586 	return RTA_NEST_CANCEL(skb, nest);
1587 }
1588 
1589 static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1590 			      struct netlink_callback *cb)
1591 {
1592 	struct nlmsghdr *nlh;
1593 	struct ndtmsg *ndtmsg;
1594 
1595 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1596 			       NLM_F_MULTI);
1597 
1598 	ndtmsg = NLMSG_DATA(nlh);
1599 
1600 	read_lock_bh(&tbl->lock);
1601 	ndtmsg->ndtm_family = tbl->family;
1602 	ndtmsg->ndtm_pad1   = 0;
1603 	ndtmsg->ndtm_pad2   = 0;
1604 
1605 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1606 	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1607 	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1608 	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1609 	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1610 
1611 	{
1612 		unsigned long now = jiffies;
1613 		unsigned int flush_delta = now - tbl->last_flush;
1614 		unsigned int rand_delta = now - tbl->last_rand;
1615 
1616 		struct ndt_config ndc = {
1617 			.ndtc_key_len		= tbl->key_len,
1618 			.ndtc_entry_size	= tbl->entry_size,
1619 			.ndtc_entries		= atomic_read(&tbl->entries),
1620 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1621 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1622 			.ndtc_hash_rnd		= tbl->hash_rnd,
1623 			.ndtc_hash_mask		= tbl->hash_mask,
1624 			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
1625 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1626 		};
1627 
1628 		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1629 	}
1630 
1631 	{
1632 		int cpu;
1633 		struct ndt_stats ndst;
1634 
1635 		memset(&ndst, 0, sizeof(ndst));
1636 
1637 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
1638 			struct neigh_statistics	*st;
1639 
1640 			if (!cpu_possible(cpu))
1641 				continue;
1642 
1643 			st = per_cpu_ptr(tbl->stats, cpu);
1644 			ndst.ndts_allocs		+= st->allocs;
1645 			ndst.ndts_destroys		+= st->destroys;
1646 			ndst.ndts_hash_grows		+= st->hash_grows;
1647 			ndst.ndts_res_failed		+= st->res_failed;
1648 			ndst.ndts_lookups		+= st->lookups;
1649 			ndst.ndts_hits			+= st->hits;
1650 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1651 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1652 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1653 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1654 		}
1655 
1656 		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1657 	}
1658 
1659 	BUG_ON(tbl->parms.dev);
1660 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1661 		goto rtattr_failure;
1662 
1663 	read_unlock_bh(&tbl->lock);
1664 	return NLMSG_END(skb, nlh);
1665 
1666 rtattr_failure:
1667 	read_unlock_bh(&tbl->lock);
1668 	return NLMSG_CANCEL(skb, nlh);
1669 
1670 nlmsg_failure:
1671 	return -1;
1672 }
1673 
1674 static int neightbl_fill_param_info(struct neigh_table *tbl,
1675 				    struct neigh_parms *parms,
1676 				    struct sk_buff *skb,
1677 				    struct netlink_callback *cb)
1678 {
1679 	struct ndtmsg *ndtmsg;
1680 	struct nlmsghdr *nlh;
1681 
1682 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1683 			       NLM_F_MULTI);
1684 
1685 	ndtmsg = NLMSG_DATA(nlh);
1686 
1687 	read_lock_bh(&tbl->lock);
1688 	ndtmsg->ndtm_family = tbl->family;
1689 	ndtmsg->ndtm_pad1   = 0;
1690 	ndtmsg->ndtm_pad2   = 0;
1691 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1692 
1693 	if (neightbl_fill_parms(skb, parms) < 0)
1694 		goto rtattr_failure;
1695 
1696 	read_unlock_bh(&tbl->lock);
1697 	return NLMSG_END(skb, nlh);
1698 
1699 rtattr_failure:
1700 	read_unlock_bh(&tbl->lock);
1701 	return NLMSG_CANCEL(skb, nlh);
1702 
1703 nlmsg_failure:
1704 	return -1;
1705 }
1706 
1707 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1708 						      int ifindex)
1709 {
1710 	struct neigh_parms *p;
1711 
1712 	for (p = &tbl->parms; p; p = p->next)
1713 		if ((p->dev && p->dev->ifindex == ifindex) ||
1714 		    (!p->dev && !ifindex))
1715 			return p;
1716 
1717 	return NULL;
1718 }
1719 
1720 int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1721 {
1722 	struct neigh_table *tbl;
1723 	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
1724 	struct rtattr **tb = arg;
1725 	int err = -EINVAL;
1726 
1727 	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
1728 		return -EINVAL;
1729 
1730 	read_lock(&neigh_tbl_lock);
1731 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1732 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1733 			continue;
1734 
1735 		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
1736 			break;
1737 	}
1738 
1739 	if (tbl == NULL) {
1740 		err = -ENOENT;
1741 		goto errout;
1742 	}
1743 
1744 	/*
1745 	 * We acquire tbl->lock to be nice to the periodic timers and
1746 	 * make sure they always see a consistent set of values.
1747 	 */
1748 	write_lock_bh(&tbl->lock);
1749 
1750 	if (tb[NDTA_THRESH1 - 1])
1751 		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
1752 
1753 	if (tb[NDTA_THRESH2 - 1])
1754 		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
1755 
1756 	if (tb[NDTA_THRESH3 - 1])
1757 		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
1758 
1759 	if (tb[NDTA_GC_INTERVAL - 1])
1760 		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
1761 
1762 	if (tb[NDTA_PARMS - 1]) {
1763 		struct rtattr *tbp[NDTPA_MAX];
1764 		struct neigh_parms *p;
1765 		u32 ifindex = 0;
1766 
1767 		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
1768 			goto rtattr_failure;
1769 
1770 		if (tbp[NDTPA_IFINDEX - 1])
1771 			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
1772 
1773 		p = lookup_neigh_params(tbl, ifindex);
1774 		if (p == NULL) {
1775 			err = -ENOENT;
1776 			goto rtattr_failure;
1777 		}
1778 
1779 		if (tbp[NDTPA_QUEUE_LEN - 1])
1780 			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
1781 
1782 		if (tbp[NDTPA_PROXY_QLEN - 1])
1783 			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
1784 
1785 		if (tbp[NDTPA_APP_PROBES - 1])
1786 			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
1787 
1788 		if (tbp[NDTPA_UCAST_PROBES - 1])
1789 			p->ucast_probes =
1790 			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
1791 
1792 		if (tbp[NDTPA_MCAST_PROBES - 1])
1793 			p->mcast_probes =
1794 			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
1795 
1796 		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
1797 			p->base_reachable_time =
1798 			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
1799 
1800 		if (tbp[NDTPA_GC_STALETIME - 1])
1801 			p->gc_staletime =
1802 			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
1803 
1804 		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
1805 			p->delay_probe_time =
1806 			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
1807 
1808 		if (tbp[NDTPA_RETRANS_TIME - 1])
1809 			p->retrans_time =
1810 			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
1811 
1812 		if (tbp[NDTPA_ANYCAST_DELAY - 1])
1813 			p->anycast_delay =
1814 			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
1815 
1816 		if (tbp[NDTPA_PROXY_DELAY - 1])
1817 			p->proxy_delay =
1818 			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
1819 
1820 		if (tbp[NDTPA_LOCKTIME - 1])
1821 			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
1822 	}
1823 
1824 	err = 0;
1825 
1826 rtattr_failure:
1827 	write_unlock_bh(&tbl->lock);
1828 errout:
1829 	read_unlock(&neigh_tbl_lock);
1830 	return err;
1831 }
1832 
1833 int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1834 {
1835 	int idx, family;
1836 	int s_idx = cb->args[0];
1837 	struct neigh_table *tbl;
1838 
1839 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1840 
1841 	read_lock(&neigh_tbl_lock);
1842 	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
1843 		struct neigh_parms *p;
1844 
1845 		if (idx < s_idx || (family && tbl->family != family))
1846 			continue;
1847 
1848 		if (neightbl_fill_info(tbl, skb, cb) <= 0)
1849 			break;
1850 
1851 		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
1852 			if (idx < s_idx)
1853 				continue;
1854 
1855 			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
1856 				goto out;
1857 		}
1858 
1859 	}
1860 out:
1861 	read_unlock(&neigh_tbl_lock);
1862 	cb->args[0] = idx;
1863 
1864 	return skb->len;
1865 }
1866 
1867 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1868 			   u32 pid, u32 seq, int event, unsigned int flags)
1869 {
1870 	unsigned long now = jiffies;
1871 	unsigned char *b = skb->tail;
1872 	struct nda_cacheinfo ci;
1873 	int locked = 0;
1874 	u32 probes;
1875 	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
1876 					 sizeof(struct ndmsg), flags);
1877 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1878 
1879 	ndm->ndm_family	 = n->ops->family;
1880 	ndm->ndm_pad1    = 0;
1881 	ndm->ndm_pad2    = 0;
1882 	ndm->ndm_flags	 = n->flags;
1883 	ndm->ndm_type	 = n->type;
1884 	ndm->ndm_ifindex = n->dev->ifindex;
1885 	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1886 	read_lock_bh(&n->lock);
1887 	locked		 = 1;
1888 	ndm->ndm_state	 = n->nud_state;
1889 	if (n->nud_state & NUD_VALID)
1890 		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1891 	ci.ndm_used	 = now - n->used;
1892 	ci.ndm_confirmed = now - n->confirmed;
1893 	ci.ndm_updated	 = now - n->updated;
1894 	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
1895 	probes = atomic_read(&n->probes);
1896 	read_unlock_bh(&n->lock);
1897 	locked		 = 0;
1898 	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1899 	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
1900 	nlh->nlmsg_len	 = skb->tail - b;
1901 	return skb->len;
1902 
1903 nlmsg_failure:
1904 rtattr_failure:
1905 	if (locked)
1906 		read_unlock_bh(&n->lock);
1907 	skb_trim(skb, b - skb->data);
1908 	return -1;
1909 }
1910 
1911 
1912 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1913 			    struct netlink_callback *cb)
1914 {
1915 	struct neighbour *n;
1916 	int rc, h, s_h = cb->args[1];
1917 	int idx, s_idx = idx = cb->args[2];
1918 
1919 	for (h = 0; h <= tbl->hash_mask; h++) {
1920 		if (h < s_h)
1921 			continue;
1922 		if (h > s_h)
1923 			s_idx = 0;
1924 		read_lock_bh(&tbl->lock);
1925 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
1926 			if (idx < s_idx)
1927 				continue;
1928 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1929 					    cb->nlh->nlmsg_seq,
1930 					    RTM_NEWNEIGH,
1931 					    NLM_F_MULTI) <= 0) {
1932 				read_unlock_bh(&tbl->lock);
1933 				rc = -1;
1934 				goto out;
1935 			}
1936 		}
1937 		read_unlock_bh(&tbl->lock);
1938 	}
1939 	rc = skb->len;
1940 out:
1941 	cb->args[1] = h;
1942 	cb->args[2] = idx;
1943 	return rc;
1944 }
1945 
1946 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1947 {
1948 	struct neigh_table *tbl;
1949 	int t, family, s_t;
1950 
1951 	read_lock(&neigh_tbl_lock);
1952 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1953 	s_t = cb->args[0];
1954 
1955 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
1956 		if (t < s_t || (family && tbl->family != family))
1957 			continue;
1958 		if (t > s_t)
1959 			memset(&cb->args[1], 0, sizeof(cb->args) -
1960 						sizeof(cb->args[0]));
1961 		if (neigh_dump_table(tbl, skb, cb) < 0)
1962 			break;
1963 	}
1964 	read_unlock(&neigh_tbl_lock);
1965 
1966 	cb->args[0] = t;
1967 	return skb->len;
1968 }
1969 
1970 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1971 {
1972 	int chain;
1973 
1974 	read_lock_bh(&tbl->lock);
1975 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1976 		struct neighbour *n;
1977 
1978 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1979 			cb(n, cookie);
1980 	}
1981 	read_unlock_bh(&tbl->lock);
1982 }
1983 EXPORT_SYMBOL(neigh_for_each);
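/*
 * Illustrative fragment (not in the original file): counting entries
 * with neigh_for_each().  The callback runs under tbl->lock with BHs
 * disabled, so it must not sleep or re-enter the neighbour table.
 */
#if 0
static void count_entry(struct neighbour *n, void *cookie)
{
	(*(int *)cookie)++;
}

	int count = 0;

	neigh_for_each(tbl, count_entry, &count);
#endif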
1984 
1985 /* The tbl->lock must be held as a writer and BH disabled. */
1986 void __neigh_for_each_release(struct neigh_table *tbl,
1987 			      int (*cb)(struct neighbour *))
1988 {
1989 	int chain;
1990 
1991 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1992 		struct neighbour *n, **np;
1993 
1994 		np = &tbl->hash_buckets[chain];
1995 		while ((n = *np) != NULL) {
1996 			int release;
1997 
1998 			write_lock(&n->lock);
1999 			release = cb(n);
2000 			if (release) {
2001 				*np = n->next;
2002 				n->dead = 1;
2003 			} else
2004 				np = &n->next;
2005 			write_unlock(&n->lock);
2006 			if (release)
2007 				neigh_release(n);
2008 		}
2009 	}
2010 }
2011 EXPORT_SYMBOL(__neigh_for_each_release);
2012 
2013 #ifdef CONFIG_PROC_FS
2014 
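/*
 * seq_file iteration helpers shared by the protocol /proc files (e.g.
 * /proc/net/arp): walk the ordinary neighbour hash first and then,
 * unless NEIGH_SEQ_NEIGH_ONLY is set, the proxy (pneigh) hash.
 * neigh_get_first() returns the first entry that survives the
 * NEIGH_SEQ_SKIP_NOARP filter and any protocol sub-iterator.
 */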
2015 static struct neighbour *neigh_get_first(struct seq_file *seq)
2016 {
2017 	struct neigh_seq_state *state = seq->private;
2018 	struct neigh_table *tbl = state->tbl;
2019 	struct neighbour *n = NULL;
2020 	int bucket;
2021 
2022 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2023 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2024 		n = tbl->hash_buckets[bucket];
2025 
2026 		while (n) {
2027 			if (state->neigh_sub_iter) {
2028 				loff_t fakep = 0;
2029 				void *v;
2030 
2031 				v = state->neigh_sub_iter(state, n, &fakep);
2032 				if (!v)
2033 					goto next;
2034 			}
2035 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2036 				break;
2037 			if (n->nud_state & ~NUD_NOARP)
2038 				break;
2039 		next:
2040 			n = n->next;
2041 		}
2042 
2043 		if (n)
2044 			break;
2045 	}
2046 	state->bucket = bucket;
2047 
2048 	return n;
2049 }
2050 
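/*
 * Advance to the next showable entry, crossing hash buckets as needed.
 * When pos is non-NULL, each entry returned consumes one unit of *pos;
 * that is how neigh_get_idx() seeks to an absolute position.
 */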
2051 static struct neighbour *neigh_get_next(struct seq_file *seq,
2052 					struct neighbour *n,
2053 					loff_t *pos)
2054 {
2055 	struct neigh_seq_state *state = seq->private;
2056 	struct neigh_table *tbl = state->tbl;
2057 
2058 	if (state->neigh_sub_iter) {
2059 		void *v = state->neigh_sub_iter(state, n, pos);
2060 		if (v)
2061 			return n;
2062 	}
2063 	n = n->next;
2064 
2065 	while (1) {
2066 		while (n) {
2067 			if (state->neigh_sub_iter) {
2068 				void *v = state->neigh_sub_iter(state, n, pos);
2069 				if (v)
2070 					return n;
2071 				goto next;
2072 			}
2073 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2074 				break;
2075 
2076 			if (n->nud_state & ~NUD_NOARP)
2077 				break;
2078 		next:
2079 			n = n->next;
2080 		}
2081 
2082 		if (n)
2083 			break;
2084 
2085 		if (++state->bucket > tbl->hash_mask)
2086 			break;
2087 
2088 		n = tbl->hash_buckets[state->bucket];
2089 	}
2090 
2091 	if (n && pos)
2092 		--(*pos);
2093 	return n;
2094 }
2095 
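/* Seek to the *pos'th entry; NULL if the table holds fewer than that. */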
2096 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2097 {
2098 	struct neighbour *n = neigh_get_first(seq);
2099 
2100 	if (n) {
2101 		while (*pos) {
2102 			n = neigh_get_next(seq, n, pos);
2103 			if (!n)
2104 				break;
2105 		}
2106 	}
2107 	return *pos ? NULL : n;
2108 }
2109 
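/*
 * The same walk for proxy entries: a fixed PNEIGH_HASHMASK-sized hash
 * with no state filtering and no sub-iterators.
 */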
2110 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2111 {
2112 	struct neigh_seq_state *state = seq->private;
2113 	struct neigh_table *tbl = state->tbl;
2114 	struct pneigh_entry *pn = NULL;
2115 	int bucket;
2116 
2117 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2118 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2119 		pn = tbl->phash_buckets[bucket];
2120 		if (pn)
2121 			break;
2122 	}
2123 	state->bucket = bucket;
2124 
2125 	return pn;
2126 }
2127 
2128 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2129 					    struct pneigh_entry *pn,
2130 					    loff_t *pos)
2131 {
2132 	struct neigh_seq_state *state = seq->private;
2133 	struct neigh_table *tbl = state->tbl;
2134 
2135 	pn = pn->next;
2136 	while (!pn) {
2137 		if (++state->bucket > PNEIGH_HASHMASK)
2138 			break;
2139 		pn = tbl->phash_buckets[state->bucket];
2140 		if (pn)
2141 			break;
2142 	}
2143 
2144 	if (pn && pos)
2145 		--(*pos);
2146 
2147 	return pn;
2148 }
2149 
2150 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2151 {
2152 	struct pneigh_entry *pn = pneigh_get_first(seq);
2153 
2154 	if (pn) {
2155 		while (*pos) {
2156 			pn = pneigh_get_next(seq, pn, pos);
2157 			if (!pn)
2158 				break;
2159 		}
2160 	}
2161 	return *pos ? NULL : pn;
2162 }
2163 
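/*
 * Absolute seek across both hashes: neighbours first, then (unless
 * NEIGH_SEQ_NEIGH_ONLY is set) proxy entries.
 */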
2164 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2165 {
2166 	struct neigh_seq_state *state = seq->private;
2167 	void *rc;
2168 
2169 	rc = neigh_get_idx(seq, pos);
2170 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2171 		rc = pneigh_get_idx(seq, pos);
2172 
2173 	return rc;
2174 }
2175 
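/*
 * Start a walk: takes tbl->lock (read, BH disabled), which
 * neigh_seq_stop() releases.  Position 0 is SEQ_START_TOKEN so callers
 * can emit a header line; real entries therefore live at *pos - 1,
 * hence the pos_minus_one adjustment below.
 */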
2176 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2177 {
2178 	struct neigh_seq_state *state = seq->private;
2179 	loff_t pos_minus_one;
2180 
2181 	state->tbl = tbl;
2182 	state->bucket = 0;
2183 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2184 
2185 	read_lock_bh(&tbl->lock);
2186 
2187 	pos_minus_one = *pos - 1;
2188 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2189 }
2190 EXPORT_SYMBOL(neigh_seq_start);
2191 
2192 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2193 {
2194 	struct neigh_seq_state *state;
2195 	void *rc;
2196 
2197 	if (v == SEQ_START_TOKEN) {
2198 		rc = neigh_get_idx(seq, pos);
2199 		goto out;
2200 	}
2201 
2202 	state = seq->private;
2203 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2204 		rc = neigh_get_next(seq, v, NULL);
2205 		if (rc)
2206 			goto out;
2207 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2208 			rc = pneigh_get_first(seq);
2209 	} else {
2210 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2211 		rc = pneigh_get_next(seq, v, NULL);
2212 	}
2213 out:
2214 	++(*pos);
2215 	return rc;
2216 }
2217 EXPORT_SYMBOL(neigh_seq_next);
2218 
2219 void neigh_seq_stop(struct seq_file *seq, void *v)
2220 {
2221 	struct neigh_seq_state *state = seq->private;
2222 	struct neigh_table *tbl = state->tbl;
2223 
2224 	read_unlock_bh(&tbl->lock);
2225 }
2226 EXPORT_SYMBOL(neigh_seq_stop);
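/*
 * A protocol wires these into its own seq_operations.  For illustration
 * (modelled on net/ipv4/arp.c, a sketch rather than a verbatim copy):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * with neigh_seq_next and neigh_seq_stop usable directly as the .next
 * and .stop methods.
 */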
2227 
2228 /* statistics via seq_file */
2229 
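/*
 * Per-CPU statistics iterator: position 0 is the header token and
 * position n maps to CPU n - 1, skipping CPUs that are not possible.
 */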
2230 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2231 {
2232 	struct proc_dir_entry *pde = seq->private;
2233 	struct neigh_table *tbl = pde->data;
2234 	int cpu;
2235 
2236 	if (*pos == 0)
2237 		return SEQ_START_TOKEN;
2238 
2239 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2240 		if (!cpu_possible(cpu))
2241 			continue;
2242 		*pos = cpu+1;
2243 		return per_cpu_ptr(tbl->stats, cpu);
2244 	}
2245 	return NULL;
2246 }
2247 
2248 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2249 {
2250 	struct proc_dir_entry *pde = seq->private;
2251 	struct neigh_table *tbl = pde->data;
2252 	int cpu;
2253 
2254 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2255 		if (!cpu_possible(cpu))
2256 			continue;
2257 		*pos = cpu+1;
2258 		return per_cpu_ptr(tbl->stats, cpu);
2259 	}
2260 	return NULL;
2261 }
2262 
2263 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2264 {
2265 
2266 }
2267 
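/*
 * One header line plus one row per possible CPU; the entries column is
 * global (read from the table), the remaining columns are per-CPU.
 */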
2268 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2269 {
2270 	struct proc_dir_entry *pde = seq->private;
2271 	struct neigh_table *tbl = pde->data;
2272 	struct neigh_statistics *st = v;
2273 
2274 	if (v == SEQ_START_TOKEN) {
2275 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2276 		return 0;
2277 	}
2278 
2279 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2280 			"%08lx %08lx  %08lx %08lx\n",
2281 		   atomic_read(&tbl->entries),
2282 
2283 		   st->allocs,
2284 		   st->destroys,
2285 		   st->hash_grows,
2286 
2287 		   st->lookups,
2288 		   st->hits,
2289 
2290 		   st->res_failed,
2291 
2292 		   st->rcv_probes_mcast,
2293 		   st->rcv_probes_ucast,
2294 
2295 		   st->periodic_gc_runs,
2296 		   st->forced_gc_runs
2297 		   );
2298 
2299 	return 0;
2300 }
2301 
2302 static struct seq_operations neigh_stat_seq_ops = {
2303 	.start	= neigh_stat_seq_start,
2304 	.next	= neigh_stat_seq_next,
2305 	.stop	= neigh_stat_seq_stop,
2306 	.show	= neigh_stat_seq_show,
2307 };
2308 
2309 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2310 {
2311 	int ret = seq_open(file, &neigh_stat_seq_ops);
2312 
2313 	if (!ret) {
2314 		struct seq_file *sf = file->private_data;
2315 		sf->private = PDE(inode);
2316 	}
2317 	return ret;
2318 }
2319 
2320 static struct file_operations neigh_stat_seq_fops = {
2321 	.owner	 = THIS_MODULE,
2322 	.open 	 = neigh_stat_seq_open,
2323 	.read	 = seq_read,
2324 	.llseek	 = seq_lseek,
2325 	.release = seq_release,
2326 };
2327 
2328 #endif /* CONFIG_PROC_FS */
2329 
2330 #ifdef CONFIG_ARPD
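/*
 * ARPD support: both functions build a netlink message for n and
 * broadcast it to the RTNLGRP_NEIGH group.  neigh_app_ns() sends an
 * RTM_GETNEIGH request so a userspace arpd can resolve the address;
 * neigh_app_notify() announces state changes with RTM_NEWNEIGH.  The
 * 256 bytes of headroom beyond the ndmsg leave room for the address
 * and cacheinfo attributes.
 */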
2331 void neigh_app_ns(struct neighbour *n)
2332 {
2333 	struct nlmsghdr  *nlh;
2334 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2335 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2336 
2337 	if (!skb)
2338 		return;
2339 
2340 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2341 		kfree_skb(skb);
2342 		return;
2343 	}
2344 	nlh			   = (struct nlmsghdr *)skb->data;
2345 	nlh->nlmsg_flags	   = NLM_F_REQUEST;
2346 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2347 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2348 }
2349 
2350 static void neigh_app_notify(struct neighbour *n)
2351 {
2352 	struct nlmsghdr *nlh;
2353 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2354 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2355 
2356 	if (!skb)
2357 		return;
2358 
2359 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2360 		kfree_skb(skb);
2361 		return;
2362 	}
2363 	nlh			   = (struct nlmsghdr *)skb->data;
2364 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2365 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2366 }
2367 
2368 #endif /* CONFIG_ARPD */
2369 
2370 #ifdef CONFIG_SYSCTL
2371 
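/*
 * Template cloned for every registered neigh_parms.  The order of the
 * entries in neigh_vars[] is significant: neigh_sysctl_register() binds
 * them to parms fields by index, so the two lists must stay in sync.
 */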
2372 static struct neigh_sysctl_table {
2373 	struct ctl_table_header *sysctl_header;
2374 	ctl_table		neigh_vars[__NET_NEIGH_MAX];
2375 	ctl_table		neigh_dev[2];
2376 	ctl_table		neigh_neigh_dir[2];
2377 	ctl_table		neigh_proto_dir[2];
2378 	ctl_table		neigh_root_dir[2];
2379 } neigh_sysctl_template = {
2380 	.neigh_vars = {
2381 		{
2382 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2383 			.procname	= "mcast_solicit",
2384 			.maxlen		= sizeof(int),
2385 			.mode		= 0644,
2386 			.proc_handler	= &proc_dointvec,
2387 		},
2388 		{
2389 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2390 			.procname	= "ucast_solicit",
2391 			.maxlen		= sizeof(int),
2392 			.mode		= 0644,
2393 			.proc_handler	= &proc_dointvec,
2394 		},
2395 		{
2396 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2397 			.procname	= "app_solicit",
2398 			.maxlen		= sizeof(int),
2399 			.mode		= 0644,
2400 			.proc_handler	= &proc_dointvec,
2401 		},
2402 		{
2403 			.ctl_name	= NET_NEIGH_RETRANS_TIME,
2404 			.procname	= "retrans_time",
2405 			.maxlen		= sizeof(int),
2406 			.mode		= 0644,
2407 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2408 		},
2409 		{
2410 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2411 			.procname	= "base_reachable_time",
2412 			.maxlen		= sizeof(int),
2413 			.mode		= 0644,
2414 			.proc_handler	= &proc_dointvec_jiffies,
2415 			.strategy	= &sysctl_jiffies,
2416 		},
2417 		{
2418 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2419 			.procname	= "delay_first_probe_time",
2420 			.maxlen		= sizeof(int),
2421 			.mode		= 0644,
2422 			.proc_handler	= &proc_dointvec_jiffies,
2423 			.strategy	= &sysctl_jiffies,
2424 		},
2425 		{
2426 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2427 			.procname	= "gc_stale_time",
2428 			.maxlen		= sizeof(int),
2429 			.mode		= 0644,
2430 			.proc_handler	= &proc_dointvec_jiffies,
2431 			.strategy	= &sysctl_jiffies,
2432 		},
2433 		{
2434 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2435 			.procname	= "unres_qlen",
2436 			.maxlen		= sizeof(int),
2437 			.mode		= 0644,
2438 			.proc_handler	= &proc_dointvec,
2439 		},
2440 		{
2441 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2442 			.procname	= "proxy_qlen",
2443 			.maxlen		= sizeof(int),
2444 			.mode		= 0644,
2445 			.proc_handler	= &proc_dointvec,
2446 		},
2447 		{
2448 			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
2449 			.procname	= "anycast_delay",
2450 			.maxlen		= sizeof(int),
2451 			.mode		= 0644,
2452 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2453 		},
2454 		{
2455 			.ctl_name	= NET_NEIGH_PROXY_DELAY,
2456 			.procname	= "proxy_delay",
2457 			.maxlen		= sizeof(int),
2458 			.mode		= 0644,
2459 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2460 		},
2461 		{
2462 			.ctl_name	= NET_NEIGH_LOCKTIME,
2463 			.procname	= "locktime",
2464 			.maxlen		= sizeof(int),
2465 			.mode		= 0644,
2466 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2467 		},
2468 		{
2469 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2470 			.procname	= "gc_interval",
2471 			.maxlen		= sizeof(int),
2472 			.mode		= 0644,
2473 			.proc_handler	= &proc_dointvec_jiffies,
2474 			.strategy	= &sysctl_jiffies,
2475 		},
2476 		{
2477 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2478 			.procname	= "gc_thresh1",
2479 			.maxlen		= sizeof(int),
2480 			.mode		= 0644,
2481 			.proc_handler	= &proc_dointvec,
2482 		},
2483 		{
2484 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2485 			.procname	= "gc_thresh2",
2486 			.maxlen		= sizeof(int),
2487 			.mode		= 0644,
2488 			.proc_handler	= &proc_dointvec,
2489 		},
2490 		{
2491 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2492 			.procname	= "gc_thresh3",
2493 			.maxlen		= sizeof(int),
2494 			.mode		= 0644,
2495 			.proc_handler	= &proc_dointvec,
2496 		},
2497 		{
2498 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2499 			.procname	= "retrans_time_ms",
2500 			.maxlen		= sizeof(int),
2501 			.mode		= 0644,
2502 			.proc_handler	= &proc_dointvec_ms_jiffies,
2503 			.strategy	= &sysctl_ms_jiffies,
2504 		},
2505 		{
2506 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2507 			.procname	= "base_reachable_time_ms",
2508 			.maxlen		= sizeof(int),
2509 			.mode		= 0644,
2510 			.proc_handler	= &proc_dointvec_ms_jiffies,
2511 			.strategy	= &sysctl_ms_jiffies,
2512 		},
2513 	},
2514 	.neigh_dev = {
2515 		{
2516 			.ctl_name	= NET_PROTO_CONF_DEFAULT,
2517 			.procname	= "default",
2518 			.mode		= 0555,
2519 		},
2520 	},
2521 	.neigh_neigh_dir = {
2522 		{
2523 			.procname	= "neigh",
2524 			.mode		= 0555,
2525 		},
2526 	},
2527 	.neigh_proto_dir = {
2528 		{
2529 			.mode		= 0555,
2530 		},
2531 	},
2532 	.neigh_root_dir = {
2533 		{
2534 			.ctl_name	= CTL_NET,
2535 			.procname	= "net",
2536 			.mode		= 0555,
2537 		},
2538 	},
2539 };
2540 
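/*
 * Clone the template and register it as net/<p_name>/neigh/<dev>/.
 * Per-device directories (dev != NULL) drop the gc_* entries, which
 * only make sense table-wide; the "default" directory instead points
 * them at the gc fields that struct neigh_table lays out immediately
 * after its embedded parms, which is what the (int *)(p + 1)
 * arithmetic below relies on.  An optional handler/strategy pair lets
 * callers intercept the four *time knobs.
 */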
2541 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2542 			  int p_id, int pdev_id, char *p_name,
2543 			  proc_handler *handler, ctl_handler *strategy)
2544 {
2545 	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2546 	const char *dev_name_source = NULL;
2547 	char *dev_name = NULL;
2548 	int err = 0;
2549 
2550 	if (!t)
2551 		return -ENOBUFS;
2552 	memcpy(t, &neigh_sysctl_template, sizeof(*t));
2553 	t->neigh_vars[0].data  = &p->mcast_probes;
2554 	t->neigh_vars[1].data  = &p->ucast_probes;
2555 	t->neigh_vars[2].data  = &p->app_probes;
2556 	t->neigh_vars[3].data  = &p->retrans_time;
2557 	t->neigh_vars[4].data  = &p->base_reachable_time;
2558 	t->neigh_vars[5].data  = &p->delay_probe_time;
2559 	t->neigh_vars[6].data  = &p->gc_staletime;
2560 	t->neigh_vars[7].data  = &p->queue_len;
2561 	t->neigh_vars[8].data  = &p->proxy_qlen;
2562 	t->neigh_vars[9].data  = &p->anycast_delay;
2563 	t->neigh_vars[10].data = &p->proxy_delay;
2564 	t->neigh_vars[11].data = &p->locktime;
2565 
2566 	if (dev) {
2567 		dev_name_source = dev->name;
2568 		t->neigh_dev[0].ctl_name = dev->ifindex;
2569 		t->neigh_vars[12].procname = NULL;
2570 		t->neigh_vars[13].procname = NULL;
2571 		t->neigh_vars[14].procname = NULL;
2572 		t->neigh_vars[15].procname = NULL;
2573 	} else {
2574 		dev_name_source = t->neigh_dev[0].procname;
2575 		t->neigh_vars[12].data = (int *)(p + 1);
2576 		t->neigh_vars[13].data = (int *)(p + 1) + 1;
2577 		t->neigh_vars[14].data = (int *)(p + 1) + 2;
2578 		t->neigh_vars[15].data = (int *)(p + 1) + 3;
2579 	}
2580 
2581 	t->neigh_vars[16].data  = &p->retrans_time;
2582 	t->neigh_vars[17].data  = &p->base_reachable_time;
2583 
2584 	if (handler || strategy) {
2585 		/* RetransTime */
2586 		t->neigh_vars[3].proc_handler = handler;
2587 		t->neigh_vars[3].strategy = strategy;
2588 		t->neigh_vars[3].extra1 = dev;
2589 		/* ReachableTime */
2590 		t->neigh_vars[4].proc_handler = handler;
2591 		t->neigh_vars[4].strategy = strategy;
2592 		t->neigh_vars[4].extra1 = dev;
2593 		/* RetransTime (in milliseconds)*/
2594 		t->neigh_vars[16].proc_handler = handler;
2595 		t->neigh_vars[16].strategy = strategy;
2596 		t->neigh_vars[16].extra1 = dev;
2597 		/* ReachableTime (in milliseconds) */
2598 		t->neigh_vars[17].proc_handler = handler;
2599 		t->neigh_vars[17].strategy = strategy;
2600 		t->neigh_vars[17].extra1 = dev;
2601 	}
2602 
2603 	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2604 	if (!dev_name) {
2605 		err = -ENOBUFS;
2606 		goto free;
2607 	}
2608 
2609 	t->neigh_dev[0].procname = dev_name;
2610 
2611 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
2612 
2613 	t->neigh_proto_dir[0].procname = p_name;
2614 	t->neigh_proto_dir[0].ctl_name = p_id;
2615 
2616 	t->neigh_dev[0].child	       = t->neigh_vars;
2617 	t->neigh_neigh_dir[0].child    = t->neigh_dev;
2618 	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2619 	t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2620 
2621 	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2622 	if (!t->sysctl_header) {
2623 		err = -ENOBUFS;
2624 		goto free_procname;
2625 	}
2626 	p->sysctl_table = t;
2627 	return 0;
2628 
2629 	/* error path */
2630  free_procname:
2631 	kfree(dev_name);
2632  free:
2633 	kfree(t);
2634 
2635 	return err;
2636 }
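/*
 * For illustration, a protocol registers its tunables along these lines
 * (a sketch of what net/ipv4/arp.c does for the IPv4 defaults, not a
 * verbatim copy):
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4,
 *			      NET_IPV4_NEIGH, "ipv4", NULL, NULL);
 *
 * which creates /proc/sys/net/ipv4/neigh/default/.
 */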
2637 
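/*
 * Tear down in reverse: unregister the tree, then free the kstrdup()ed
 * directory name and the cloned table.
 */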
2638 void neigh_sysctl_unregister(struct neigh_parms *p)
2639 {
2640 	if (p->sysctl_table) {
2641 		struct neigh_sysctl_table *t = p->sysctl_table;
2642 		p->sysctl_table = NULL;
2643 		unregister_sysctl_table(t->sysctl_header);
2644 		kfree(t->neigh_dev[0].procname);
2645 		kfree(t);
2646 	}
2647 }
2648 
2649 #endif	/* CONFIG_SYSCTL */
2650 
2651 EXPORT_SYMBOL(__neigh_event_send);
2652 EXPORT_SYMBOL(neigh_add);
2653 EXPORT_SYMBOL(neigh_changeaddr);
2654 EXPORT_SYMBOL(neigh_compat_output);
2655 EXPORT_SYMBOL(neigh_connected_output);
2656 EXPORT_SYMBOL(neigh_create);
2657 EXPORT_SYMBOL(neigh_delete);
2658 EXPORT_SYMBOL(neigh_destroy);
2659 EXPORT_SYMBOL(neigh_dump_info);
2660 EXPORT_SYMBOL(neigh_event_ns);
2661 EXPORT_SYMBOL(neigh_ifdown);
2662 EXPORT_SYMBOL(neigh_lookup);
2663 EXPORT_SYMBOL(neigh_lookup_nodev);
2664 EXPORT_SYMBOL(neigh_parms_alloc);
2665 EXPORT_SYMBOL(neigh_parms_release);
2666 EXPORT_SYMBOL(neigh_rand_reach_time);
2667 EXPORT_SYMBOL(neigh_resolve_output);
2668 EXPORT_SYMBOL(neigh_table_clear);
2669 EXPORT_SYMBOL(neigh_table_init);
2670 EXPORT_SYMBOL(neigh_update);
2671 EXPORT_SYMBOL(neigh_update_hhs);
2672 EXPORT_SYMBOL(pneigh_enqueue);
2673 EXPORT_SYMBOL(pneigh_lookup);
2674 EXPORT_SYMBOL(neightbl_dump_info);
2675 EXPORT_SYMBOL(neightbl_set);
2676 
2677 #ifdef CONFIG_ARPD
2678 EXPORT_SYMBOL(neigh_app_ns);
2679 #endif
2680 #ifdef CONFIG_SYSCTL
2681 EXPORT_SYMBOL(neigh_sysctl_register);
2682 EXPORT_SYMBOL(neigh_sysctl_unregister);
2683 #endif
2684