/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send something to the
     network.  That would result in deadlocks if the backend/driver
     wanted to use the neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */

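/*
 * Illustrative sketch (not part of the original file): the canonical
 * way to follow the rules above is to find the entry under the table
 * lock, take a reference, drop the lock, and only then do anything
 * non-trivial.  neigh_do_slow_work() is a hypothetical callback used
 * purely for illustration.
 */
#if 0
static void example_use_entry(struct neigh_table *tbl, unsigned int bucket,
			      void (*neigh_do_slow_work)(struct neighbour *))
{
	struct neighbour *n;

	read_lock_bh(&tbl->lock);
	n = tbl->hash_buckets[bucket];
	if (n)
		neigh_hold(n);		/* pin the entry... */
	read_unlock_bh(&tbl->lock);	/* ...then drop the bucket lock */

	if (n) {
		neigh_do_slow_work(n);	/* safe: no table lock held here */
		neigh_release(n);
	}
}
#endif
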
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * Returns a value randomly distributed in the interval
 * [base/2, 3*base/2).  This corresponds to the default IPv6
 * settings and is not overridable, because it is a really
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}

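/*
 * Worked example (illustrative, not from the original file): with
 * base = 30 * HZ, net_random() % base is uniform over [0, 30*HZ) and
 * adding base >> 1 = 15*HZ shifts it to [15*HZ, 45*HZ), i.e.
 * ReachableTime is re-drawn between one half and three halves of
 * BaseReachableTime.
 */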

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
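
/*
 * Usage sketch (illustrative, not part of the original file):
 * neigh_lookup() returns the entry with its reference count raised,
 * so every successful lookup must be paired with neigh_release().
 */
#if 0
static void example_lookup(struct neigh_table *tbl, const void *pkey,
			   struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n) {
		/* use the entry while the reference is held */
		NEIGH_PRINTK1("neigh %p state %x\n", n, n->nud_state);
		neigh_release(n);	/* drop the lookup reference */
	}
}
#endif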

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
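
/*
 * Usage sketch (illustrative): callers rarely invoke neigh_create()
 * directly; protocols normally go through helpers such as
 * __neigh_lookup_errno(), which looks the key up first and only
 * creates an entry on a miss.  Note that neigh_create() returns an
 * ERR_PTR() value on failure, never NULL.
 */
#if 0
static struct neighbour *example_lookup_or_create(struct neigh_table *tbl,
						  const void *pkey,
						  struct net_device *dev)
{
	struct neighbour *n = __neigh_lookup_errno(tbl, pkey, dev);

	if (IS_ERR(n))
		return NULL;	/* e.g. -ENOBUFS from neigh_alloc() */
	return n;		/* reference held; caller must neigh_release() */
}
#endif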

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}


int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}


/*
 *	The neighbour entry must already be unlinked from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very delicate place. report_unreachable is a
		   very complicated routine; in particular, it can hit this
		   same neighbour entry! So we try to be careful and avoid
		   a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}

static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
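
/*
 * Usage sketch (illustrative): this is roughly how a protocol receive
 * path confirms a neighbour when a reply arrives from the wire; compare
 * neigh_event_ns() below, which uses NUD_STALE for unsolicited news.
 */
#if 0
static void example_confirm(struct neighbour *n, const u8 *lladdr_from_pkt)
{
	/* A reply proves two-way reachability: go straight to REACHABLE,
	 * overriding the cached lladdr if the peer changed hardware.
	 */
	neigh_update(n, lladdr_from_pkt, NUD_REACHABLE,
		     NEIGH_UPDATE_F_OVERRIDE);
}
#endif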

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh)	{
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper)
   but resolution has not been done yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}


struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}


void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);
}
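
/*
 * Registration sketch (illustrative): a protocol declares a neigh_table
 * describing its key size and hash/constructor hooks and hands it to
 * neigh_table_init(), much as IPv4 ARP does with arp_tbl.  All names
 * and values below are made up for illustration.
 */
#if 0
static u32 example_hash(const void *pkey, const struct net_device *dev);
static int example_constructor(struct neighbour *n);

static struct neigh_table example_tbl = {
	.family		= AF_INET,
	.entry_size	= sizeof(struct neighbour) + 4,	/* entry + key */
	.key_len	= 4,				/* IPv4 address */
	.hash		= example_hash,
	.constructor	= example_constructor,
	.id		= "example_cache",
	.gc_interval	= 30 * HZ,
	.gc_thresh1	= 128,
	.gc_thresh2	= 512,
	.gc_thresh3	= 1024,
};

static int __init example_init(void)
{
	neigh_table_init(&example_tbl);
	return 0;
}
#endif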

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}

int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}

static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}

static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}

int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}

int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}


static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}

void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
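
/*
 * Usage sketch (illustrative): neigh_for_each() runs the callback under
 * the table read lock, so the callback must follow the locking rules at
 * the top of this file: no sleeping, no re-entering the neighbour table.
 */
#if 0
static void example_count_cb(struct neighbour *n, void *cookie)
{
	(*(int *)cookie)++;		/* trivial, lock-safe work only */
}

static int example_count_entries(struct neigh_table *tbl)
{
	int count = 0;

	neigh_for_each(tbl, example_count_cb, &count);
	return count;
}
#endif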

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
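
/*
 * Example (hypothetical caller): unlink every NUD_FAILED entry.  A
 * non-zero return from the callback removes the entry from its chain,
 * marks it dead and drops the table's reference.
 *
 *	static int neigh_flush_failed(struct neighbour *n)
 *	{
 *		return n->nud_state == NUD_FAILED;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, neigh_flush_failed);
 *	write_unlock_bh(&tbl->lock);
 */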

#ifdef CONFIG_PROC_FS

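/*
 * /proc iteration over a neighbour table.  neigh_get_first/next walk the
 * main hash; the pneigh_* twins walk the proxy hash.  Entries whose only
 * state is NUD_NOARP can be skipped (NEIGH_SEQ_SKIP_NOARP), and a
 * protocol may descend into per-entry sub-objects via ->neigh_sub_iter.
 */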
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
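
/*
 * Sketch (assumed usage, modelled on a protocol's /proc interface such
 * as ARP's): the generic iterators slot straight into seq_operations,
 * with only ->show being protocol-specific.  proto_tbl and
 * proto_seq_show are hypothetical names.
 *
 *	static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &proto_tbl, 0);
 *	}
 *
 *	static struct seq_operations proto_seq_ops = {
 *		.start	= proto_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= proto_seq_show,
 *	};
 */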

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
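
/*
 * Note that neigh_seq_start() leaves tbl->lock held with BHs disabled
 * and neigh_seq_stop() releases it, so everything a ->show() callback
 * does in between must be atomic.
 */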

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
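
/*
 * Illustrative output only (values are made up): reading the file gives
 * the header line followed by one line of hex counters per possible CPU:
 *
 *	entries  allocs destroys hash_grows ...
 *	0000000c  0000002a 0000001e 00000001 ...
 */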

static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
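
/*
 * These fops are not opened directly here: neigh_table_init() creates a
 * /proc/net/stat/<tbl->id> entry, points its proc_fops at
 * neigh_stat_seq_fops and stores the table in pde->data, which the
 * start/next/show callbacks above fetch back via seq->private.
 */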

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	   = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
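
/*
 * The two ARPD helpers above differ only in intent: neigh_app_ns()
 * broadcasts an RTM_GETNEIGH solicitation asking the userspace arp
 * daemon to resolve an address, while neigh_app_notify() announces a
 * finished state change with RTM_NEWNEIGH.  Both go out on the
 * RTNLGRP_NEIGH multicast group.
 */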

#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
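
/*
 * The neigh_vars[] order above is load-bearing: neigh_sysctl_register()
 * binds slots 0-17 to struct neigh_parms fields by index, and slots
 * 12-15 (gc_interval, gc_thresh1-3) are per-table rather than
 * per-device, which is why they are blanked for device tables below.
 */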

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	t->neigh_vars[16].data  = &p->retrans_time;
	t->neigh_vars[17].data  = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	       = t->neigh_vars;
	t->neigh_neigh_dir[0].child    = t->neigh_dev;
	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child     = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
 free_procname:
	kfree(dev_name);
 free:
	kfree(t);

	return err;
}
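
/*
 * Sketch (assumed caller, modelled on IPv4): a protocol registers each
 * device's neigh_parms under its own sysctl ids, yielding e.g.
 * /proc/sys/net/ipv4/neigh/<dev>/ backed by neigh_vars[]:
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 *
 * where p is the per-device struct neigh_parms.  Passing dev == NULL
 * registers the "default" subtree instead.
 */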

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}

#endif	/* CONFIG_SYSCTL */

EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif