xref: /linux/net/core/neighbour.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #include <linux/config.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/sched.h>
24 #include <linux/netdevice.h>
25 #include <linux/proc_fs.h>
26 #ifdef CONFIG_SYSCTL
27 #include <linux/sysctl.h>
28 #endif
29 #include <linux/times.h>
30 #include <net/neighbour.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 
37 #define NEIGH_DEBUG 1
38 
39 #define NEIGH_PRINTK(x...) printk(x)
40 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
41 #define NEIGH_PRINTK0 NEIGH_PRINTK
42 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
43 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
44 
45 #if NEIGH_DEBUG >= 1
46 #undef NEIGH_PRINTK1
47 #define NEIGH_PRINTK1 NEIGH_PRINTK
48 #endif
49 #if NEIGH_DEBUG >= 2
50 #undef NEIGH_PRINTK2
51 #define NEIGH_PRINTK2 NEIGH_PRINTK
52 #endif
53 
54 #define PNEIGH_HASHMASK		0xF
55 
56 static void neigh_timer_handler(unsigned long arg);
57 #ifdef CONFIG_ARPD
58 static void neigh_app_notify(struct neighbour *n);
59 #endif
60 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62 
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static struct file_operations neigh_stat_seq_fops;
66 #endif
67 
68 /*
69    Neighbour hash table buckets are protected by the rwlock tbl->lock.
70 
71    - All scans/updates of the hash buckets MUST be made under this lock.
72    - NOTHING clever should be done under this lock: no callbacks
73      into protocol backends, no attempts to send anything to the network.
74      Doing so will result in deadlocks if the backend/driver wants to
75      use the neighbour cache.
76    - If the entry requires some non-trivial action, increase
77      its reference count and release the table lock.
78 
79    Neighbour entries are protected:
80    - by their reference count.
81    - by the rwlock neigh->lock.
82 
83    The reference count prevents destruction.
84 
85    neigh->lock mainly serializes the link-layer address data and its
86    validity state.  However, the same lock also protects other fields:
87     - the timer
88     - the resolution queue
89 
90    Again, nothing clever shall be done under neigh->lock;
91    the most complicated procedure we allow is dev->hard_header.
92    dev->hard_header is assumed to be simple and to make no
93    callbacks into the neighbour tables.
94 
95    The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting the
96    list of neighbour tables.  This list is used only in process context.
97  */
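/* An illustrative sketch (added annotation, not part of the original
 * file) of the rule above: take a reference under the table lock, drop
 * the lock, and only then do non-trivial work on the entry.  The names
 * follow this file's own API; the block is compiled out.
 */
#if 0
static void example_walk_bucket(struct neigh_table *tbl, int i)
{
	struct neighbour *n;

	read_lock_bh(&tbl->lock);
	n = tbl->hash_buckets[i];
	if (n)
		neigh_hold(n);		/* pin the entry ... */
	read_unlock_bh(&tbl->lock);	/* ... then drop the lock */

	if (n) {
		/* Safe to do slow work here: the reference count keeps
		 * the entry alive even if it is unlinked concurrently.
		 */
		neigh_release(n);
	}
}
#endif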
98 
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100 
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103 	kfree_skb(skb);
104 	return -ENETDOWN;
105 }
106 
107 /*
108  * Returns a value uniformly distributed in the interval (1/2)*base ...
109  * (3/2)*base.  This corresponds to the default IPv6 settings and is not
110  * overridable, because it is a really reasonable choice.
111  */
112 
113 unsigned long neigh_rand_reach_time(unsigned long base)
114 {
115 	return (base ? (net_random() % base) + (base >> 1) : 0);
116 }
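/* Worked example (added annotation): with base = 30 * HZ, the result is
 * uniform over [15*HZ, 45*HZ) -- (net_random() % base) supplies [0, base)
 * and (base >> 1) shifts the interval up by base/2.  A base of 0 yields 0.
 */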
117 
118 
119 static int neigh_forced_gc(struct neigh_table *tbl)
120 {
121 	int shrunk = 0;
122 	int i;
123 
124 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
125 
126 	write_lock_bh(&tbl->lock);
127 	for (i = 0; i <= tbl->hash_mask; i++) {
128 		struct neighbour *n, **np;
129 
130 		np = &tbl->hash_buckets[i];
131 		while ((n = *np) != NULL) {
132 			/* A neighbour record may be discarded if:
133 			 * - nobody refers to it, and
134 			 * - it is not permanent.
135 			 */
136 			write_lock(&n->lock);
137 			if (atomic_read(&n->refcnt) == 1 &&
138 			    !(n->nud_state & NUD_PERMANENT)) {
139 				*np	= n->next;
140 				n->dead = 1;
141 				shrunk	= 1;
142 				write_unlock(&n->lock);
143 				neigh_release(n);
144 				continue;
145 			}
146 			write_unlock(&n->lock);
147 			np = &n->next;
148 		}
149 	}
150 
151 	tbl->last_flush = jiffies;
152 
153 	write_unlock_bh(&tbl->lock);
154 
155 	return shrunk;
156 }
157 
158 static int neigh_del_timer(struct neighbour *n)
159 {
160 	if ((n->nud_state & NUD_IN_TIMER) &&
161 	    del_timer(&n->timer)) {
162 		neigh_release(n);
163 		return 1;
164 	}
165 	return 0;
166 }
167 
168 static void pneigh_queue_purge(struct sk_buff_head *list)
169 {
170 	struct sk_buff *skb;
171 
172 	while ((skb = skb_dequeue(list)) != NULL) {
173 		dev_put(skb->dev);
174 		kfree_skb(skb);
175 	}
176 }
177 
178 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
179 {
180 	int i;
181 
182 	for (i = 0; i <= tbl->hash_mask; i++) {
183 		struct neighbour *n, **np = &tbl->hash_buckets[i];
184 
185 		while ((n = *np) != NULL) {
186 			if (dev && n->dev != dev) {
187 				np = &n->next;
188 				continue;
189 			}
190 			*np = n->next;
191 			write_lock(&n->lock);
192 			neigh_del_timer(n);
193 			n->dead = 1;
194 
195 			if (atomic_read(&n->refcnt) != 1) {
196 				/* The most unpleasant situation:
197 				   we must destroy the neighbour entry,
198 				   but someone still uses it.
199 
200 				   Destruction will be delayed until
201 				   the last user releases us, but
202 				   we must kill the timers etc. and move
203 				   the entry to a safe state.
204 				 */
205 				skb_queue_purge(&n->arp_queue);
206 				n->output = neigh_blackhole;
207 				if (n->nud_state & NUD_VALID)
208 					n->nud_state = NUD_NOARP;
209 				else
210 					n->nud_state = NUD_NONE;
211 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
212 			}
213 			write_unlock(&n->lock);
214 			neigh_release(n);
215 		}
216 	}
217 }
218 
219 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
220 {
221 	write_lock_bh(&tbl->lock);
222 	neigh_flush_dev(tbl, dev);
223 	write_unlock_bh(&tbl->lock);
224 }
225 
226 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
227 {
228 	write_lock_bh(&tbl->lock);
229 	neigh_flush_dev(tbl, dev);
230 	pneigh_ifdown(tbl, dev);
231 	write_unlock_bh(&tbl->lock);
232 
233 	del_timer_sync(&tbl->proxy_timer);
234 	pneigh_queue_purge(&tbl->proxy_queue);
235 	return 0;
236 }
237 
238 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
239 {
240 	struct neighbour *n = NULL;
241 	unsigned long now = jiffies;
242 	int entries;
243 
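	/* Above gc_thresh3, or above gc_thresh2 with no flush within the
	 * last 5 seconds, run a synchronous forced GC; fail the allocation
	 * only if that frees nothing and we are still at gc_thresh3.
	 */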
244 	entries = atomic_inc_return(&tbl->entries) - 1;
245 	if (entries >= tbl->gc_thresh3 ||
246 	    (entries >= tbl->gc_thresh2 &&
247 	     time_after(now, tbl->last_flush + 5 * HZ))) {
248 		if (!neigh_forced_gc(tbl) &&
249 		    entries >= tbl->gc_thresh3)
250 			goto out_entries;
251 	}
252 
253 	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
254 	if (!n)
255 		goto out_entries;
256 
257 	memset(n, 0, tbl->entry_size);
258 
259 	skb_queue_head_init(&n->arp_queue);
260 	rwlock_init(&n->lock);
261 	n->updated	  = n->used = now;
262 	n->nud_state	  = NUD_NONE;
263 	n->output	  = neigh_blackhole;
264 	n->parms	  = neigh_parms_clone(&tbl->parms);
265 	init_timer(&n->timer);
266 	n->timer.function = neigh_timer_handler;
267 	n->timer.data	  = (unsigned long)n;
268 
269 	NEIGH_CACHE_STAT_INC(tbl, allocs);
270 	n->tbl		  = tbl;
271 	atomic_set(&n->refcnt, 1);
272 	n->dead		  = 1;
273 out:
274 	return n;
275 
276 out_entries:
277 	atomic_dec(&tbl->entries);
278 	goto out;
279 }
280 
281 static struct neighbour **neigh_hash_alloc(unsigned int entries)
282 {
283 	unsigned long size = entries * sizeof(struct neighbour *);
284 	struct neighbour **ret;
285 
286 	if (size <= PAGE_SIZE) {
287 		ret = kmalloc(size, GFP_ATOMIC);
288 	} else {
289 		ret = (struct neighbour **)
290 			__get_free_pages(GFP_ATOMIC, get_order(size));
291 	}
292 	if (ret)
293 		memset(ret, 0, size);
294 
295 	return ret;
296 }
297 
298 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
299 {
300 	unsigned long size = entries * sizeof(struct neighbour *);
301 
302 	if (size <= PAGE_SIZE)
303 		kfree(hash);
304 	else
305 		free_pages((unsigned long)hash, get_order(size));
306 }
307 
308 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
309 {
310 	struct neighbour **new_hash, **old_hash;
311 	unsigned int i, new_hash_mask, old_entries;
312 
313 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
314 
315 	BUG_ON(new_entries & (new_entries - 1));
316 	new_hash = neigh_hash_alloc(new_entries);
317 	if (!new_hash)
318 		return;
319 
320 	old_entries = tbl->hash_mask + 1;
321 	new_hash_mask = new_entries - 1;
322 	old_hash = tbl->hash_buckets;
323 
324 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
325 	for (i = 0; i < old_entries; i++) {
326 		struct neighbour *n, *next;
327 
328 		for (n = old_hash[i]; n; n = next) {
329 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
330 
331 			hash_val &= new_hash_mask;
332 			next = n->next;
333 
334 			n->next = new_hash[hash_val];
335 			new_hash[hash_val] = n;
336 		}
337 	}
338 	tbl->hash_buckets = new_hash;
339 	tbl->hash_mask = new_hash_mask;
340 
341 	neigh_hash_free(old_hash, old_entries);
342 }
343 
344 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
345 			       struct net_device *dev)
346 {
347 	struct neighbour *n;
348 	int key_len = tbl->key_len;
349 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
350 
351 	NEIGH_CACHE_STAT_INC(tbl, lookups);
352 
353 	read_lock_bh(&tbl->lock);
354 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
355 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
356 			neigh_hold(n);
357 			NEIGH_CACHE_STAT_INC(tbl, hits);
358 			break;
359 		}
360 	}
361 	read_unlock_bh(&tbl->lock);
362 	return n;
363 }
364 
365 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
366 {
367 	struct neighbour *n;
368 	int key_len = tbl->key_len;
369 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
370 
371 	NEIGH_CACHE_STAT_INC(tbl, lookups);
372 
373 	read_lock_bh(&tbl->lock);
374 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
375 		if (!memcmp(n->primary_key, pkey, key_len)) {
376 			neigh_hold(n);
377 			NEIGH_CACHE_STAT_INC(tbl, hits);
378 			break;
379 		}
380 	}
381 	read_unlock_bh(&tbl->lock);
382 	return n;
383 }
384 
385 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
386 			       struct net_device *dev)
387 {
388 	u32 hash_val;
389 	int key_len = tbl->key_len;
390 	int error;
391 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
392 
393 	if (!n) {
394 		rc = ERR_PTR(-ENOBUFS);
395 		goto out;
396 	}
397 
398 	memcpy(n->primary_key, pkey, key_len);
399 	n->dev = dev;
400 	dev_hold(dev);
401 
402 	/* Protocol specific setup. */
403 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
404 		rc = ERR_PTR(error);
405 		goto out_neigh_release;
406 	}
407 
408 	/* Device specific setup. */
409 	if (n->parms->neigh_setup &&
410 	    (error = n->parms->neigh_setup(n)) < 0) {
411 		rc = ERR_PTR(error);
412 		goto out_neigh_release;
413 	}
414 
415 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
416 
417 	write_lock_bh(&tbl->lock);
418 
419 	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
420 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
421 
422 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
423 
424 	if (n->parms->dead) {
425 		rc = ERR_PTR(-EINVAL);
426 		goto out_tbl_unlock;
427 	}
428 
429 	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
430 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
431 			neigh_hold(n1);
432 			rc = n1;
433 			goto out_tbl_unlock;
434 		}
435 	}
436 
437 	n->next = tbl->hash_buckets[hash_val];
438 	tbl->hash_buckets[hash_val] = n;
439 	n->dead = 0;
440 	neigh_hold(n);
441 	write_unlock_bh(&tbl->lock);
442 	NEIGH_PRINTK2("neigh %p is created.\n", n);
443 	rc = n;
444 out:
445 	return rc;
446 out_tbl_unlock:
447 	write_unlock_bh(&tbl->lock);
448 out_neigh_release:
449 	neigh_release(n);
450 	goto out;
451 }
452 
453 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
454 				    struct net_device *dev, int creat)
455 {
456 	struct pneigh_entry *n;
457 	int key_len = tbl->key_len;
458 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
459 
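	/* Fold the last 32 bits of the key down to a 4-bit bucket index
	 * (PNEIGH_HASHMASK is 0xF).  pneigh_delete() below uses the same
	 * fold.
	 */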
460 	hash_val ^= (hash_val >> 16);
461 	hash_val ^= hash_val >> 8;
462 	hash_val ^= hash_val >> 4;
463 	hash_val &= PNEIGH_HASHMASK;
464 
465 	read_lock_bh(&tbl->lock);
466 
467 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
468 		if (!memcmp(n->key, pkey, key_len) &&
469 		    (n->dev == dev || !n->dev)) {
470 			read_unlock_bh(&tbl->lock);
471 			goto out;
472 		}
473 	}
474 	read_unlock_bh(&tbl->lock);
475 	n = NULL;
476 	if (!creat)
477 		goto out;
478 
479 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
480 	if (!n)
481 		goto out;
482 
483 	memcpy(n->key, pkey, key_len);
484 	n->dev = dev;
485 	if (dev)
486 		dev_hold(dev);
487 
488 	if (tbl->pconstructor && tbl->pconstructor(n)) {
489 		if (dev)
490 			dev_put(dev);
491 		kfree(n);
492 		n = NULL;
493 		goto out;
494 	}
495 
496 	write_lock_bh(&tbl->lock);
497 	n->next = tbl->phash_buckets[hash_val];
498 	tbl->phash_buckets[hash_val] = n;
499 	write_unlock_bh(&tbl->lock);
500 out:
501 	return n;
502 }
503 
504 
505 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
506 		  struct net_device *dev)
507 {
508 	struct pneigh_entry *n, **np;
509 	int key_len = tbl->key_len;
510 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
511 
512 	hash_val ^= (hash_val >> 16);
513 	hash_val ^= hash_val >> 8;
514 	hash_val ^= hash_val >> 4;
515 	hash_val &= PNEIGH_HASHMASK;
516 
517 	write_lock_bh(&tbl->lock);
518 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
519 	     np = &n->next) {
520 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
521 			*np = n->next;
522 			write_unlock_bh(&tbl->lock);
523 			if (tbl->pdestructor)
524 				tbl->pdestructor(n);
525 			if (n->dev)
526 				dev_put(n->dev);
527 			kfree(n);
528 			return 0;
529 		}
530 	}
531 	write_unlock_bh(&tbl->lock);
532 	return -ENOENT;
533 }
534 
535 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
536 {
537 	struct pneigh_entry *n, **np;
538 	u32 h;
539 
540 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
541 		np = &tbl->phash_buckets[h];
542 		while ((n = *np) != NULL) {
543 			if (!dev || n->dev == dev) {
544 				*np = n->next;
545 				if (tbl->pdestructor)
546 					tbl->pdestructor(n);
547 				if (n->dev)
548 					dev_put(n->dev);
549 				kfree(n);
550 				continue;
551 			}
552 			np = &n->next;
553 		}
554 	}
555 	return -ENOENT;
556 }
557 
558 
559 /*
560  *	The neighbour must already be removed from the table
561  *	before this is called.
562  */
563 void neigh_destroy(struct neighbour *neigh)
564 {
565 	struct hh_cache *hh;
566 
567 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
568 
569 	if (!neigh->dead) {
570 		printk(KERN_WARNING
571 		       "Destroying alive neighbour %p\n", neigh);
572 		dump_stack();
573 		return;
574 	}
575 
576 	if (neigh_del_timer(neigh))
577 		printk(KERN_WARNING "Impossible event.\n");
578 
579 	while ((hh = neigh->hh) != NULL) {
580 		neigh->hh = hh->hh_next;
581 		hh->hh_next = NULL;
582 		write_lock_bh(&hh->hh_lock);
583 		hh->hh_output = neigh_blackhole;
584 		write_unlock_bh(&hh->hh_lock);
585 		if (atomic_dec_and_test(&hh->hh_refcnt))
586 			kfree(hh);
587 	}
588 
589 	if (neigh->parms->neigh_destructor)
590 		(neigh->parms->neigh_destructor)(neigh);
591 
592 	skb_queue_purge(&neigh->arp_queue);
593 
594 	dev_put(neigh->dev);
595 	neigh_parms_put(neigh->parms);
596 
597 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
598 
599 	atomic_dec(&neigh->tbl->entries);
600 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
601 }
602 
603 /* Neighbour state is suspicious;
604    disable the fast path.
605 
606    Called with neigh->lock held for writing.
607  */
608 static void neigh_suspect(struct neighbour *neigh)
609 {
610 	struct hh_cache *hh;
611 
612 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
613 
614 	neigh->output = neigh->ops->output;
615 
616 	for (hh = neigh->hh; hh; hh = hh->hh_next)
617 		hh->hh_output = neigh->ops->output;
618 }
619 
620 /* Neighbour state is OK;
621    enable the fast path.
622 
623    Called with neigh->lock held for writing.
624  */
625 static void neigh_connect(struct neighbour *neigh)
626 {
627 	struct hh_cache *hh;
628 
629 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
630 
631 	neigh->output = neigh->ops->connected_output;
632 
633 	for (hh = neigh->hh; hh; hh = hh->hh_next)
634 		hh->hh_output = neigh->ops->hh_output;
635 }
636 
637 static void neigh_periodic_timer(unsigned long arg)
638 {
639 	struct neigh_table *tbl = (struct neigh_table *)arg;
640 	struct neighbour *n, **np;
641 	unsigned long expire, now = jiffies;
642 
643 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
644 
645 	write_lock(&tbl->lock);
646 
647 	/*
648 	 *	Periodically recompute ReachableTime from the random function.
649 	 */
650 
651 	if (time_after(now, tbl->last_rand + 300 * HZ)) {
652 		struct neigh_parms *p;
653 		tbl->last_rand = now;
654 		for (p = &tbl->parms; p; p = p->next)
655 			p->reachable_time =
656 				neigh_rand_reach_time(p->base_reachable_time);
657 	}
658 
659 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
660 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
661 
662 	while ((n = *np) != NULL) {
663 		unsigned int state;
664 
665 		write_lock(&n->lock);
666 
667 		state = n->nud_state;
668 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
669 			write_unlock(&n->lock);
670 			goto next_elt;
671 		}
672 
673 		if (time_before(n->used, n->confirmed))
674 			n->used = n->confirmed;
675 
676 		if (atomic_read(&n->refcnt) == 1 &&
677 		    (state == NUD_FAILED ||
678 		     time_after(now, n->used + n->parms->gc_staletime))) {
679 			*np = n->next;
680 			n->dead = 1;
681 			write_unlock(&n->lock);
682 			neigh_release(n);
683 			continue;
684 		}
685 		write_unlock(&n->lock);
686 
687 next_elt:
688 		np = &n->next;
689 	}
690 
691 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
692 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
693 	 * base_reachable_time.
694 	 */
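	/* Worked example (added annotation): with base_reachable_time of
	 * 30 * HZ and 256 buckets (hash_mask = 255), expire is
	 * (15 * HZ) / 256 -- at HZ=1000 the timer fires roughly every
	 * 59 ms, so a full pass over the table takes about 15 seconds.
	 */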
695 	expire = tbl->parms.base_reachable_time >> 1;
696 	expire /= (tbl->hash_mask + 1);
697 	if (!expire)
698 		expire = 1;
699 
700 	mod_timer(&tbl->gc_timer, now + expire);
701 
702 	write_unlock(&tbl->lock);
703 }
704 
705 static __inline__ int neigh_max_probes(struct neighbour *n)
706 {
707 	struct neigh_parms *p = n->parms;
708 	return (n->nud_state & NUD_PROBE ?
709 		p->ucast_probes :
710 		p->ucast_probes + p->app_probes + p->mcast_probes);
711 }
712 
713 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
714 {
715 	if (unlikely(mod_timer(&n->timer, when))) {
716 		printk("NEIGH: BUG, double timer add, state is %x\n",
717 		       n->nud_state);
718 		dump_stack();
719 	}
720 }
721 
722 /* Called when a timer expires for a neighbour entry. */
723 
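/* Added summary of the transitions driven below: REACHABLE either stays
 * put, moves to DELAY if the entry was used recently, or decays to
 * STALE; DELAY returns to REACHABLE on a fresh confirmation or
 * escalates to PROBE; INCOMPLETE/PROBE re-solicit until
 * neigh_max_probes() is exceeded, at which point the entry is FAILED.
 */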
724 static void neigh_timer_handler(unsigned long arg)
725 {
726 	unsigned long now, next;
727 	struct neighbour *neigh = (struct neighbour *)arg;
728 	unsigned state;
729 	int notify = 0;
730 
731 	write_lock(&neigh->lock);
732 
733 	state = neigh->nud_state;
734 	now = jiffies;
735 	next = now + HZ;
736 
737 	if (!(state & NUD_IN_TIMER)) {
738 #ifndef CONFIG_SMP
739 		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
740 #endif
741 		goto out;
742 	}
743 
744 	if (state & NUD_REACHABLE) {
745 		if (time_before_eq(now,
746 				   neigh->confirmed + neigh->parms->reachable_time)) {
747 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
748 			next = neigh->confirmed + neigh->parms->reachable_time;
749 		} else if (time_before_eq(now,
750 					  neigh->used + neigh->parms->delay_probe_time)) {
751 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
752 			neigh->nud_state = NUD_DELAY;
753 			neigh->updated = jiffies;
754 			neigh_suspect(neigh);
755 			next = now + neigh->parms->delay_probe_time;
756 		} else {
757 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
758 			neigh->nud_state = NUD_STALE;
759 			neigh->updated = jiffies;
760 			neigh_suspect(neigh);
761 		}
762 	} else if (state & NUD_DELAY) {
763 		if (time_before_eq(now,
764 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
765 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
766 			neigh->nud_state = NUD_REACHABLE;
767 			neigh->updated = jiffies;
768 			neigh_connect(neigh);
769 			next = neigh->confirmed + neigh->parms->reachable_time;
770 		} else {
771 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
772 			neigh->nud_state = NUD_PROBE;
773 			neigh->updated = jiffies;
774 			atomic_set(&neigh->probes, 0);
775 			next = now + neigh->parms->retrans_time;
776 		}
777 	} else {
778 		/* NUD_PROBE|NUD_INCOMPLETE */
779 		next = now + neigh->parms->retrans_time;
780 	}
781 
782 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
783 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
784 		struct sk_buff *skb;
785 
786 		neigh->nud_state = NUD_FAILED;
787 		neigh->updated = jiffies;
788 		notify = 1;
789 		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
790 		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
791 
792 		/* This is a delicate place: error_report is a very complicated
793 		   routine and, in particular, it can hit this same neighbour entry!
794 
795 		   So we try to be careful here and avoid a dead loop. --ANK
796 		 */
797 		while (neigh->nud_state == NUD_FAILED &&
798 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
799 			write_unlock(&neigh->lock);
800 			neigh->ops->error_report(neigh, skb);
801 			write_lock(&neigh->lock);
802 		}
803 		skb_queue_purge(&neigh->arp_queue);
804 	}
805 
806 	if (neigh->nud_state & NUD_IN_TIMER) {
807 		if (time_before(next, jiffies + HZ/2))
808 			next = jiffies + HZ/2;
809 		if (!mod_timer(&neigh->timer, next))
810 			neigh_hold(neigh);
811 	}
812 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
813 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
814 		/* keep skb alive even if arp_queue overflows */
815 		if (skb)
816 			skb_get(skb);
817 		write_unlock(&neigh->lock);
818 		neigh->ops->solicit(neigh, skb);
819 		atomic_inc(&neigh->probes);
820 		if (skb)
821 			kfree_skb(skb);
822 	} else {
823 out:
824 		write_unlock(&neigh->lock);
825 	}
826 
827 #ifdef CONFIG_ARPD
828 	if (notify && neigh->parms->app_probes)
829 		neigh_app_notify(neigh);
830 #endif
831 	neigh_release(neigh);
832 }
833 
834 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
835 {
836 	int rc;
837 	unsigned long now;
838 
839 	write_lock_bh(&neigh->lock);
840 
841 	rc = 0;
842 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
843 		goto out_unlock_bh;
844 
845 	now = jiffies;
846 
847 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
848 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
849 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
850 			neigh->nud_state     = NUD_INCOMPLETE;
851 			neigh->updated = jiffies;
852 			neigh_hold(neigh);
853 			neigh_add_timer(neigh, now + 1);
854 		} else {
855 			neigh->nud_state = NUD_FAILED;
856 			neigh->updated = jiffies;
857 			write_unlock_bh(&neigh->lock);
858 
859 			if (skb)
860 				kfree_skb(skb);
861 			return 1;
862 		}
863 	} else if (neigh->nud_state & NUD_STALE) {
864 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
865 		neigh_hold(neigh);
866 		neigh->nud_state = NUD_DELAY;
867 		neigh->updated = jiffies;
868 		neigh_add_timer(neigh,
869 				jiffies + neigh->parms->delay_probe_time);
870 	}
871 
872 	if (neigh->nud_state == NUD_INCOMPLETE) {
873 		if (skb) {
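			/* If the queue is already full, drop the oldest
			 * queued skb to make room for the new one.
			 */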
874 			if (skb_queue_len(&neigh->arp_queue) >=
875 			    neigh->parms->queue_len) {
876 				struct sk_buff *buff;
877 				buff = neigh->arp_queue.next;
878 				__skb_unlink(buff, &neigh->arp_queue);
879 				kfree_skb(buff);
880 			}
881 			__skb_queue_tail(&neigh->arp_queue, skb);
882 		}
883 		rc = 1;
884 	}
885 out_unlock_bh:
886 	write_unlock_bh(&neigh->lock);
887 	return rc;
888 }
889 
890 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
891 {
892 	struct hh_cache *hh;
893 	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
894 		neigh->dev->header_cache_update;
895 
896 	if (update) {
897 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
898 			write_lock_bh(&hh->hh_lock);
899 			update(hh, neigh->dev, neigh->ha);
900 			write_unlock_bh(&hh->hh_lock);
901 		}
902 	}
903 }
904 
905 
906 
907 /* Generic update routine.
908    -- lladdr is the new lladdr, or NULL if it is not supplied.
909    -- new    is the new state.
910    -- flags
911 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
912 				if the new one is different.
913 	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
914 				lladdr as suspect instead of overriding
915 				it if the new one is different.
916 				It also allows retaining the current state
917 				if the lladdr is unchanged.
918 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
919 
920 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
921 				NTF_ROUTER flag.
922 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
923 				be a router.
924 
925    The caller MUST hold a reference count on the entry.
926  */
927 
928 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
929 		 u32 flags)
930 {
931 	u8 old;
932 	int err;
933 #ifdef CONFIG_ARPD
934 	int notify = 0;
935 #endif
936 	struct net_device *dev;
937 	int update_isrouter = 0;
938 
939 	write_lock_bh(&neigh->lock);
940 
941 	dev    = neigh->dev;
942 	old    = neigh->nud_state;
943 	err    = -EPERM;
944 
945 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
946 	    (old & (NUD_NOARP | NUD_PERMANENT)))
947 		goto out;
948 
949 	if (!(new & NUD_VALID)) {
950 		neigh_del_timer(neigh);
951 		if (old & NUD_CONNECTED)
952 			neigh_suspect(neigh);
953 		neigh->nud_state = new;
954 		err = 0;
955 #ifdef CONFIG_ARPD
956 		notify = old & NUD_VALID;
957 #endif
958 		goto out;
959 	}
960 
961 	/* Compare the new lladdr with the cached one. */
962 	if (!dev->addr_len) {
963 		/* First case: device needs no address. */
964 		lladdr = neigh->ha;
965 	} else if (lladdr) {
966 		/* The second case: something is already cached
967 		   and a new address is proposed:
968 		   - compare the new and old addresses
969 		   - if they differ, check the override flag
970 		 */
971 		if ((old & NUD_VALID) &&
972 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
973 			lladdr = neigh->ha;
974 	} else {
975 		/* No address is supplied; if we know something,
976 		   use it, otherwise discard the request.
977 		 */
978 		err = -EINVAL;
979 		if (!(old & NUD_VALID))
980 			goto out;
981 		lladdr = neigh->ha;
982 	}
983 
984 	if (new & NUD_CONNECTED)
985 		neigh->confirmed = jiffies;
986 	neigh->updated = jiffies;
987 
988 	/* If the entry was valid and the address has not changed,
989 	   do not change the entry state if the new one is STALE.
990 	 */
991 	err = 0;
992 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
993 	if (old & NUD_VALID) {
994 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
995 			update_isrouter = 0;
996 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
997 			    (old & NUD_CONNECTED)) {
998 				lladdr = neigh->ha;
999 				new = NUD_STALE;
1000 			} else
1001 				goto out;
1002 		} else {
1003 			if (lladdr == neigh->ha && new == NUD_STALE &&
1004 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1005 			     (old & NUD_CONNECTED))
1006 			    )
1007 				new = old;
1008 		}
1009 	}
1010 
1011 	if (new != old) {
1012 		neigh_del_timer(neigh);
1013 		if (new & NUD_IN_TIMER) {
1014 			neigh_hold(neigh);
1015 			neigh_add_timer(neigh, (jiffies +
1016 						((new & NUD_REACHABLE) ?
1017 						 neigh->parms->reachable_time :
1018 						 0)));
1019 		}
1020 		neigh->nud_state = new;
1021 	}
1022 
1023 	if (lladdr != neigh->ha) {
1024 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1025 		neigh_update_hhs(neigh);
1026 		if (!(new & NUD_CONNECTED))
1027 			neigh->confirmed = jiffies -
1028 				      (neigh->parms->base_reachable_time << 1);
1029 #ifdef CONFIG_ARPD
1030 		notify = 1;
1031 #endif
1032 	}
1033 	if (new == old)
1034 		goto out;
1035 	if (new & NUD_CONNECTED)
1036 		neigh_connect(neigh);
1037 	else
1038 		neigh_suspect(neigh);
1039 	if (!(old & NUD_VALID)) {
1040 		struct sk_buff *skb;
1041 
1042 		/* Again: avoid a dead loop if something goes wrong */
1043 
1044 		while (neigh->nud_state & NUD_VALID &&
1045 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1046 			struct neighbour *n1 = neigh;
1047 			write_unlock_bh(&neigh->lock);
1048 			/* On shaper/eql skb->dst->neighbour != neigh :( */
1049 			if (skb->dst && skb->dst->neighbour)
1050 				n1 = skb->dst->neighbour;
1051 			n1->output(skb);
1052 			write_lock_bh(&neigh->lock);
1053 		}
1054 		skb_queue_purge(&neigh->arp_queue);
1055 	}
1056 out:
1057 	if (update_isrouter) {
1058 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1059 			(neigh->flags | NTF_ROUTER) :
1060 			(neigh->flags & ~NTF_ROUTER);
1061 	}
1062 	write_unlock_bh(&neigh->lock);
1063 #ifdef CONFIG_ARPD
1064 	if (notify && neigh->parms->app_probes)
1065 		neigh_app_notify(neigh);
1066 #endif
1067 	return err;
1068 }
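/* A minimal usage sketch (added annotation, not part of the original
 * file): administratively forcing an entry to a new lladdr, much as the
 * netlink handlers below do.  The caller must hold a reference, per the
 * rules above.  Compiled out.
 */
#if 0
static int example_force_lladdr(struct neigh_table *tbl, const void *pkey,
				struct net_device *dev, const u8 *lladdr)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);	/* takes a ref */
	int err = -ENOENT;

	if (n) {
		err = neigh_update(n, lladdr, NUD_PERMANENT,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(n);
	}
	return err;
}
#endif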
1069 
1070 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1071 				 u8 *lladdr, void *saddr,
1072 				 struct net_device *dev)
1073 {
1074 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1075 						 lladdr || !dev->addr_len);
1076 	if (neigh)
1077 		neigh_update(neigh, lladdr, NUD_STALE,
1078 			     NEIGH_UPDATE_F_OVERRIDE);
1079 	return neigh;
1080 }
1081 
1082 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1083 			  u16 protocol)
1084 {
1085 	struct hh_cache	*hh;
1086 	struct net_device *dev = dst->dev;
1087 
1088 	for (hh = n->hh; hh; hh = hh->hh_next)
1089 		if (hh->hh_type == protocol)
1090 			break;
1091 
1092 	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1093 		memset(hh, 0, sizeof(struct hh_cache));
1094 		rwlock_init(&hh->hh_lock);
1095 		hh->hh_type = protocol;
1096 		atomic_set(&hh->hh_refcnt, 0);
1097 		hh->hh_next = NULL;
1098 		if (dev->hard_header_cache(n, hh)) {
1099 			kfree(hh);
1100 			hh = NULL;
1101 		} else {
1102 			atomic_inc(&hh->hh_refcnt);
1103 			hh->hh_next = n->hh;
1104 			n->hh	    = hh;
1105 			if (n->nud_state & NUD_CONNECTED)
1106 				hh->hh_output = n->ops->hh_output;
1107 			else
1108 				hh->hh_output = n->ops->output;
1109 		}
1110 	}
1111 	if (hh)	{
1112 		atomic_inc(&hh->hh_refcnt);
1113 		dst->hh = hh;
1114 	}
1115 }
1116 
1117 /* This function can be used in contexts where only the old dev_queue_xmit
1118    worked, e.g. if you want to override the normal output path (eql,
1119    shaper) but resolution has not been done yet.
1120  */
1121 
1122 int neigh_compat_output(struct sk_buff *skb)
1123 {
1124 	struct net_device *dev = skb->dev;
1125 
1126 	__skb_pull(skb, skb->nh.raw - skb->data);
1127 
1128 	if (dev->hard_header &&
1129 	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1130 		    	     skb->len) < 0 &&
1131 	    dev->rebuild_header(skb))
1132 		return 0;
1133 
1134 	return dev_queue_xmit(skb);
1135 }
1136 
1137 /* Slow and careful. */
1138 
1139 int neigh_resolve_output(struct sk_buff *skb)
1140 {
1141 	struct dst_entry *dst = skb->dst;
1142 	struct neighbour *neigh;
1143 	int rc = 0;
1144 
1145 	if (!dst || !(neigh = dst->neighbour))
1146 		goto discard;
1147 
1148 	__skb_pull(skb, skb->nh.raw - skb->data);
1149 
1150 	if (!neigh_event_send(neigh, skb)) {
1151 		int err;
1152 		struct net_device *dev = neigh->dev;
1153 		if (dev->hard_header_cache && !dst->hh) {
1154 			write_lock_bh(&neigh->lock);
1155 			if (!dst->hh)
1156 				neigh_hh_init(neigh, dst, dst->ops->protocol);
1157 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1158 					       neigh->ha, NULL, skb->len);
1159 			write_unlock_bh(&neigh->lock);
1160 		} else {
1161 			read_lock_bh(&neigh->lock);
1162 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1163 					       neigh->ha, NULL, skb->len);
1164 			read_unlock_bh(&neigh->lock);
1165 		}
1166 		if (err >= 0)
1167 			rc = neigh->ops->queue_xmit(skb);
1168 		else
1169 			goto out_kfree_skb;
1170 	}
1171 out:
1172 	return rc;
1173 discard:
1174 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1175 		      dst, dst ? dst->neighbour : NULL);
1176 out_kfree_skb:
1177 	rc = -EINVAL;
1178 	kfree_skb(skb);
1179 	goto out;
1180 }
1181 
1182 /* As fast as possible without hh cache */
1183 
1184 int neigh_connected_output(struct sk_buff *skb)
1185 {
1186 	int err;
1187 	struct dst_entry *dst = skb->dst;
1188 	struct neighbour *neigh = dst->neighbour;
1189 	struct net_device *dev = neigh->dev;
1190 
1191 	__skb_pull(skb, skb->nh.raw - skb->data);
1192 
1193 	read_lock_bh(&neigh->lock);
1194 	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1195 			       neigh->ha, NULL, skb->len);
1196 	read_unlock_bh(&neigh->lock);
1197 	if (err >= 0)
1198 		err = neigh->ops->queue_xmit(skb);
1199 	else {
1200 		err = -EINVAL;
1201 		kfree_skb(skb);
1202 	}
1203 	return err;
1204 }
1205 
1206 static void neigh_proxy_process(unsigned long arg)
1207 {
1208 	struct neigh_table *tbl = (struct neigh_table *)arg;
1209 	long sched_next = 0;
1210 	unsigned long now = jiffies;
1211 	struct sk_buff *skb;
1212 
1213 	spin_lock(&tbl->proxy_queue.lock);
1214 
1215 	skb = tbl->proxy_queue.next;
1216 
1217 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1218 		struct sk_buff *back = skb;
1219 		long tdif = NEIGH_CB(back)->sched_next - now;
1220 
1221 		skb = skb->next;
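		/* Replay entries whose scheduled time has passed; for the
		 * rest, remember the shortest remaining delay so the timer
		 * can be re-armed below.
		 */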
1222 		if (tdif <= 0) {
1223 			struct net_device *dev = back->dev;
1224 			__skb_unlink(back, &tbl->proxy_queue);
1225 			if (tbl->proxy_redo && netif_running(dev))
1226 				tbl->proxy_redo(back);
1227 			else
1228 				kfree_skb(back);
1229 
1230 			dev_put(dev);
1231 		} else if (!sched_next || tdif < sched_next)
1232 			sched_next = tdif;
1233 	}
1234 	del_timer(&tbl->proxy_timer);
1235 	if (sched_next)
1236 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1237 	spin_unlock(&tbl->proxy_queue.lock);
1238 }
1239 
1240 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1241 		    struct sk_buff *skb)
1242 {
1243 	unsigned long now = jiffies;
1244 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
1245 
1246 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1247 		kfree_skb(skb);
1248 		return;
1249 	}
1250 
1251 	NEIGH_CB(skb)->sched_next = sched_next;
1252 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1253 
1254 	spin_lock(&tbl->proxy_queue.lock);
1255 	if (del_timer(&tbl->proxy_timer)) {
1256 		if (time_before(tbl->proxy_timer.expires, sched_next))
1257 			sched_next = tbl->proxy_timer.expires;
1258 	}
1259 	dst_release(skb->dst);
1260 	skb->dst = NULL;
1261 	dev_hold(skb->dev);
1262 	__skb_queue_tail(&tbl->proxy_queue, skb);
1263 	mod_timer(&tbl->proxy_timer, sched_next);
1264 	spin_unlock(&tbl->proxy_queue.lock);
1265 }
1266 
1267 
1268 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1269 				      struct neigh_table *tbl)
1270 {
1271 	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1272 
1273 	if (p) {
1274 		memcpy(p, &tbl->parms, sizeof(*p));
1275 		p->tbl		  = tbl;
1276 		atomic_set(&p->refcnt, 1);
1277 		INIT_RCU_HEAD(&p->rcu_head);
1278 		p->reachable_time =
1279 				neigh_rand_reach_time(p->base_reachable_time);
1280 		if (dev) {
1281 			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1282 				kfree(p);
1283 				return NULL;
1284 			}
1285 
1286 			dev_hold(dev);
1287 			p->dev = dev;
1288 		}
1289 		p->sysctl_table = NULL;
1290 		write_lock_bh(&tbl->lock);
1291 		p->next		= tbl->parms.next;
1292 		tbl->parms.next = p;
1293 		write_unlock_bh(&tbl->lock);
1294 	}
1295 	return p;
1296 }
1297 
1298 static void neigh_rcu_free_parms(struct rcu_head *head)
1299 {
1300 	struct neigh_parms *parms =
1301 		container_of(head, struct neigh_parms, rcu_head);
1302 
1303 	neigh_parms_put(parms);
1304 }
1305 
1306 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1307 {
1308 	struct neigh_parms **p;
1309 
1310 	if (!parms || parms == &tbl->parms)
1311 		return;
1312 	write_lock_bh(&tbl->lock);
1313 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1314 		if (*p == parms) {
1315 			*p = parms->next;
1316 			parms->dead = 1;
1317 			write_unlock_bh(&tbl->lock);
1318 			if (parms->dev)
1319 				dev_put(parms->dev);
1320 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1321 			return;
1322 		}
1323 	}
1324 	write_unlock_bh(&tbl->lock);
1325 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1326 }
1327 
1328 void neigh_parms_destroy(struct neigh_parms *parms)
1329 {
1330 	kfree(parms);
1331 }
1332 
1333 
1334 void neigh_table_init(struct neigh_table *tbl)
1335 {
1336 	unsigned long now = jiffies;
1337 	unsigned long phsize;
1338 
1339 	atomic_set(&tbl->parms.refcnt, 1);
1340 	INIT_RCU_HEAD(&tbl->parms.rcu_head);
1341 	tbl->parms.reachable_time =
1342 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1343 
1344 	if (!tbl->kmem_cachep)
1345 		tbl->kmem_cachep = kmem_cache_create(tbl->id,
1346 						     tbl->entry_size,
1347 						     0, SLAB_HWCACHE_ALIGN,
1348 						     NULL, NULL);
1349 
1350 	if (!tbl->kmem_cachep)
1351 		panic("cannot create neighbour cache");
1352 
1353 	tbl->stats = alloc_percpu(struct neigh_statistics);
1354 	if (!tbl->stats)
1355 		panic("cannot create neighbour cache statistics");
1356 
1357 #ifdef CONFIG_PROC_FS
1358 	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1359 	if (!tbl->pde)
1360 		panic("cannot create neighbour proc dir entry");
1361 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
1362 	tbl->pde->data = tbl;
1363 #endif
1364 
1365 	tbl->hash_mask = 1;
1366 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1367 
1368 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1369 	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
1370 
1371 	if (!tbl->hash_buckets || !tbl->phash_buckets)
1372 		panic("cannot allocate neighbour cache hashes");
1373 
1374 	memset(tbl->phash_buckets, 0, phsize);
1375 
1376 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1377 
1378 	rwlock_init(&tbl->lock);
1379 	init_timer(&tbl->gc_timer);
1380 	tbl->gc_timer.data     = (unsigned long)tbl;
1381 	tbl->gc_timer.function = neigh_periodic_timer;
1382 	tbl->gc_timer.expires  = now + 1;
1383 	add_timer(&tbl->gc_timer);
1384 
1385 	init_timer(&tbl->proxy_timer);
1386 	tbl->proxy_timer.data	  = (unsigned long)tbl;
1387 	tbl->proxy_timer.function = neigh_proxy_process;
1388 	skb_queue_head_init(&tbl->proxy_queue);
1389 
1390 	tbl->last_flush = now;
1391 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1392 	write_lock(&neigh_tbl_lock);
1393 	tbl->next	= neigh_tables;
1394 	neigh_tables	= tbl;
1395 	write_unlock(&neigh_tbl_lock);
1396 }
1397 
1398 int neigh_table_clear(struct neigh_table *tbl)
1399 {
1400 	struct neigh_table **tp;
1401 
1402 	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1403 	del_timer_sync(&tbl->gc_timer);
1404 	del_timer_sync(&tbl->proxy_timer);
1405 	pneigh_queue_purge(&tbl->proxy_queue);
1406 	neigh_ifdown(tbl, NULL);
1407 	if (atomic_read(&tbl->entries))
1408 		printk(KERN_CRIT "neighbour leakage\n");
1409 	write_lock(&neigh_tbl_lock);
1410 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1411 		if (*tp == tbl) {
1412 			*tp = tbl->next;
1413 			break;
1414 		}
1415 	}
1416 	write_unlock(&neigh_tbl_lock);
1417 
1418 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1419 	tbl->hash_buckets = NULL;
1420 
1421 	kfree(tbl->phash_buckets);
1422 	tbl->phash_buckets = NULL;
1423 
1424 	return 0;
1425 }
1426 
1427 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1428 {
1429 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1430 	struct rtattr **nda = arg;
1431 	struct neigh_table *tbl;
1432 	struct net_device *dev = NULL;
1433 	int err = -ENODEV;
1434 
1435 	if (ndm->ndm_ifindex &&
1436 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1437 		goto out;
1438 
1439 	read_lock(&neigh_tbl_lock);
1440 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1441 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1442 		struct neighbour *n;
1443 
1444 		if (tbl->family != ndm->ndm_family)
1445 			continue;
1446 		read_unlock(&neigh_tbl_lock);
1447 
1448 		err = -EINVAL;
1449 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1450 			goto out_dev_put;
1451 
1452 		if (ndm->ndm_flags & NTF_PROXY) {
1453 			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
1454 			goto out_dev_put;
1455 		}
1456 
1457 		if (!dev)
1458 			goto out;
1459 
1460 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1461 		if (n) {
1462 			err = neigh_update(n, NULL, NUD_FAILED,
1463 					   NEIGH_UPDATE_F_OVERRIDE|
1464 					   NEIGH_UPDATE_F_ADMIN);
1465 			neigh_release(n);
1466 		}
1467 		goto out_dev_put;
1468 	}
1469 	read_unlock(&neigh_tbl_lock);
1470 	err = -EADDRNOTAVAIL;
1471 out_dev_put:
1472 	if (dev)
1473 		dev_put(dev);
1474 out:
1475 	return err;
1476 }
1477 
1478 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1479 {
1480 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1481 	struct rtattr **nda = arg;
1482 	struct neigh_table *tbl;
1483 	struct net_device *dev = NULL;
1484 	int err = -ENODEV;
1485 
1486 	if (ndm->ndm_ifindex &&
1487 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1488 		goto out;
1489 
1490 	read_lock(&neigh_tbl_lock);
1491 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1492 		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
1493 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1494 		int override = 1;
1495 		struct neighbour *n;
1496 
1497 		if (tbl->family != ndm->ndm_family)
1498 			continue;
1499 		read_unlock(&neigh_tbl_lock);
1500 
1501 		err = -EINVAL;
1502 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1503 			goto out_dev_put;
1504 
1505 		if (ndm->ndm_flags & NTF_PROXY) {
1506 			err = -ENOBUFS;
1507 			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
1508 				err = 0;
1509 			goto out_dev_put;
1510 		}
1511 
1512 		err = -EINVAL;
1513 		if (!dev)
1514 			goto out;
1515 		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
1516 			goto out_dev_put;
1517 
1518 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1519 		if (n) {
1520 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1521 				err = -EEXIST;
1522 				neigh_release(n);
1523 				goto out_dev_put;
1524 			}
1525 
1526 			override = nlh->nlmsg_flags & NLM_F_REPLACE;
1527 		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1528 			err = -ENOENT;
1529 			goto out_dev_put;
1530 		} else {
1531 			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
1532 			if (IS_ERR(n)) {
1533 				err = PTR_ERR(n);
1534 				goto out_dev_put;
1535 			}
1536 		}
1537 
1538 		err = neigh_update(n,
1539 				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
1540 				   ndm->ndm_state,
1541 				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
1542 				   NEIGH_UPDATE_F_ADMIN);
1543 
1544 		neigh_release(n);
1545 		goto out_dev_put;
1546 	}
1547 
1548 	read_unlock(&neigh_tbl_lock);
1549 	err = -EADDRNOTAVAIL;
1550 out_dev_put:
1551 	if (dev)
1552 		dev_put(dev);
1553 out:
1554 	return err;
1555 }
1556 
1557 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1558 {
1559 	struct rtattr *nest = NULL;
1560 
1561 	nest = RTA_NEST(skb, NDTA_PARMS);
1562 
1563 	if (parms->dev)
1564 		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1565 
1566 	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1567 	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1568 	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1569 	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1570 	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1571 	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1572 	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1573 	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1574 		      parms->base_reachable_time);
1575 	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1576 	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1577 	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1578 	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1579 	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1580 	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1581 
1582 	return RTA_NEST_END(skb, nest);
1583 
1584 rtattr_failure:
1585 	return RTA_NEST_CANCEL(skb, nest);
1586 }
1587 
1588 static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1589 			      struct netlink_callback *cb)
1590 {
1591 	struct nlmsghdr *nlh;
1592 	struct ndtmsg *ndtmsg;
1593 
1594 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1595 			       NLM_F_MULTI);
1596 
1597 	ndtmsg = NLMSG_DATA(nlh);
1598 
1599 	read_lock_bh(&tbl->lock);
1600 	ndtmsg->ndtm_family = tbl->family;
1601 	ndtmsg->ndtm_pad1   = 0;
1602 	ndtmsg->ndtm_pad2   = 0;
1603 
1604 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1605 	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1606 	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1607 	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1608 	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1609 
1610 	{
1611 		unsigned long now = jiffies;
1612 		unsigned int flush_delta = now - tbl->last_flush;
1613 		unsigned int rand_delta = now - tbl->last_rand;
1614 
1615 		struct ndt_config ndc = {
1616 			.ndtc_key_len		= tbl->key_len,
1617 			.ndtc_entry_size	= tbl->entry_size,
1618 			.ndtc_entries		= atomic_read(&tbl->entries),
1619 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1620 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1621 			.ndtc_hash_rnd		= tbl->hash_rnd,
1622 			.ndtc_hash_mask		= tbl->hash_mask,
1623 			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
1624 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1625 		};
1626 
1627 		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1628 	}
1629 
1630 	{
1631 		int cpu;
1632 		struct ndt_stats ndst;
1633 
1634 		memset(&ndst, 0, sizeof(ndst));
1635 
1636 		for_each_cpu(cpu) {
1637 			struct neigh_statistics	*st;
1638 
1639 			st = per_cpu_ptr(tbl->stats, cpu);
1640 			ndst.ndts_allocs		+= st->allocs;
1641 			ndst.ndts_destroys		+= st->destroys;
1642 			ndst.ndts_hash_grows		+= st->hash_grows;
1643 			ndst.ndts_res_failed		+= st->res_failed;
1644 			ndst.ndts_lookups		+= st->lookups;
1645 			ndst.ndts_hits			+= st->hits;
1646 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1647 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1648 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1649 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1650 		}
1651 
1652 		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1653 	}
1654 
1655 	BUG_ON(tbl->parms.dev);
1656 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1657 		goto rtattr_failure;
1658 
1659 	read_unlock_bh(&tbl->lock);
1660 	return NLMSG_END(skb, nlh);
1661 
1662 rtattr_failure:
1663 	read_unlock_bh(&tbl->lock);
1664 	return NLMSG_CANCEL(skb, nlh);
1665 
1666 nlmsg_failure:
1667 	return -1;
1668 }
1669 
1670 static int neightbl_fill_param_info(struct neigh_table *tbl,
1671 				    struct neigh_parms *parms,
1672 				    struct sk_buff *skb,
1673 				    struct netlink_callback *cb)
1674 {
1675 	struct ndtmsg *ndtmsg;
1676 	struct nlmsghdr *nlh;
1677 
1678 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1679 			       NLM_F_MULTI);
1680 
1681 	ndtmsg = NLMSG_DATA(nlh);
1682 
1683 	read_lock_bh(&tbl->lock);
1684 	ndtmsg->ndtm_family = tbl->family;
1685 	ndtmsg->ndtm_pad1   = 0;
1686 	ndtmsg->ndtm_pad2   = 0;
1687 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1688 
1689 	if (neightbl_fill_parms(skb, parms) < 0)
1690 		goto rtattr_failure;
1691 
1692 	read_unlock_bh(&tbl->lock);
1693 	return NLMSG_END(skb, nlh);
1694 
1695 rtattr_failure:
1696 	read_unlock_bh(&tbl->lock);
1697 	return NLMSG_CANCEL(skb, nlh);
1698 
1699 nlmsg_failure:
1700 	return -1;
1701 }
1702 
1703 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1704 						      int ifindex)
1705 {
1706 	struct neigh_parms *p;
1707 
1708 	for (p = &tbl->parms; p; p = p->next)
1709 		if ((p->dev && p->dev->ifindex == ifindex) ||
1710 		    (!p->dev && !ifindex))
1711 			return p;
1712 
1713 	return NULL;
1714 }
1715 
1716 int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1717 {
1718 	struct neigh_table *tbl;
1719 	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
1720 	struct rtattr **tb = arg;
1721 	int err = -EINVAL;
1722 
1723 	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
1724 		return -EINVAL;
1725 
1726 	read_lock(&neigh_tbl_lock);
1727 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1728 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1729 			continue;
1730 
1731 		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
1732 			break;
1733 	}
1734 
1735 	if (tbl == NULL) {
1736 		err = -ENOENT;
1737 		goto errout;
1738 	}
1739 
1740 	/*
1741 	 * We acquire tbl->lock to be nice to the periodic timers and
1742 	 * make sure they always see a consistent set of values.
1743 	 */
1744 	write_lock_bh(&tbl->lock);
1745 
1746 	if (tb[NDTA_THRESH1 - 1])
1747 		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
1748 
1749 	if (tb[NDTA_THRESH2 - 1])
1750 		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
1751 
1752 	if (tb[NDTA_THRESH3 - 1])
1753 		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
1754 
1755 	if (tb[NDTA_GC_INTERVAL - 1])
1756 		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
1757 
1758 	if (tb[NDTA_PARMS - 1]) {
1759 		struct rtattr *tbp[NDTPA_MAX];
1760 		struct neigh_parms *p;
1761 		u32 ifindex = 0;
1762 
1763 		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
1764 			goto rtattr_failure;
1765 
1766 		if (tbp[NDTPA_IFINDEX - 1])
1767 			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
1768 
1769 		p = lookup_neigh_params(tbl, ifindex);
1770 		if (p == NULL) {
1771 			err = -ENOENT;
1772 			goto rtattr_failure;
1773 		}
1774 
1775 		if (tbp[NDTPA_QUEUE_LEN - 1])
1776 			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
1777 
1778 		if (tbp[NDTPA_PROXY_QLEN - 1])
1779 			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
1780 
1781 		if (tbp[NDTPA_APP_PROBES - 1])
1782 			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
1783 
1784 		if (tbp[NDTPA_UCAST_PROBES - 1])
1785 			p->ucast_probes =
1786 			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
1787 
1788 		if (tbp[NDTPA_MCAST_PROBES - 1])
1789 			p->mcast_probes =
1790 			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
1791 
1792 		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
1793 			p->base_reachable_time =
1794 			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
1795 
1796 		if (tbp[NDTPA_GC_STALETIME - 1])
1797 			p->gc_staletime =
1798 			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
1799 
1800 		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
1801 			p->delay_probe_time =
1802 			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
1803 
1804 		if (tbp[NDTPA_RETRANS_TIME - 1])
1805 			p->retrans_time =
1806 			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
1807 
1808 		if (tbp[NDTPA_ANYCAST_DELAY - 1])
1809 			p->anycast_delay =
1810 			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
1811 
1812 		if (tbp[NDTPA_PROXY_DELAY - 1])
1813 			p->proxy_delay =
1814 			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
1815 
1816 		if (tbp[NDTPA_LOCKTIME - 1])
1817 			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
1818 	}
1819 
1820 	err = 0;
1821 
1822 rtattr_failure:
1823 	write_unlock_bh(&tbl->lock);
1824 errout:
1825 	read_unlock(&neigh_tbl_lock);
1826 	return err;
1827 }
1828 
1829 int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1830 {
1831 	int idx, family;
1832 	int s_idx = cb->args[0];
1833 	struct neigh_table *tbl;
1834 
1835 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1836 
1837 	read_lock(&neigh_tbl_lock);
1838 	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
1839 		struct neigh_parms *p;
1840 
1841 		if (idx < s_idx || (family && tbl->family != family))
1842 			continue;
1843 
1844 		if (neightbl_fill_info(tbl, skb, cb) <= 0)
1845 			break;
1846 
1847 		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
1848 			if (idx < s_idx)
1849 				continue;
1850 
1851 			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
1852 				goto out;
1853 		}
1854 
1855 	}
1856 out:
1857 	read_unlock(&neigh_tbl_lock);
1858 	cb->args[0] = idx;
1859 
1860 	return skb->len;
1861 }
1862 
1863 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1864 			   u32 pid, u32 seq, int event, unsigned int flags)
1865 {
1866 	unsigned long now = jiffies;
1867 	unsigned char *b = skb->tail;
1868 	struct nda_cacheinfo ci;
1869 	int locked = 0;
1870 	u32 probes;
1871 	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
1872 					 sizeof(struct ndmsg), flags);
1873 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1874 
1875 	ndm->ndm_family	 = n->ops->family;
1876 	ndm->ndm_pad1    = 0;
1877 	ndm->ndm_pad2    = 0;
1878 	ndm->ndm_flags	 = n->flags;
1879 	ndm->ndm_type	 = n->type;
1880 	ndm->ndm_ifindex = n->dev->ifindex;
1881 	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1882 	read_lock_bh(&n->lock);
1883 	locked		 = 1;
1884 	ndm->ndm_state	 = n->nud_state;
1885 	if (n->nud_state & NUD_VALID)
1886 		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1887 	ci.ndm_used	 = now - n->used;
1888 	ci.ndm_confirmed = now - n->confirmed;
1889 	ci.ndm_updated	 = now - n->updated;
1890 	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
1891 	probes = atomic_read(&n->probes);
1892 	read_unlock_bh(&n->lock);
1893 	locked		 = 0;
1894 	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1895 	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
1896 	nlh->nlmsg_len	 = skb->tail - b;
1897 	return skb->len;
1898 
1899 nlmsg_failure:
1900 rtattr_failure:
1901 	if (locked)
1902 		read_unlock_bh(&n->lock);
1903 	skb_trim(skb, b - skb->data);
1904 	return -1;
1905 }
1906 
1907 
1908 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1909 			    struct netlink_callback *cb)
1910 {
1911 	struct neighbour *n;
1912 	int rc, h, s_h = cb->args[1];
1913 	int idx, s_idx = idx = cb->args[2];
1914 
1915 	for (h = 0; h <= tbl->hash_mask; h++) {
1916 		if (h < s_h)
1917 			continue;
1918 		if (h > s_h)
1919 			s_idx = 0;
1920 		read_lock_bh(&tbl->lock);
1921 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
1922 			if (idx < s_idx)
1923 				continue;
1924 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1925 					    cb->nlh->nlmsg_seq,
1926 					    RTM_NEWNEIGH,
1927 					    NLM_F_MULTI) <= 0) {
1928 				read_unlock_bh(&tbl->lock);
1929 				rc = -1;
1930 				goto out;
1931 			}
1932 		}
1933 		read_unlock_bh(&tbl->lock);
1934 	}
1935 	rc = skb->len;
1936 out:
1937 	cb->args[1] = h;
1938 	cb->args[2] = idx;
1939 	return rc;
1940 }
1941 
1942 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1943 {
1944 	struct neigh_table *tbl;
1945 	int t, family, s_t;
1946 
1947 	read_lock(&neigh_tbl_lock);
1948 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1949 	s_t = cb->args[0];
1950 
1951 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
1952 		if (t < s_t || (family && tbl->family != family))
1953 			continue;
1954 		if (t > s_t)
1955 			memset(&cb->args[1], 0, sizeof(cb->args) -
1956 						sizeof(cb->args[0]));
1957 		if (neigh_dump_table(tbl, skb, cb) < 0)
1958 			break;
1959 	}
1960 	read_unlock(&neigh_tbl_lock);
1961 
1962 	cb->args[0] = t;
1963 	return skb->len;
1964 }
1965 
1966 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1967 {
1968 	int chain;
1969 
1970 	read_lock_bh(&tbl->lock);
1971 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1972 		struct neighbour *n;
1973 
1974 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1975 			cb(n, cookie);
1976 	}
1977 	read_unlock_bh(&tbl->lock);
1978 }
1979 EXPORT_SYMBOL(neigh_for_each);
1980 
1981 /* The tbl->lock must be held as a writer and BH disabled. */
1982 void __neigh_for_each_release(struct neigh_table *tbl,
1983 			      int (*cb)(struct neighbour *))
1984 {
1985 	int chain;
1986 
1987 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1988 		struct neighbour *n, **np;
1989 
1990 		np = &tbl->hash_buckets[chain];
1991 		while ((n = *np) != NULL) {
1992 			int release;
1993 
1994 			write_lock(&n->lock);
1995 			release = cb(n);
1996 			if (release) {
1997 				*np = n->next;
1998 				n->dead = 1;
1999 			} else
2000 				np = &n->next;
2001 			write_unlock(&n->lock);
2002 			if (release)
2003 				neigh_release(n);
2004 		}
2005 	}
2006 }
2007 EXPORT_SYMBOL(__neigh_for_each_release);
2008 
2009 #ifdef CONFIG_PROC_FS
2010 
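/*
 * seq_file iteration over a neighbour table.  The get_first/get_next
 * helpers below walk the ordinary hash buckets, honouring an optional
 * per-entry sub-iterator (state->neigh_sub_iter) and, when
 * NEIGH_SEQ_SKIP_NOARP is set, skipping entries whose only state bit is
 * NUD_NOARP.
 */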
2011 static struct neighbour *neigh_get_first(struct seq_file *seq)
2012 {
2013 	struct neigh_seq_state *state = seq->private;
2014 	struct neigh_table *tbl = state->tbl;
2015 	struct neighbour *n = NULL;
2016 	int bucket = state->bucket;
2017 
2018 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2019 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2020 		n = tbl->hash_buckets[bucket];
2021 
2022 		while (n) {
2023 			if (state->neigh_sub_iter) {
2024 				loff_t fakep = 0;
2025 				void *v;
2026 
2027 				v = state->neigh_sub_iter(state, n, &fakep);
2028 				if (!v)
2029 					goto next;
2030 			}
2031 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2032 				break;
2033 			if (n->nud_state & ~NUD_NOARP)
2034 				break;
2035 		next:
2036 			n = n->next;
2037 		}
2038 
2039 		if (n)
2040 			break;
2041 	}
2042 	state->bucket = bucket;
2043 
2044 	return n;
2045 }
2046 
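/*
 * Advance to the following entry: first the sub-iterator for the current
 * entry, then the rest of the current chain, then the remaining buckets.
 * When pos is non-NULL it is decremented once per entry returned, which
 * is how neigh_get_idx() seeks to a requested offset.
 */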
2047 static struct neighbour *neigh_get_next(struct seq_file *seq,
2048 					struct neighbour *n,
2049 					loff_t *pos)
2050 {
2051 	struct neigh_seq_state *state = seq->private;
2052 	struct neigh_table *tbl = state->tbl;
2053 
2054 	if (state->neigh_sub_iter) {
2055 		void *v = state->neigh_sub_iter(state, n, pos);
2056 		if (v)
2057 			return n;
2058 	}
2059 	n = n->next;
2060 
2061 	while (1) {
2062 		while (n) {
2063 			if (state->neigh_sub_iter) {
2064 				void *v = state->neigh_sub_iter(state, n, pos);
2065 				if (v)
2066 					return n;
2067 				goto next;
2068 			}
2069 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2070 				break;
2071 
2072 			if (n->nud_state & ~NUD_NOARP)
2073 				break;
2074 		next:
2075 			n = n->next;
2076 		}
2077 
2078 		if (n)
2079 			break;
2080 
2081 		if (++state->bucket > tbl->hash_mask)
2082 			break;
2083 
2084 		n = tbl->hash_buckets[state->bucket];
2085 	}
2086 
2087 	if (n && pos)
2088 		--(*pos);
2089 	return n;
2090 }
2091 
2092 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2093 {
2094 	struct neighbour *n = neigh_get_first(seq);
2095 
2096 	if (n) {
2097 		while (*pos) {
2098 			n = neigh_get_next(seq, n, pos);
2099 			if (!n)
2100 				break;
2101 		}
2102 	}
2103 	return *pos ? NULL : n;
2104 }
2105 
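/*
 * Proxy entries live in a separate fixed-size hash (PNEIGH_HASHMASK + 1
 * buckets).  NEIGH_SEQ_IS_PNEIGH in state->flags records that the
 * iterator has moved on from ordinary entries to the proxy list.
 */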
2106 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2107 {
2108 	struct neigh_seq_state *state = seq->private;
2109 	struct neigh_table *tbl = state->tbl;
2110 	struct pneigh_entry *pn = NULL;
2111 	int bucket = state->bucket;
2112 
2113 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2114 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2115 		pn = tbl->phash_buckets[bucket];
2116 		if (pn)
2117 			break;
2118 	}
2119 	state->bucket = bucket;
2120 
2121 	return pn;
2122 }
2123 
2124 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2125 					    struct pneigh_entry *pn,
2126 					    loff_t *pos)
2127 {
2128 	struct neigh_seq_state *state = seq->private;
2129 	struct neigh_table *tbl = state->tbl;
2130 
2131 	pn = pn->next;
2132 	while (!pn) {
2133 		if (++state->bucket > PNEIGH_HASHMASK)
2134 			break;
2135 		pn = tbl->phash_buckets[state->bucket];
2136 		if (pn)
2137 			break;
2138 	}
2139 
2140 	if (pn && pos)
2141 		--(*pos);
2142 
2143 	return pn;
2144 }
2145 
2146 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2147 {
2148 	struct pneigh_entry *pn = pneigh_get_first(seq);
2149 
2150 	if (pn) {
2151 		while (*pos) {
2152 			pn = pneigh_get_next(seq, pn, pos);
2153 			if (!pn)
2154 				break;
2155 		}
2156 	}
2157 	return *pos ? NULL : pn;
2158 }
2159 
2160 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2161 {
2162 	struct neigh_seq_state *state = seq->private;
2163 	void *rc;
2164 
2165 	rc = neigh_get_idx(seq, pos);
2166 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2167 		rc = pneigh_get_idx(seq, pos);
2168 
2169 	return rc;
2170 }
2171 
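/*
 * Protocols embed these start/next/stop callbacks in their own
 * seq_operations.  tbl->lock is read-locked here and not dropped until
 * neigh_seq_stop(), so writers are excluded for the whole read pass.
 * Position 0 yields SEQ_START_TOKEN for a header line.
 */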
2172 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2173 {
2174 	struct neigh_seq_state *state = seq->private;
2175 	loff_t pos_minus_one;
2176 
2177 	state->tbl = tbl;
2178 	state->bucket = 0;
2179 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2180 
2181 	read_lock_bh(&tbl->lock);
2182 
2183 	pos_minus_one = *pos - 1;
2184 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2185 }
2186 EXPORT_SYMBOL(neigh_seq_start);
2187 
2188 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2189 {
2190 	struct neigh_seq_state *state;
2191 	void *rc;
2192 
2193 	if (v == SEQ_START_TOKEN) {
2194 		rc = neigh_get_idx(seq, pos);
2195 		goto out;
2196 	}
2197 
2198 	state = seq->private;
2199 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2200 		rc = neigh_get_next(seq, v, NULL);
2201 		if (rc)
2202 			goto out;
2203 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2204 			rc = pneigh_get_first(seq);
2205 	} else {
2206 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2207 		rc = pneigh_get_next(seq, v, NULL);
2208 	}
2209 out:
2210 	++(*pos);
2211 	return rc;
2212 }
2213 EXPORT_SYMBOL(neigh_seq_next);
2214 
2215 void neigh_seq_stop(struct seq_file *seq, void *v)
2216 {
2217 	struct neigh_seq_state *state = seq->private;
2218 	struct neigh_table *tbl = state->tbl;
2219 
2220 	read_unlock_bh(&tbl->lock);
2221 }
2222 EXPORT_SYMBOL(neigh_seq_stop);
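/*
 * A minimal wiring sketch (hypothetical protocol code; foo_tbl and
 * foo_seq_show are illustrative names, not symbols defined here):
 *
 *	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &foo_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static struct seq_operations foo_seq_ops = {
 *		.start = foo_seq_start,
 *		.next  = neigh_seq_next,
 *		.stop  = neigh_seq_stop,
 *		.show  = foo_seq_show,	(protocol-specific formatting)
 *	};
 */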
2223 
2224 /* statistics via seq_file */
2225 
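/*
 * Per-CPU statistics.  *pos encodes the iteration position as cpu + 1,
 * with position 0 reserved for the header line (SEQ_START_TOKEN);
 * impossible CPUs are skipped.
 */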
2226 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2227 {
2228 	struct proc_dir_entry *pde = seq->private;
2229 	struct neigh_table *tbl = pde->data;
2230 	int cpu;
2231 
2232 	if (*pos == 0)
2233 		return SEQ_START_TOKEN;
2234 
2235 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2236 		if (!cpu_possible(cpu))
2237 			continue;
2238 		*pos = cpu+1;
2239 		return per_cpu_ptr(tbl->stats, cpu);
2240 	}
2241 	return NULL;
2242 }
2243 
2244 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2245 {
2246 	struct proc_dir_entry *pde = seq->private;
2247 	struct neigh_table *tbl = pde->data;
2248 	int cpu;
2249 
2250 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2251 		if (!cpu_possible(cpu))
2252 			continue;
2253 		*pos = cpu+1;
2254 		return per_cpu_ptr(tbl->stats, cpu);
2255 	}
2256 	return NULL;
2257 }
2258 
2259 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2260 {
2261 
2262 }
2263 
2264 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2265 {
2266 	struct proc_dir_entry *pde = seq->private;
2267 	struct neigh_table *tbl = pde->data;
2268 	struct neigh_statistics *st = v;
2269 
2270 	if (v == SEQ_START_TOKEN) {
2271 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2272 		return 0;
2273 	}
2274 
2275 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2276 			"%08lx %08lx  %08lx %08lx\n",
2277 		   atomic_read(&tbl->entries),
2278 
2279 		   st->allocs,
2280 		   st->destroys,
2281 		   st->hash_grows,
2282 
2283 		   st->lookups,
2284 		   st->hits,
2285 
2286 		   st->res_failed,
2287 
2288 		   st->rcv_probes_mcast,
2289 		   st->rcv_probes_ucast,
2290 
2291 		   st->periodic_gc_runs,
2292 		   st->forced_gc_runs
2293 		   );
2294 
2295 	return 0;
2296 }
2297 
2298 static struct seq_operations neigh_stat_seq_ops = {
2299 	.start	= neigh_stat_seq_start,
2300 	.next	= neigh_stat_seq_next,
2301 	.stop	= neigh_stat_seq_stop,
2302 	.show	= neigh_stat_seq_show,
2303 };
2304 
2305 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2306 {
2307 	int ret = seq_open(file, &neigh_stat_seq_ops);
2308 
2309 	if (!ret) {
2310 		struct seq_file *sf = file->private_data;
2311 		sf->private = PDE(inode);
2312 	}
2313 	return ret;
2314 }
2315 
2316 static struct file_operations neigh_stat_seq_fops = {
2317 	.owner	 = THIS_MODULE,
2318 	.open 	 = neigh_stat_seq_open,
2319 	.read	 = seq_read,
2320 	.llseek	 = seq_lseek,
2321 	.release = seq_release,
2322 };
2323 
2324 #endif /* CONFIG_PROC_FS */
2325 
2326 #ifdef CONFIG_ARPD
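/*
 * Notify a user-space ARP daemon.  neigh_app_ns() asks the daemon to
 * resolve an address (RTM_GETNEIGH marked NLM_F_REQUEST), while
 * neigh_app_notify() below announces an updated entry (RTM_NEWNEIGH).
 * Both broadcast on the RTNLGRP_NEIGH netlink group.
 */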
2327 void neigh_app_ns(struct neighbour *n)
2328 {
2329 	struct nlmsghdr  *nlh;
2330 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2331 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2332 
2333 	if (!skb)
2334 		return;
2335 
2336 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2337 		kfree_skb(skb);
2338 		return;
2339 	}
2340 	nlh			   = (struct nlmsghdr *)skb->data;
2341 	nlh->nlmsg_flags	   = NLM_F_REQUEST;
2342 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2343 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2344 }
2345 
2346 static void neigh_app_notify(struct neighbour *n)
2347 {
2348 	struct nlmsghdr *nlh;
2349 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2350 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2351 
2352 	if (!skb)
2353 		return;
2354 
2355 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2356 		kfree_skb(skb);
2357 		return;
2358 	}
2359 	nlh			   = (struct nlmsghdr *)skb->data;
2360 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2361 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2362 }
2363 
2364 #endif /* CONFIG_ARPD */
2365 
2366 #ifdef CONFIG_SYSCTL
2367 
2368 static struct neigh_sysctl_table {
2369 	struct ctl_table_header *sysctl_header;
2370 	ctl_table		neigh_vars[__NET_NEIGH_MAX];
2371 	ctl_table		neigh_dev[2];
2372 	ctl_table		neigh_neigh_dir[2];
2373 	ctl_table		neigh_proto_dir[2];
2374 	ctl_table		neigh_root_dir[2];
2375 } neigh_sysctl_template = {
2376 	.neigh_vars = {
2377 		{
2378 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2379 			.procname	= "mcast_solicit",
2380 			.maxlen		= sizeof(int),
2381 			.mode		= 0644,
2382 			.proc_handler	= &proc_dointvec,
2383 		},
2384 		{
2385 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2386 			.procname	= "ucast_solicit",
2387 			.maxlen		= sizeof(int),
2388 			.mode		= 0644,
2389 			.proc_handler	= &proc_dointvec,
2390 		},
2391 		{
2392 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2393 			.procname	= "app_solicit",
2394 			.maxlen		= sizeof(int),
2395 			.mode		= 0644,
2396 			.proc_handler	= &proc_dointvec,
2397 		},
2398 		{
2399 			.ctl_name	= NET_NEIGH_RETRANS_TIME,
2400 			.procname	= "retrans_time",
2401 			.maxlen		= sizeof(int),
2402 			.mode		= 0644,
2403 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2404 		},
2405 		{
2406 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2407 			.procname	= "base_reachable_time",
2408 			.maxlen		= sizeof(int),
2409 			.mode		= 0644,
2410 			.proc_handler	= &proc_dointvec_jiffies,
2411 			.strategy	= &sysctl_jiffies,
2412 		},
2413 		{
2414 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2415 			.procname	= "delay_first_probe_time",
2416 			.maxlen		= sizeof(int),
2417 			.mode		= 0644,
2418 			.proc_handler	= &proc_dointvec_jiffies,
2419 			.strategy	= &sysctl_jiffies,
2420 		},
2421 		{
2422 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2423 			.procname	= "gc_stale_time",
2424 			.maxlen		= sizeof(int),
2425 			.mode		= 0644,
2426 			.proc_handler	= &proc_dointvec_jiffies,
2427 			.strategy	= &sysctl_jiffies,
2428 		},
2429 		{
2430 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2431 			.procname	= "unres_qlen",
2432 			.maxlen		= sizeof(int),
2433 			.mode		= 0644,
2434 			.proc_handler	= &proc_dointvec,
2435 		},
2436 		{
2437 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2438 			.procname	= "proxy_qlen",
2439 			.maxlen		= sizeof(int),
2440 			.mode		= 0644,
2441 			.proc_handler	= &proc_dointvec,
2442 		},
2443 		{
2444 			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
2445 			.procname	= "anycast_delay",
2446 			.maxlen		= sizeof(int),
2447 			.mode		= 0644,
2448 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2449 		},
2450 		{
2451 			.ctl_name	= NET_NEIGH_PROXY_DELAY,
2452 			.procname	= "proxy_delay",
2453 			.maxlen		= sizeof(int),
2454 			.mode		= 0644,
2455 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2456 		},
2457 		{
2458 			.ctl_name	= NET_NEIGH_LOCKTIME,
2459 			.procname	= "locktime",
2460 			.maxlen		= sizeof(int),
2461 			.mode		= 0644,
2462 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2463 		},
2464 		{
2465 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2466 			.procname	= "gc_interval",
2467 			.maxlen		= sizeof(int),
2468 			.mode		= 0644,
2469 			.proc_handler	= &proc_dointvec_jiffies,
2470 			.strategy	= &sysctl_jiffies,
2471 		},
2472 		{
2473 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2474 			.procname	= "gc_thresh1",
2475 			.maxlen		= sizeof(int),
2476 			.mode		= 0644,
2477 			.proc_handler	= &proc_dointvec,
2478 		},
2479 		{
2480 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2481 			.procname	= "gc_thresh2",
2482 			.maxlen		= sizeof(int),
2483 			.mode		= 0644,
2484 			.proc_handler	= &proc_dointvec,
2485 		},
2486 		{
2487 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2488 			.procname	= "gc_thresh3",
2489 			.maxlen		= sizeof(int),
2490 			.mode		= 0644,
2491 			.proc_handler	= &proc_dointvec,
2492 		},
2493 		{
2494 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2495 			.procname	= "retrans_time_ms",
2496 			.maxlen		= sizeof(int),
2497 			.mode		= 0644,
2498 			.proc_handler	= &proc_dointvec_ms_jiffies,
2499 			.strategy	= &sysctl_ms_jiffies,
2500 		},
2501 		{
2502 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2503 			.procname	= "base_reachable_time_ms",
2504 			.maxlen		= sizeof(int),
2505 			.mode		= 0644,
2506 			.proc_handler	= &proc_dointvec_ms_jiffies,
2507 			.strategy	= &sysctl_ms_jiffies,
2508 		},
2509 	},
2510 	.neigh_dev = {
2511 		{
2512 			.ctl_name	= NET_PROTO_CONF_DEFAULT,
2513 			.procname	= "default",
2514 			.mode		= 0555,
2515 		},
2516 	},
2517 	.neigh_neigh_dir = {
2518 		{
2519 			.procname	= "neigh",
2520 			.mode		= 0555,
2521 		},
2522 	},
2523 	.neigh_proto_dir = {
2524 		{
2525 			.mode		= 0555,
2526 		},
2527 	},
2528 	.neigh_root_dir = {
2529 		{
2530 			.ctl_name	= CTL_NET,
2531 			.procname	= "net",
2532 			.mode		= 0555,
2533 		},
2534 	},
2535 };
2536 
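/*
 * Clone the template above, point each neigh_vars[] slot at the matching
 * field of the neigh_parms being registered, and register the resulting
 * tree as net/<protocol>/neigh/<device>/.  Slots 12-15 (gc_interval and
 * the gc_thresh knobs) belong to the table rather than to one device, so
 * their procnames are cleared when a specific device is given and they
 * appear only under the per-protocol "default" directory.
 */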
2537 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2538 			  int p_id, int pdev_id, char *p_name,
2539 			  proc_handler *handler, ctl_handler *strategy)
2540 {
2541 	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2542 	const char *dev_name_source = NULL;
2543 	char *dev_name = NULL;
2544 	int err = 0;
2545 
2546 	if (!t)
2547 		return -ENOBUFS;
2548 	memcpy(t, &neigh_sysctl_template, sizeof(*t));
2549 	t->neigh_vars[0].data  = &p->mcast_probes;
2550 	t->neigh_vars[1].data  = &p->ucast_probes;
2551 	t->neigh_vars[2].data  = &p->app_probes;
2552 	t->neigh_vars[3].data  = &p->retrans_time;
2553 	t->neigh_vars[4].data  = &p->base_reachable_time;
2554 	t->neigh_vars[5].data  = &p->delay_probe_time;
2555 	t->neigh_vars[6].data  = &p->gc_staletime;
2556 	t->neigh_vars[7].data  = &p->queue_len;
2557 	t->neigh_vars[8].data  = &p->proxy_qlen;
2558 	t->neigh_vars[9].data  = &p->anycast_delay;
2559 	t->neigh_vars[10].data = &p->proxy_delay;
2560 	t->neigh_vars[11].data = &p->locktime;
2561 
2562 	if (dev) {
2563 		dev_name_source = dev->name;
2564 		t->neigh_dev[0].ctl_name = dev->ifindex;
2565 		t->neigh_vars[12].procname = NULL;
2566 		t->neigh_vars[13].procname = NULL;
2567 		t->neigh_vars[14].procname = NULL;
2568 		t->neigh_vars[15].procname = NULL;
2569 	} else {
2570 		dev_name_source = t->neigh_dev[0].procname;
2571 		t->neigh_vars[12].data = (int *)(p + 1);
2572 		t->neigh_vars[13].data = (int *)(p + 1) + 1;
2573 		t->neigh_vars[14].data = (int *)(p + 1) + 2;
2574 		t->neigh_vars[15].data = (int *)(p + 1) + 3;
2575 	}
2576 
2577 	t->neigh_vars[16].data  = &p->retrans_time;
2578 	t->neigh_vars[17].data  = &p->base_reachable_time;
2579 
2580 	if (handler || strategy) {
2581 		/* RetransTime */
2582 		t->neigh_vars[3].proc_handler = handler;
2583 		t->neigh_vars[3].strategy = strategy;
2584 		t->neigh_vars[3].extra1 = dev;
2585 		/* ReachableTime */
2586 		t->neigh_vars[4].proc_handler = handler;
2587 		t->neigh_vars[4].strategy = strategy;
2588 		t->neigh_vars[4].extra1 = dev;
2589 		/* RetransTime (in milliseconds) */
2590 		t->neigh_vars[16].proc_handler = handler;
2591 		t->neigh_vars[16].strategy = strategy;
2592 		t->neigh_vars[16].extra1 = dev;
2593 		/* ReachableTime (in milliseconds) */
2594 		t->neigh_vars[17].proc_handler = handler;
2595 		t->neigh_vars[17].strategy = strategy;
2596 		t->neigh_vars[17].extra1 = dev;
2597 	}
2598 
2599 	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2600 	if (!dev_name) {
2601 		err = -ENOBUFS;
2602 		goto free;
2603 	}
2604 
2605 	t->neigh_dev[0].procname = dev_name;
2606 
2607 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
2608 
2609 	t->neigh_proto_dir[0].procname = p_name;
2610 	t->neigh_proto_dir[0].ctl_name = p_id;
2611 
2612 	t->neigh_dev[0].child	       = t->neigh_vars;
2613 	t->neigh_neigh_dir[0].child    = t->neigh_dev;
2614 	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2615 	t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2616 
2617 	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2618 	if (!t->sysctl_header) {
2619 		err = -ENOBUFS;
2620 		goto free_procname;
2621 	}
2622 	p->sysctl_table = t;
2623 	return 0;
2624 
2625 	/* error path */
2626  free_procname:
2627 	kfree(dev_name);
2628  free:
2629 	kfree(t);
2630 
2631 	return err;
2632 }
2633 
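/*
 * A registration sketch (hypothetical: the ids and the "ipv4" name are
 * illustrative, following the arp_tbl pattern):
 *
 *	err = neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4,
 *				    NET_IPV4_NEIGH, "ipv4", NULL, NULL);
 *
 * Passing NULL for dev registers the protocol's "default" directory; a
 * per-device call passes the net_device instead.
 */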
2634 void neigh_sysctl_unregister(struct neigh_parms *p)
2635 {
2636 	if (p->sysctl_table) {
2637 		struct neigh_sysctl_table *t = p->sysctl_table;
2638 		p->sysctl_table = NULL;
2639 		unregister_sysctl_table(t->sysctl_header);
2640 		kfree(t->neigh_dev[0].procname);
2641 		kfree(t);
2642 	}
2643 }
2644 
2645 #endif	/* CONFIG_SYSCTL */
2646 
2647 EXPORT_SYMBOL(__neigh_event_send);
2648 EXPORT_SYMBOL(neigh_add);
2649 EXPORT_SYMBOL(neigh_changeaddr);
2650 EXPORT_SYMBOL(neigh_compat_output);
2651 EXPORT_SYMBOL(neigh_connected_output);
2652 EXPORT_SYMBOL(neigh_create);
2653 EXPORT_SYMBOL(neigh_delete);
2654 EXPORT_SYMBOL(neigh_destroy);
2655 EXPORT_SYMBOL(neigh_dump_info);
2656 EXPORT_SYMBOL(neigh_event_ns);
2657 EXPORT_SYMBOL(neigh_ifdown);
2658 EXPORT_SYMBOL(neigh_lookup);
2659 EXPORT_SYMBOL(neigh_lookup_nodev);
2660 EXPORT_SYMBOL(neigh_parms_alloc);
2661 EXPORT_SYMBOL(neigh_parms_release);
2662 EXPORT_SYMBOL(neigh_rand_reach_time);
2663 EXPORT_SYMBOL(neigh_resolve_output);
2664 EXPORT_SYMBOL(neigh_table_clear);
2665 EXPORT_SYMBOL(neigh_table_init);
2666 EXPORT_SYMBOL(neigh_update);
2667 EXPORT_SYMBOL(neigh_update_hhs);
2668 EXPORT_SYMBOL(pneigh_enqueue);
2669 EXPORT_SYMBOL(pneigh_lookup);
2670 EXPORT_SYMBOL(neightbl_dump_info);
2671 EXPORT_SYMBOL(neightbl_set);
2672 
2673 #ifdef CONFIG_ARPD
2674 EXPORT_SYMBOL(neigh_app_ns);
2675 #endif
2676 #ifdef CONFIG_SYSCTL
2677 EXPORT_SYMBOL(neigh_sysctl_register);
2678 EXPORT_SYMBOL(neigh_sysctl_unregister);
2679 #endif
2680