/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans of, and updates to, hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */

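/* A minimal sketch of that discipline (illustrative only, hence not
 * compiled in): neigh_lookup() below takes tbl->lock just long enough
 * to find the entry and raise its reference count; all non-trivial
 * work then happens outside the lock.
 */
#if 0
static void example_use_entry(struct neigh_table *tbl, const void *pkey,
			      struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n) {
		/* Non-trivial work (transmits, backend callbacks)
		 * belongs here, outside tbl->lock.
		 */
		neigh_release(n);
	}
}
#endif
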
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * Returns a value uniformly distributed in the interval
 * (1/2)*base ... (3/2)*base.  This corresponds to the default IPv6
 * settings and is not overridable, because it is a really reasonable
 * choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}

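/* For example, assuming HZ=1000, base = 30 * HZ (the 30 s IPv6 default)
 * yields a value in [15000, 45000) jiffies, i.e. 15 s ... 45 s.
 */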

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* This is the most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destruction will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}

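/* A typical lookup-or-create call sequence (illustrative sketch, not
 * compiled in); __neigh_lookup_errno() in <net/neighbour.h>, used by the
 * netlink code below, wraps this same pattern.
 */
#if 0
static struct neighbour *example_lookup_or_create(struct neigh_table *tbl,
						  const void *pkey,
						  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		n = neigh_create(tbl, pkey, dev); /* may return ERR_PTR() */
	return n;
}
#endif
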
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

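	/* Fold the last four bytes of the key down to a 4-bit bucket
	 * index (PNEIGH_HASHMASK is 0xF).
	 */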
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}


int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}


/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

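/* Fast path vs. slow path: a "connected" entry emits packets through
 * ops->connected_output (and hh->hh_output for pre-built cached headers),
 * which trusts the current link-layer address; a suspect entry falls back
 * to ops->output (typically neigh_resolve_output), which re-runs the
 * reachability state machine via neigh_event_send() first.
 */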
634 {
635 	struct neigh_table *tbl = (struct neigh_table *)arg;
636 	struct neighbour *n, **np;
637 	unsigned long expire, now = jiffies;
638 
639 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
640 
641 	write_lock(&tbl->lock);
642 
643 	/*
644 	 *	periodically recompute ReachableTime from random function
645 	 */
646 
647 	if (time_after(now, tbl->last_rand + 300 * HZ)) {
648 		struct neigh_parms *p;
649 		tbl->last_rand = now;
650 		for (p = &tbl->parms; p; p = p->next)
651 			p->reachable_time =
652 				neigh_rand_reach_time(p->base_reachable_time);
653 	}
654 
655 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
656 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
657 
658 	while ((n = *np) != NULL) {
659 		unsigned int state;
660 
661 		write_lock(&n->lock);
662 
663 		state = n->nud_state;
664 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
665 			write_unlock(&n->lock);
666 			goto next_elt;
667 		}
668 
669 		if (time_before(n->used, n->confirmed))
670 			n->used = n->confirmed;
671 
672 		if (atomic_read(&n->refcnt) == 1 &&
673 		    (state == NUD_FAILED ||
674 		     time_after(now, n->used + n->parms->gc_staletime))) {
675 			*np = n->next;
676 			n->dead = 1;
677 			write_unlock(&n->lock);
678 			neigh_release(n);
679 			continue;
680 		}
681 		write_unlock(&n->lock);
682 
683 next_elt:
684 		np = &n->next;
685 	}
686 
687  	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
688  	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
689  	 * base_reachable_time.
690 	 */
691 	expire = tbl->parms.base_reachable_time >> 1;
692 	expire /= (tbl->hash_mask + 1);
693 	if (!expire)
694 		expire = 1;
695 
696  	mod_timer(&tbl->gc_timer, now + expire);
697 
698 	write_unlock(&tbl->lock);
699 }
700 
701 static __inline__ int neigh_max_probes(struct neighbour *n)
702 {
703 	struct neigh_parms *p = n->parms;
704 	return (n->nud_state & NUD_PROBE ?
705 		p->ucast_probes :
706 		p->ucast_probes + p->app_probes + p->mcast_probes);
707 }
708 
709 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
710 {
711 	if (unlikely(mod_timer(&n->timer, when))) {
712 		printk("NEIGH: BUG, double timer add, state is %x\n",
713 		       n->nud_state);
714 		dump_stack();
715 	}
716 }
717 
718 /* Called when a timer expires for a neighbour entry. */
719 
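/* Summary of the transitions driven below:
 *
 *   REACHABLE -> DELAY  (confirmation aged, entry recently used)
 *   REACHABLE -> STALE  (confirmation aged, entry idle)
 *   DELAY     -> REACHABLE  (confirmed within delay_probe_time)
 *   DELAY     -> PROBE      (no confirmation arrived)
 *   PROBE / INCOMPLETE -> FAILED  (neigh_max_probes() exceeded)
 *
 * While INCOMPLETE or PROBE, the handler also re-solicits the neighbour
 * and re-arms the timer.
 */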
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very thin place.  report_unreachable is a very
		   complicated routine; in particular, it can hit the same
		   neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}

static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   The caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;
	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}

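/* For instance, an administrative replace, much like the netlink handler
 * neigh_add() below performs, would look like this (illustrative sketch,
 * not compiled in):
 */
#if 0
static int example_admin_replace(struct neighbour *n, const u8 *lladdr)
{
	return neigh_update(n, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE |
			    NEIGH_UPDATE_F_ADMIN);
}
#endif
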
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh)	{
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution is not yet made.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

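/* Proxy entries do not answer immediately: pneigh_enqueue() above parks
 * the request skb on tbl->proxy_queue for a random delay of up to
 * parms->proxy_delay, and neigh_proxy_process() then replays it via
 * tbl->proxy_redo (or drops it) when the proxy timer fires.
 */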

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

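/* Per-device parameter lifecycle (illustrative sketch, not compiled in;
 * "tbl" would be a real table such as IPv4's arp_tbl):
 */
#if 0
static struct neigh_parms *example_parms;

static int example_dev_up(struct net_device *dev, struct neigh_table *tbl)
{
	/* Clones tbl->parms, runs dev->neigh_setup() and links the copy
	 * into the table's parms list.
	 */
	example_parms = neigh_parms_alloc(dev, tbl);
	return example_parms ? 0 : -ENOBUFS;
}

static void example_dev_down(struct neigh_table *tbl)
{
	/* Unlinks from tbl->parms and frees via RCU once unreferenced. */
	neigh_parms_release(tbl, example_parms);
}
#endif
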
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}

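/* A protocol registers its table roughly like this (illustrative sketch,
 * not compiled in; the values are placeholders, compare IPv4's arp_tbl):
 */
#if 0
static u32 example_hash(const void *pkey, const struct net_device *dev);
static int example_constructor(struct neighbour *n);

static struct neigh_table example_tbl = {
	.family		= AF_UNSPEC,		/* placeholder family */
	.entry_size	= sizeof(struct neighbour) + 4,
	.key_len	= 4,			/* e.g. an IPv4 address */
	.hash		= example_hash,
	.constructor	= example_constructor,
	.id		= "example_cache",
	.gc_interval	= 30 * HZ,
	.gc_thresh1	= 128,
	.gc_thresh2	= 512,
	.gc_thresh3	= 1024,
};

static void __init example_init(void)
{
	neigh_table_init(&example_tbl);
}
#endif
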
1407 int neigh_table_clear(struct neigh_table *tbl)
1408 {
1409 	struct neigh_table **tp;
1410 
1411 	/* It is not clean... Fix it to unload IPv6 module safely */
1412 	del_timer_sync(&tbl->gc_timer);
1413 	del_timer_sync(&tbl->proxy_timer);
1414 	pneigh_queue_purge(&tbl->proxy_queue);
1415 	neigh_ifdown(tbl, NULL);
1416 	if (atomic_read(&tbl->entries))
1417 		printk(KERN_CRIT "neighbour leakage\n");
1418 	write_lock(&neigh_tbl_lock);
1419 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1420 		if (*tp == tbl) {
1421 			*tp = tbl->next;
1422 			break;
1423 		}
1424 	}
1425 	write_unlock(&neigh_tbl_lock);
1426 
1427 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1428 	tbl->hash_buckets = NULL;
1429 
1430 	kfree(tbl->phash_buckets);
1431 	tbl->phash_buckets = NULL;
1432 
1433 	return 0;
1434 }
1435 
1436 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1437 {
1438 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1439 	struct rtattr **nda = arg;
1440 	struct neigh_table *tbl;
1441 	struct net_device *dev = NULL;
1442 	int err = -ENODEV;
1443 
1444 	if (ndm->ndm_ifindex &&
1445 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1446 		goto out;
1447 
1448 	read_lock(&neigh_tbl_lock);
1449 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1450 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1451 		struct neighbour *n;
1452 
1453 		if (tbl->family != ndm->ndm_family)
1454 			continue;
1455 		read_unlock(&neigh_tbl_lock);
1456 
1457 		err = -EINVAL;
1458 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1459 			goto out_dev_put;
1460 
1461 		if (ndm->ndm_flags & NTF_PROXY) {
1462 			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
1463 			goto out_dev_put;
1464 		}
1465 
1466 		if (!dev)
1467 			goto out;
1468 
1469 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1470 		if (n) {
1471 			err = neigh_update(n, NULL, NUD_FAILED,
1472 					   NEIGH_UPDATE_F_OVERRIDE|
1473 					   NEIGH_UPDATE_F_ADMIN);
1474 			neigh_release(n);
1475 		}
1476 		goto out_dev_put;
1477 	}
1478 	read_unlock(&neigh_tbl_lock);
1479 	err = -EADDRNOTAVAIL;
1480 out_dev_put:
1481 	if (dev)
1482 		dev_put(dev);
1483 out:
1484 	return err;
1485 }
1486 
1487 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1488 {
1489 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1490 	struct rtattr **nda = arg;
1491 	struct neigh_table *tbl;
1492 	struct net_device *dev = NULL;
1493 	int err = -ENODEV;
1494 
1495 	if (ndm->ndm_ifindex &&
1496 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1497 		goto out;
1498 
1499 	read_lock(&neigh_tbl_lock);
1500 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1501 		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
1502 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1503 		int override = 1;
1504 		struct neighbour *n;
1505 
1506 		if (tbl->family != ndm->ndm_family)
1507 			continue;
1508 		read_unlock(&neigh_tbl_lock);
1509 
1510 		err = -EINVAL;
1511 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1512 			goto out_dev_put;
1513 
1514 		if (ndm->ndm_flags & NTF_PROXY) {
1515 			err = -ENOBUFS;
1516 			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
1517 				err = 0;
1518 			goto out_dev_put;
1519 		}
1520 
1521 		err = -EINVAL;
1522 		if (!dev)
1523 			goto out;
1524 		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
1525 			goto out_dev_put;
1526 
1527 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1528 		if (n) {
1529 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1530 				err = -EEXIST;
1531 				neigh_release(n);
1532 				goto out_dev_put;
1533 			}
1534 
1535 			override = nlh->nlmsg_flags & NLM_F_REPLACE;
1536 		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1537 			err = -ENOENT;
1538 			goto out_dev_put;
1539 		} else {
1540 			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
1541 			if (IS_ERR(n)) {
1542 				err = PTR_ERR(n);
1543 				goto out_dev_put;
1544 			}
1545 		}
1546 
1547 		err = neigh_update(n,
1548 				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
1549 				   ndm->ndm_state,
1550 				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
1551 				   NEIGH_UPDATE_F_ADMIN);
1552 
1553 		neigh_release(n);
1554 		goto out_dev_put;
1555 	}
1556 
1557 	read_unlock(&neigh_tbl_lock);
1558 	err = -EADDRNOTAVAIL;
1559 out_dev_put:
1560 	if (dev)
1561 		dev_put(dev);
1562 out:
1563 	return err;
1564 }
1565 
1566 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1567 {
1568 	struct rtattr *nest = NULL;
1569 
1570 	nest = RTA_NEST(skb, NDTA_PARMS);
1571 
1572 	if (parms->dev)
1573 		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1574 
1575 	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1576 	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1577 	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1578 	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1579 	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1580 	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1581 	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1582 	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1583 		      parms->base_reachable_time);
1584 	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1585 	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1586 	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1587 	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1588 	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1589 	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1590 
1591 	return RTA_NEST_END(skb, nest);
1592 
1593 rtattr_failure:
1594 	return RTA_NEST_CANCEL(skb, nest);
1595 }
1596 
1597 static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1598 			      struct netlink_callback *cb)
1599 {
1600 	struct nlmsghdr *nlh;
1601 	struct ndtmsg *ndtmsg;
1602 
1603 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1604 			       NLM_F_MULTI);
1605 
1606 	ndtmsg = NLMSG_DATA(nlh);
1607 
1608 	read_lock_bh(&tbl->lock);
1609 	ndtmsg->ndtm_family = tbl->family;
1610 	ndtmsg->ndtm_pad1   = 0;
1611 	ndtmsg->ndtm_pad2   = 0;
1612 
1613 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1614 	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1615 	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1616 	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1617 	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1618 
1619 	{
1620 		unsigned long now = jiffies;
1621 		unsigned int flush_delta = now - tbl->last_flush;
1622 		unsigned int rand_delta = now - tbl->last_rand;
1623 
1624 		struct ndt_config ndc = {
1625 			.ndtc_key_len		= tbl->key_len,
1626 			.ndtc_entry_size	= tbl->entry_size,
1627 			.ndtc_entries		= atomic_read(&tbl->entries),
1628 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1629 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1630 			.ndtc_hash_rnd		= tbl->hash_rnd,
1631 			.ndtc_hash_mask		= tbl->hash_mask,
1632 			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
1633 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1634 		};
1635 
1636 		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1637 	}
1638 
1639 	{
1640 		int cpu;
1641 		struct ndt_stats ndst;
1642 
1643 		memset(&ndst, 0, sizeof(ndst));
1644 
1645 		for_each_possible_cpu(cpu) {
1646 			struct neigh_statistics	*st;
1647 
1648 			st = per_cpu_ptr(tbl->stats, cpu);
1649 			ndst.ndts_allocs		+= st->allocs;
1650 			ndst.ndts_destroys		+= st->destroys;
1651 			ndst.ndts_hash_grows		+= st->hash_grows;
1652 			ndst.ndts_res_failed		+= st->res_failed;
1653 			ndst.ndts_lookups		+= st->lookups;
1654 			ndst.ndts_hits			+= st->hits;
1655 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1656 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1657 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1658 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1659 		}
1660 
1661 		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1662 	}
1663 
1664 	BUG_ON(tbl->parms.dev);
1665 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1666 		goto rtattr_failure;
1667 
1668 	read_unlock_bh(&tbl->lock);
1669 	return NLMSG_END(skb, nlh);
1670 
1671 rtattr_failure:
1672 	read_unlock_bh(&tbl->lock);
1673 	return NLMSG_CANCEL(skb, nlh);
1674 
1675 nlmsg_failure:
1676 	return -1;
1677 }
1678 
1679 static int neightbl_fill_param_info(struct neigh_table *tbl,
1680 				    struct neigh_parms *parms,
1681 				    struct sk_buff *skb,
1682 				    struct netlink_callback *cb)
1683 {
1684 	struct ndtmsg *ndtmsg;
1685 	struct nlmsghdr *nlh;
1686 
1687 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1688 			       NLM_F_MULTI);
1689 
1690 	ndtmsg = NLMSG_DATA(nlh);
1691 
1692 	read_lock_bh(&tbl->lock);
1693 	ndtmsg->ndtm_family = tbl->family;
1694 	ndtmsg->ndtm_pad1   = 0;
1695 	ndtmsg->ndtm_pad2   = 0;
1696 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1697 
1698 	if (neightbl_fill_parms(skb, parms) < 0)
1699 		goto rtattr_failure;
1700 
1701 	read_unlock_bh(&tbl->lock);
1702 	return NLMSG_END(skb, nlh);
1703 
1704 rtattr_failure:
1705 	read_unlock_bh(&tbl->lock);
1706 	return NLMSG_CANCEL(skb, nlh);
1707 
1708 nlmsg_failure:
1709 	return -1;
1710 }
1711 
1712 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1713 						      int ifindex)
1714 {
1715 	struct neigh_parms *p;
1716 
1717 	for (p = &tbl->parms; p; p = p->next)
1718 		if ((p->dev && p->dev->ifindex == ifindex) ||
1719 		    (!p->dev && !ifindex))
1720 			return p;
1721 
1722 	return NULL;
1723 }
1724 
1725 int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1726 {
1727 	struct neigh_table *tbl;
1728 	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
1729 	struct rtattr **tb = arg;
1730 	int err = -EINVAL;
1731 
1732 	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
1733 		return -EINVAL;
1734 
1735 	read_lock(&neigh_tbl_lock);
1736 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1737 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1738 			continue;
1739 
1740 		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
1741 			break;
1742 	}
1743 
1744 	if (tbl == NULL) {
1745 		err = -ENOENT;
1746 		goto errout;
1747 	}
1748 
1749 	/*
1750 	 * We acquire tbl->lock to be nice to the periodic timers and
1751 	 * make sure they always see a consistent set of values.
1752 	 */
1753 	write_lock_bh(&tbl->lock);
1754 
1755 	if (tb[NDTA_THRESH1 - 1])
1756 		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
1757 
1758 	if (tb[NDTA_THRESH2 - 1])
1759 		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
1760 
1761 	if (tb[NDTA_THRESH3 - 1])
1762 		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
1763 
1764 	if (tb[NDTA_GC_INTERVAL - 1])
1765 		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
1766 
1767 	if (tb[NDTA_PARMS - 1]) {
1768 		struct rtattr *tbp[NDTPA_MAX];
1769 		struct neigh_parms *p;
1770 		u32 ifindex = 0;
1771 
1772 		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
1773 			goto rtattr_failure;
1774 
1775 		if (tbp[NDTPA_IFINDEX - 1])
1776 			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
1777 
1778 		p = lookup_neigh_params(tbl, ifindex);
1779 		if (p == NULL) {
1780 			err = -ENOENT;
1781 			goto rtattr_failure;
1782 		}
1783 
1784 		if (tbp[NDTPA_QUEUE_LEN - 1])
1785 			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
1786 
1787 		if (tbp[NDTPA_PROXY_QLEN - 1])
1788 			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
1789 
1790 		if (tbp[NDTPA_APP_PROBES - 1])
1791 			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
1792 
1793 		if (tbp[NDTPA_UCAST_PROBES - 1])
1794 			p->ucast_probes =
1795 			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
1796 
1797 		if (tbp[NDTPA_MCAST_PROBES - 1])
1798 			p->mcast_probes =
1799 			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
1800 
1801 		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
1802 			p->base_reachable_time =
1803 			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
1804 
1805 		if (tbp[NDTPA_GC_STALETIME - 1])
1806 			p->gc_staletime =
1807 			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
1808 
1809 		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
1810 			p->delay_probe_time =
1811 			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
1812 
1813 		if (tbp[NDTPA_RETRANS_TIME - 1])
1814 			p->retrans_time =
1815 			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
1816 
1817 		if (tbp[NDTPA_ANYCAST_DELAY - 1])
1818 			p->anycast_delay =
1819 			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
1820 
1821 		if (tbp[NDTPA_PROXY_DELAY - 1])
1822 			p->proxy_delay =
1823 			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
1824 
1825 		if (tbp[NDTPA_LOCKTIME - 1])
1826 			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
1827 	}
1828 
1829 	err = 0;
1830 
1831 rtattr_failure:
1832 	write_unlock_bh(&tbl->lock);
1833 errout:
1834 	read_unlock(&neigh_tbl_lock);
1835 	return err;
1836 }
1837 
1838 int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1839 {
1840 	int idx, family;
1841 	int s_idx = cb->args[0];
1842 	struct neigh_table *tbl;
1843 
1844 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1845 
1846 	read_lock(&neigh_tbl_lock);
1847 	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
1848 		struct neigh_parms *p;
1849 
1850 		if (idx < s_idx || (family && tbl->family != family))
1851 			continue;
1852 
1853 		if (neightbl_fill_info(tbl, skb, cb) <= 0)
1854 			break;
1855 
1856 		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
1857 			if (idx < s_idx)
1858 				continue;
1859 
1860 			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
1861 				goto out;
1862 		}
1863 
1864 	}
1865 out:
1866 	read_unlock(&neigh_tbl_lock);
1867 	cb->args[0] = idx;
1868 
1869 	return skb->len;
1870 }
1871 
1872 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1873 			   u32 pid, u32 seq, int event, unsigned int flags)
1874 {
1875 	unsigned long now = jiffies;
1876 	unsigned char *b = skb->tail;
1877 	struct nda_cacheinfo ci;
1878 	int locked = 0;
1879 	u32 probes;
1880 	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
1881 					 sizeof(struct ndmsg), flags);
1882 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1883 
1884 	ndm->ndm_family	 = n->ops->family;
1885 	ndm->ndm_pad1    = 0;
1886 	ndm->ndm_pad2    = 0;
1887 	ndm->ndm_flags	 = n->flags;
1888 	ndm->ndm_type	 = n->type;
1889 	ndm->ndm_ifindex = n->dev->ifindex;
1890 	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1891 	read_lock_bh(&n->lock);
1892 	locked		 = 1;
1893 	ndm->ndm_state	 = n->nud_state;
1894 	if (n->nud_state & NUD_VALID)
1895 		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1896 	ci.ndm_used	 = now - n->used;
1897 	ci.ndm_confirmed = now - n->confirmed;
1898 	ci.ndm_updated	 = now - n->updated;
1899 	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
1900 	probes = atomic_read(&n->probes);
1901 	read_unlock_bh(&n->lock);
1902 	locked		 = 0;
1903 	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1904 	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
1905 	nlh->nlmsg_len	 = skb->tail - b;
1906 	return skb->len;
1907 
1908 nlmsg_failure:
1909 rtattr_failure:
1910 	if (locked)
1911 		read_unlock_bh(&n->lock);
1912 	skb_trim(skb, b - skb->data);
1913 	return -1;
1914 }
1915 
1916 
1917 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1918 			    struct netlink_callback *cb)
1919 {
1920 	struct neighbour *n;
1921 	int rc, h, s_h = cb->args[1];
1922 	int idx, s_idx = idx = cb->args[2];
1923 
1924 	for (h = 0; h <= tbl->hash_mask; h++) {
1925 		if (h < s_h)
1926 			continue;
1927 		if (h > s_h)
1928 			s_idx = 0;
1929 		read_lock_bh(&tbl->lock);
1930 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
1931 			if (idx < s_idx)
1932 				continue;
1933 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1934 					    cb->nlh->nlmsg_seq,
1935 					    RTM_NEWNEIGH,
1936 					    NLM_F_MULTI) <= 0) {
1937 				read_unlock_bh(&tbl->lock);
1938 				rc = -1;
1939 				goto out;
1940 			}
1941 		}
1942 		read_unlock_bh(&tbl->lock);
1943 	}
1944 	rc = skb->len;
1945 out:
1946 	cb->args[1] = h;
1947 	cb->args[2] = idx;
1948 	return rc;
1949 }
1950 
1951 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1952 {
1953 	struct neigh_table *tbl;
1954 	int t, family, s_t;
1955 
1956 	read_lock(&neigh_tbl_lock);
1957 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1958 	s_t = cb->args[0];
1959 
1960 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
1961 		if (t < s_t || (family && tbl->family != family))
1962 			continue;
1963 		if (t > s_t)
1964 			memset(&cb->args[1], 0, sizeof(cb->args) -
1965 						sizeof(cb->args[0]));
1966 		if (neigh_dump_table(tbl, skb, cb) < 0)
1967 			break;
1968 	}
1969 	read_unlock(&neigh_tbl_lock);
1970 
1971 	cb->args[0] = t;
1972 	return skb->len;
1973 }
1974 
1975 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1976 {
1977 	int chain;
1978 
1979 	read_lock_bh(&tbl->lock);
1980 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1981 		struct neighbour *n;
1982 
1983 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1984 			cb(n, cookie);
1985 	}
1986 	read_unlock_bh(&tbl->lock);
1987 }
1988 EXPORT_SYMBOL(neigh_for_each);
1989 
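/*
 * Unlink, mark dead and release every entry for which @cb returns
 * nonzero.  Unlike neigh_for_each() above, this variant modifies the
 * hash chains, hence the stronger locking requirement below.
 */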
1990 /* The tbl->lock must be held as a writer and BH disabled. */
1991 void __neigh_for_each_release(struct neigh_table *tbl,
1992 			      int (*cb)(struct neighbour *))
1993 {
1994 	int chain;
1995 
1996 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1997 		struct neighbour *n, **np;
1998 
1999 		np = &tbl->hash_buckets[chain];
2000 		while ((n = *np) != NULL) {
2001 			int release;
2002 
2003 			write_lock(&n->lock);
2004 			release = cb(n);
2005 			if (release) {
2006 				*np = n->next;
2007 				n->dead = 1;
2008 			} else
2009 				np = &n->next;
2010 			write_unlock(&n->lock);
2011 			if (release)
2012 				neigh_release(n);
2013 		}
2014 	}
2015 }
2016 EXPORT_SYMBOL(__neigh_for_each_release);
2017 
2018 #ifdef CONFIG_PROC_FS
2019 
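/*
 * seq_file iteration over a neighbour table for /proc output.  The
 * neigh_get_* helpers walk the main hash table: NEIGH_SEQ_SKIP_NOARP
 * hides entries whose state is nothing but NUD_NOARP, and an optional
 * neigh_sub_iter hook lets a protocol expand a single entry into
 * several output records.
 */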
2020 static struct neighbour *neigh_get_first(struct seq_file *seq)
2021 {
2022 	struct neigh_seq_state *state = seq->private;
2023 	struct neigh_table *tbl = state->tbl;
2024 	struct neighbour *n = NULL;
2025 	int bucket;
2026 
2027 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2028 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2029 		n = tbl->hash_buckets[bucket];
2030 
2031 		while (n) {
2032 			if (state->neigh_sub_iter) {
2033 				loff_t fakep = 0;
2034 				void *v;
2035 
2036 				v = state->neigh_sub_iter(state, n, &fakep);
2037 				if (!v)
2038 					goto next;
2039 			}
2040 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2041 				break;
2042 			if (n->nud_state & ~NUD_NOARP)
2043 				break;
2044 		next:
2045 			n = n->next;
2046 		}
2047 
2048 		if (n)
2049 			break;
2050 	}
2051 	state->bucket = bucket;
2052 
2053 	return n;
2054 }
2055 
2056 static struct neighbour *neigh_get_next(struct seq_file *seq,
2057 					struct neighbour *n,
2058 					loff_t *pos)
2059 {
2060 	struct neigh_seq_state *state = seq->private;
2061 	struct neigh_table *tbl = state->tbl;
2062 
2063 	if (state->neigh_sub_iter) {
2064 		void *v = state->neigh_sub_iter(state, n, pos);
2065 		if (v)
2066 			return n;
2067 	}
2068 	n = n->next;
2069 
2070 	while (1) {
2071 		while (n) {
2072 			if (state->neigh_sub_iter) {
2073 				void *v = state->neigh_sub_iter(state, n, pos);
2074 				if (v)
2075 					return n;
2076 				goto next;
2077 			}
2078 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2079 				break;
2080 
2081 			if (n->nud_state & ~NUD_NOARP)
2082 				break;
2083 		next:
2084 			n = n->next;
2085 		}
2086 
2087 		if (n)
2088 			break;
2089 
2090 		if (++state->bucket > tbl->hash_mask)
2091 			break;
2092 
2093 		n = tbl->hash_buckets[state->bucket];
2094 	}
2095 
2096 	if (n && pos)
2097 		--(*pos);
2098 	return n;
2099 }
2100 
2101 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2102 {
2103 	struct neighbour *n = neigh_get_first(seq);
2104 
2105 	if (n) {
2106 		while (*pos) {
2107 			n = neigh_get_next(seq, n, pos);
2108 			if (!n)
2109 				break;
2110 		}
2111 	}
2112 	return *pos ? NULL : n;
2113 }
2114 
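/*
 * The pneigh_get_* helpers perform the same walk over the proxy
 * entries, which live in the fixed-size tbl->phash_buckets[] table
 * (PNEIGH_HASHMASK + 1 buckets).  In a combined walk they are emitted
 * after the ordinary neighbour entries.
 */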
2115 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2116 {
2117 	struct neigh_seq_state *state = seq->private;
2118 	struct neigh_table *tbl = state->tbl;
2119 	struct pneigh_entry *pn = NULL;
2120 	int bucket;
2121 
2122 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2123 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2124 		pn = tbl->phash_buckets[bucket];
2125 		if (pn)
2126 			break;
2127 	}
2128 	state->bucket = bucket;
2129 
2130 	return pn;
2131 }
2132 
2133 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2134 					    struct pneigh_entry *pn,
2135 					    loff_t *pos)
2136 {
2137 	struct neigh_seq_state *state = seq->private;
2138 	struct neigh_table *tbl = state->tbl;
2139 
2140 	pn = pn->next;
2141 	while (!pn) {
2142 		if (++state->bucket > PNEIGH_HASHMASK)
2143 			break;
2144 		pn = tbl->phash_buckets[state->bucket];
2145 		if (pn)
2146 			break;
2147 	}
2148 
2149 	if (pn && pos)
2150 		--(*pos);
2151 
2152 	return pn;
2153 }
2154 
2155 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2156 {
2157 	struct pneigh_entry *pn = pneigh_get_first(seq);
2158 
2159 	if (pn) {
2160 		while (*pos) {
2161 			pn = pneigh_get_next(seq, pn, pos);
2162 			if (!pn)
2163 				break;
2164 		}
2165 	}
2166 	return *pos ? NULL : pn;
2167 }
2168 
2169 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2170 {
2171 	struct neigh_seq_state *state = seq->private;
2172 	void *rc;
2173 
2174 	rc = neigh_get_idx(seq, pos);
2175 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2176 		rc = pneigh_get_idx(seq, pos);
2177 
2178 	return rc;
2179 }
2180 
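/*
 * Entry points used by the protocols' seq_files.  *pos == 0 yields
 * SEQ_START_TOKEN for a header line; positions >= 1 map onto table
 * entries.  A typical user (ARP's /proc/net/arp, for instance) starts
 * its walk along the lines of:
 *
 *	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 *
 * The table lock is taken here and released only in neigh_seq_stop(),
 * so ->start/->stop invocations must stay strictly paired.
 */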
2181 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2182 {
2183 	struct neigh_seq_state *state = seq->private;
2184 	loff_t pos_minus_one;
2185 
2186 	state->tbl = tbl;
2187 	state->bucket = 0;
2188 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2189 
2190 	read_lock_bh(&tbl->lock);
2191 
2192 	pos_minus_one = *pos - 1;
2193 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2194 }
2195 EXPORT_SYMBOL(neigh_seq_start);
2196 
2197 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2198 {
2199 	struct neigh_seq_state *state;
2200 	void *rc;
2201 
2202 	if (v == SEQ_START_TOKEN) {
2203 		rc = neigh_get_idx(seq, pos);
2204 		goto out;
2205 	}
2206 
2207 	state = seq->private;
2208 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2209 		rc = neigh_get_next(seq, v, NULL);
2210 		if (rc)
2211 			goto out;
2212 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2213 			rc = pneigh_get_first(seq);
2214 	} else {
2215 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2216 		rc = pneigh_get_next(seq, v, NULL);
2217 	}
2218 out:
2219 	++(*pos);
2220 	return rc;
2221 }
2222 EXPORT_SYMBOL(neigh_seq_next);
2223 
2224 void neigh_seq_stop(struct seq_file *seq, void *v)
2225 {
2226 	struct neigh_seq_state *state = seq->private;
2227 	struct neigh_table *tbl = state->tbl;
2228 
2229 	read_unlock_bh(&tbl->lock);
2230 }
2231 EXPORT_SYMBOL(neigh_seq_stop);
2232 
2233 /* statistics via seq_file */
2234 
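/*
 * Per-CPU statistics, typically exposed as /proc/net/stat/<tbl->id>
 * (e.g. arp_cache).  Position 0 produces the header line via
 * SEQ_START_TOKEN; positions >= 1 walk the possible CPUs, leaving
 * *pos at cpu + 1 so the next call resumes with the following CPU.
 */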
2235 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2236 {
2237 	struct proc_dir_entry *pde = seq->private;
2238 	struct neigh_table *tbl = pde->data;
2239 	int cpu;
2240 
2241 	if (*pos == 0)
2242 		return SEQ_START_TOKEN;
2243 
2244 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2245 		if (!cpu_possible(cpu))
2246 			continue;
2247 		*pos = cpu+1;
2248 		return per_cpu_ptr(tbl->stats, cpu);
2249 	}
2250 	return NULL;
2251 }
2252 
2253 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2254 {
2255 	struct proc_dir_entry *pde = seq->private;
2256 	struct neigh_table *tbl = pde->data;
2257 	int cpu;
2258 
2259 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2260 		if (!cpu_possible(cpu))
2261 			continue;
2262 		*pos = cpu+1;
2263 		return per_cpu_ptr(tbl->stats, cpu);
2264 	}
2265 	return NULL;
2266 }
2267 
2268 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2269 {
2270 
2271 }
2272 
2273 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2274 {
2275 	struct proc_dir_entry *pde = seq->private;
2276 	struct neigh_table *tbl = pde->data;
2277 	struct neigh_statistics *st = v;
2278 
2279 	if (v == SEQ_START_TOKEN) {
2280 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2281 		return 0;
2282 	}
2283 
2284 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2285 			"%08lx %08lx  %08lx %08lx\n",
2286 		   atomic_read(&tbl->entries),
2287 
2288 		   st->allocs,
2289 		   st->destroys,
2290 		   st->hash_grows,
2291 
2292 		   st->lookups,
2293 		   st->hits,
2294 
2295 		   st->res_failed,
2296 
2297 		   st->rcv_probes_mcast,
2298 		   st->rcv_probes_ucast,
2299 
2300 		   st->periodic_gc_runs,
2301 		   st->forced_gc_runs
2302 		   );
2303 
2304 	return 0;
2305 }
2306 
2307 static struct seq_operations neigh_stat_seq_ops = {
2308 	.start	= neigh_stat_seq_start,
2309 	.next	= neigh_stat_seq_next,
2310 	.stop	= neigh_stat_seq_stop,
2311 	.show	= neigh_stat_seq_show,
2312 };
2313 
2314 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2315 {
2316 	int ret = seq_open(file, &neigh_stat_seq_ops);
2317 
2318 	if (!ret) {
2319 		struct seq_file *sf = file->private_data;
2320 		sf->private = PDE(inode);
2321 	}
2322 	return ret;
2323 }
2324 
2325 static struct file_operations neigh_stat_seq_fops = {
2326 	.owner	 = THIS_MODULE,
2327 	.open 	 = neigh_stat_seq_open,
2328 	.read	 = seq_read,
2329 	.llseek	 = seq_lseek,
2330 	.release = seq_release,
2331 };
2332 
2333 #endif /* CONFIG_PROC_FS */
2334 
2335 #ifdef CONFIG_ARPD
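/*
 * ARPD support: delegate resolution work to a userspace daemon over
 * rtnetlink.  neigh_app_ns() broadcasts an RTM_GETNEIGH request so
 * the daemon may supply or refresh an address; neigh_app_notify()
 * announces state changes with RTM_NEWNEIGH.  Both are best-effort:
 * allocation or fill failures silently drop the notification.
 */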
2336 void neigh_app_ns(struct neighbour *n)
2337 {
2338 	struct nlmsghdr  *nlh;
2339 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2340 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2341 
2342 	if (!skb)
2343 		return;
2344 
2345 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2346 		kfree_skb(skb);
2347 		return;
2348 	}
2349 	nlh			   = (struct nlmsghdr *)skb->data;
2350 	nlh->nlmsg_flags	   = NLM_F_REQUEST;
2351 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2352 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2353 }
2354 
2355 static void neigh_app_notify(struct neighbour *n)
2356 {
2357 	struct nlmsghdr *nlh;
2358 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2359 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2360 
2361 	if (!skb)
2362 		return;
2363 
2364 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2365 		kfree_skb(skb);
2366 		return;
2367 	}
2368 	nlh			   = (struct nlmsghdr *)skb->data;
2369 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2370 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2371 }
2372 
2373 #endif /* CONFIG_ARPD */
2374 
2375 #ifdef CONFIG_SYSCTL
2376 
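/*
 * Template for the per-parms sysctl subtree.  neigh_sysctl_register()
 * below addresses neigh_vars[] by index, so the order of the entries
 * is significant:
 *
 *	 0 mcast_solicit		 9 anycast_delay
 *	 1 ucast_solicit		10 proxy_delay
 *	 2 app_solicit			11 locktime
 *	 3 retrans_time			12 gc_interval
 *	 4 base_reachable_time		13 gc_thresh1
 *	 5 delay_first_probe_time	14 gc_thresh2
 *	 6 gc_stale_time		15 gc_thresh3
 *	 7 unres_qlen			16 retrans_time_ms
 *	 8 proxy_qlen			17 base_reachable_time_ms
 */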
2377 static struct neigh_sysctl_table {
2378 	struct ctl_table_header *sysctl_header;
2379 	ctl_table		neigh_vars[__NET_NEIGH_MAX];
2380 	ctl_table		neigh_dev[2];
2381 	ctl_table		neigh_neigh_dir[2];
2382 	ctl_table		neigh_proto_dir[2];
2383 	ctl_table		neigh_root_dir[2];
2384 } neigh_sysctl_template = {
2385 	.neigh_vars = {
2386 		{
2387 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2388 			.procname	= "mcast_solicit",
2389 			.maxlen		= sizeof(int),
2390 			.mode		= 0644,
2391 			.proc_handler	= &proc_dointvec,
2392 		},
2393 		{
2394 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2395 			.procname	= "ucast_solicit",
2396 			.maxlen		= sizeof(int),
2397 			.mode		= 0644,
2398 			.proc_handler	= &proc_dointvec,
2399 		},
2400 		{
2401 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2402 			.procname	= "app_solicit",
2403 			.maxlen		= sizeof(int),
2404 			.mode		= 0644,
2405 			.proc_handler	= &proc_dointvec,
2406 		},
2407 		{
2408 			.ctl_name	= NET_NEIGH_RETRANS_TIME,
2409 			.procname	= "retrans_time",
2410 			.maxlen		= sizeof(int),
2411 			.mode		= 0644,
2412 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2413 		},
2414 		{
2415 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2416 			.procname	= "base_reachable_time",
2417 			.maxlen		= sizeof(int),
2418 			.mode		= 0644,
2419 			.proc_handler	= &proc_dointvec_jiffies,
2420 			.strategy	= &sysctl_jiffies,
2421 		},
2422 		{
2423 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2424 			.procname	= "delay_first_probe_time",
2425 			.maxlen		= sizeof(int),
2426 			.mode		= 0644,
2427 			.proc_handler	= &proc_dointvec_jiffies,
2428 			.strategy	= &sysctl_jiffies,
2429 		},
2430 		{
2431 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2432 			.procname	= "gc_stale_time",
2433 			.maxlen		= sizeof(int),
2434 			.mode		= 0644,
2435 			.proc_handler	= &proc_dointvec_jiffies,
2436 			.strategy	= &sysctl_jiffies,
2437 		},
2438 		{
2439 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2440 			.procname	= "unres_qlen",
2441 			.maxlen		= sizeof(int),
2442 			.mode		= 0644,
2443 			.proc_handler	= &proc_dointvec,
2444 		},
2445 		{
2446 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2447 			.procname	= "proxy_qlen",
2448 			.maxlen		= sizeof(int),
2449 			.mode		= 0644,
2450 			.proc_handler	= &proc_dointvec,
2451 		},
2452 		{
2453 			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
2454 			.procname	= "anycast_delay",
2455 			.maxlen		= sizeof(int),
2456 			.mode		= 0644,
2457 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2458 		},
2459 		{
2460 			.ctl_name	= NET_NEIGH_PROXY_DELAY,
2461 			.procname	= "proxy_delay",
2462 			.maxlen		= sizeof(int),
2463 			.mode		= 0644,
2464 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2465 		},
2466 		{
2467 			.ctl_name	= NET_NEIGH_LOCKTIME,
2468 			.procname	= "locktime",
2469 			.maxlen		= sizeof(int),
2470 			.mode		= 0644,
2471 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2472 		},
2473 		{
2474 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2475 			.procname	= "gc_interval",
2476 			.maxlen		= sizeof(int),
2477 			.mode		= 0644,
2478 			.proc_handler	= &proc_dointvec_jiffies,
2479 			.strategy	= &sysctl_jiffies,
2480 		},
2481 		{
2482 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2483 			.procname	= "gc_thresh1",
2484 			.maxlen		= sizeof(int),
2485 			.mode		= 0644,
2486 			.proc_handler	= &proc_dointvec,
2487 		},
2488 		{
2489 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2490 			.procname	= "gc_thresh2",
2491 			.maxlen		= sizeof(int),
2492 			.mode		= 0644,
2493 			.proc_handler	= &proc_dointvec,
2494 		},
2495 		{
2496 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2497 			.procname	= "gc_thresh3",
2498 			.maxlen		= sizeof(int),
2499 			.mode		= 0644,
2500 			.proc_handler	= &proc_dointvec,
2501 		},
2502 		{
2503 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2504 			.procname	= "retrans_time_ms",
2505 			.maxlen		= sizeof(int),
2506 			.mode		= 0644,
2507 			.proc_handler	= &proc_dointvec_ms_jiffies,
2508 			.strategy	= &sysctl_ms_jiffies,
2509 		},
2510 		{
2511 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2512 			.procname	= "base_reachable_time_ms",
2513 			.maxlen		= sizeof(int),
2514 			.mode		= 0644,
2515 			.proc_handler	= &proc_dointvec_ms_jiffies,
2516 			.strategy	= &sysctl_ms_jiffies,
2517 		},
2518 	},
2519 	.neigh_dev = {
2520 		{
2521 			.ctl_name	= NET_PROTO_CONF_DEFAULT,
2522 			.procname	= "default",
2523 			.mode		= 0555,
2524 		},
2525 	},
2526 	.neigh_neigh_dir = {
2527 		{
2528 			.procname	= "neigh",
2529 			.mode		= 0555,
2530 		},
2531 	},
2532 	.neigh_proto_dir = {
2533 		{
2534 			.mode		= 0555,
2535 		},
2536 	},
2537 	.neigh_root_dir = {
2538 		{
2539 			.ctl_name	= CTL_NET,
2540 			.procname	= "net",
2541 			.mode		= 0555,
2542 		},
2543 	},
2544 };
2545 
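/*
 * Register the net/<proto>/neigh/<dev|"default"> sysctl subtree for
 * one set of neigh_parms.  With a NULL @dev the "default" directory
 * is built and the four table-wide knobs (gc_interval, gc_thresh1-3)
 * are wired to the ints that are assumed to directly follow *p in
 * struct neigh_table; with a real device those entries are removed.
 * A protocol typically calls this along the lines of (illustrative,
 * modelled on ARP's usage):
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 */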
2546 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2547 			  int p_id, int pdev_id, char *p_name,
2548 			  proc_handler *handler, ctl_handler *strategy)
2549 {
2550 	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2551 	const char *dev_name_source = NULL;
2552 	char *dev_name = NULL;
2553 	int err = 0;
2554 
2555 	if (!t)
2556 		return -ENOBUFS;
2557 	memcpy(t, &neigh_sysctl_template, sizeof(*t));
2558 	t->neigh_vars[0].data  = &p->mcast_probes;
2559 	t->neigh_vars[1].data  = &p->ucast_probes;
2560 	t->neigh_vars[2].data  = &p->app_probes;
2561 	t->neigh_vars[3].data  = &p->retrans_time;
2562 	t->neigh_vars[4].data  = &p->base_reachable_time;
2563 	t->neigh_vars[5].data  = &p->delay_probe_time;
2564 	t->neigh_vars[6].data  = &p->gc_staletime;
2565 	t->neigh_vars[7].data  = &p->queue_len;
2566 	t->neigh_vars[8].data  = &p->proxy_qlen;
2567 	t->neigh_vars[9].data  = &p->anycast_delay;
2568 	t->neigh_vars[10].data = &p->proxy_delay;
2569 	t->neigh_vars[11].data = &p->locktime;
2570 
2571 	if (dev) {
2572 		dev_name_source = dev->name;
2573 		t->neigh_dev[0].ctl_name = dev->ifindex;
2574 		t->neigh_vars[12].procname = NULL;
2575 		t->neigh_vars[13].procname = NULL;
2576 		t->neigh_vars[14].procname = NULL;
2577 		t->neigh_vars[15].procname = NULL;
2578 	} else {
2579 		dev_name_source = t->neigh_dev[0].procname;
2580 		t->neigh_vars[12].data = (int *)(p + 1);
2581 		t->neigh_vars[13].data = (int *)(p + 1) + 1;
2582 		t->neigh_vars[14].data = (int *)(p + 1) + 2;
2583 		t->neigh_vars[15].data = (int *)(p + 1) + 3;
2584 	}
2585 
2586 	t->neigh_vars[16].data  = &p->retrans_time;
2587 	t->neigh_vars[17].data  = &p->base_reachable_time;
2588 
2589 	if (handler || strategy) {
2590 		/* RetransTime */
2591 		t->neigh_vars[3].proc_handler = handler;
2592 		t->neigh_vars[3].strategy = strategy;
2593 		t->neigh_vars[3].extra1 = dev;
2594 		/* ReachableTime */
2595 		t->neigh_vars[4].proc_handler = handler;
2596 		t->neigh_vars[4].strategy = strategy;
2597 		t->neigh_vars[4].extra1 = dev;
2598 		/* RetransTime (in milliseconds)*/
2599 		t->neigh_vars[16].proc_handler = handler;
2600 		t->neigh_vars[16].strategy = strategy;
2601 		t->neigh_vars[16].extra1 = dev;
2602 		/* ReachableTime (in milliseconds) */
2603 		t->neigh_vars[17].proc_handler = handler;
2604 		t->neigh_vars[17].strategy = strategy;
2605 		t->neigh_vars[17].extra1 = dev;
2606 	}
2607 
2608 	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2609 	if (!dev_name) {
2610 		err = -ENOBUFS;
2611 		goto free;
2612 	}
2613 
2614 	t->neigh_dev[0].procname = dev_name;
2615 
2616 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
2617 
2618 	t->neigh_proto_dir[0].procname = p_name;
2619 	t->neigh_proto_dir[0].ctl_name = p_id;
2620 
2621 	t->neigh_dev[0].child	       = t->neigh_vars;
2622 	t->neigh_neigh_dir[0].child    = t->neigh_dev;
2623 	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2624 	t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2625 
2626 	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2627 	if (!t->sysctl_header) {
2628 		err = -ENOBUFS;
2629 		goto free_procname;
2630 	}
2631 	p->sysctl_table = t;
2632 	return 0;
2633 
2634 	/* error path */
2635  free_procname:
2636 	kfree(dev_name);
2637  free:
2638 	kfree(t);
2639 
2640 	return err;
2641 }
2642 
2643 void neigh_sysctl_unregister(struct neigh_parms *p)
2644 {
2645 	if (p->sysctl_table) {
2646 		struct neigh_sysctl_table *t = p->sysctl_table;
2647 		p->sysctl_table = NULL;
2648 		unregister_sysctl_table(t->sysctl_header);
2649 		kfree(t->neigh_dev[0].procname);
2650 		kfree(t);
2651 	}
2652 }
2653 
2654 #endif	/* CONFIG_SYSCTL */
2655 
2656 EXPORT_SYMBOL(__neigh_event_send);
2657 EXPORT_SYMBOL(neigh_add);
2658 EXPORT_SYMBOL(neigh_changeaddr);
2659 EXPORT_SYMBOL(neigh_compat_output);
2660 EXPORT_SYMBOL(neigh_connected_output);
2661 EXPORT_SYMBOL(neigh_create);
2662 EXPORT_SYMBOL(neigh_delete);
2663 EXPORT_SYMBOL(neigh_destroy);
2664 EXPORT_SYMBOL(neigh_dump_info);
2665 EXPORT_SYMBOL(neigh_event_ns);
2666 EXPORT_SYMBOL(neigh_ifdown);
2667 EXPORT_SYMBOL(neigh_lookup);
2668 EXPORT_SYMBOL(neigh_lookup_nodev);
2669 EXPORT_SYMBOL(neigh_parms_alloc);
2670 EXPORT_SYMBOL(neigh_parms_release);
2671 EXPORT_SYMBOL(neigh_rand_reach_time);
2672 EXPORT_SYMBOL(neigh_resolve_output);
2673 EXPORT_SYMBOL(neigh_table_clear);
2674 EXPORT_SYMBOL(neigh_table_init);
2675 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2676 EXPORT_SYMBOL(neigh_update);
2677 EXPORT_SYMBOL(neigh_update_hhs);
2678 EXPORT_SYMBOL(pneigh_enqueue);
2679 EXPORT_SYMBOL(pneigh_lookup);
2680 EXPORT_SYMBOL(neightbl_dump_info);
2681 EXPORT_SYMBOL(neightbl_set);
2682 
2683 #ifdef CONFIG_ARPD
2684 EXPORT_SYMBOL(neigh_app_ns);
2685 #endif
2686 #ifdef CONFIG_SYSCTL
2687 EXPORT_SYMBOL(neigh_sysctl_register);
2688 EXPORT_SYMBOL(neigh_sysctl_unregister);
2689 #endif
2690