xref: /linux/net/core/neighbour.c (revision d67b569f5f620c0fb95d5212642746b7ba9d29e4)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #include <linux/config.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/sched.h>
24 #include <linux/netdevice.h>
25 #include <linux/proc_fs.h>
26 #ifdef CONFIG_SYSCTL
27 #include <linux/sysctl.h>
28 #endif
29 #include <linux/times.h>
30 #include <net/neighbour.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 
37 #define NEIGH_DEBUG 1
38 
39 #define NEIGH_PRINTK(x...) printk(x)
40 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
41 #define NEIGH_PRINTK0 NEIGH_PRINTK
42 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
43 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
44 
45 #if NEIGH_DEBUG >= 1
46 #undef NEIGH_PRINTK1
47 #define NEIGH_PRINTK1 NEIGH_PRINTK
48 #endif
49 #if NEIGH_DEBUG >= 2
50 #undef NEIGH_PRINTK2
51 #define NEIGH_PRINTK2 NEIGH_PRINTK
52 #endif
53 
54 #define PNEIGH_HASHMASK		0xF
55 
56 static void neigh_timer_handler(unsigned long arg);
57 #ifdef CONFIG_ARPD
58 static void neigh_app_notify(struct neighbour *n);
59 #endif
60 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62 
63 static struct neigh_table *neigh_tables;
64 static struct file_operations neigh_stat_seq_fops;
65 
66 /*
67    Neighbour hash table buckets are protected by the rwlock tbl->lock.
68 
69    - All scans/updates of the hash buckets MUST be made under this lock.
70    - NOTHING clever should be done under this lock: no callbacks
71      into protocol backends, no attempts to send anything to the
72      network. That would deadlock if the backend/driver wants to
73      use the neighbour cache.
74    - If an entry requires some non-trivial action, increase
75      its reference count and release the table lock first.
76 
77    Neighbour entries are protected:
78    - by their reference count.
79    - by the rwlock neigh->lock.
80 
81    The reference count prevents destruction.
82 
83    neigh->lock mainly serializes the ll address data and its validity
84    state. However, the same lock also protects other entry fields:
85     - the timer
86     - the resolution queue
87 
88    Again, nothing clever shall be done under neigh->lock; the most
89    complicated procedure we allow there is dev->hard_header, which is
90    assumed to be simple and not to make callbacks into the
91    neighbour tables.
92 
93    The last lock is neigh_tbl_lock. It is a pure SMP lock protecting
94    the list of neighbour tables; the list is used only in process context.
95  */
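
/*
 * Editorial sketch (not part of the original file): the canonical way a
 * hypothetical caller follows the rules above -- scan for the entry under
 * tbl->lock, take a reference, drop the lock, and only then do anything
 * non-trivial with it:
 *
 *	read_lock_bh(&tbl->lock);
 *	n = find_entry_in_bucket(tbl, pkey, dev);
 *	if (n)
 *		neigh_hold(n);
 *	read_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_slow_work(n);
 *		neigh_release(n);
 *	}
 *
 * find_entry_in_bucket() and do_slow_work() are placeholders; see
 * neigh_lookup() below for the real in-tree version of this pattern.
 */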
96 
97 static DEFINE_RWLOCK(neigh_tbl_lock);
98 
99 static int neigh_blackhole(struct sk_buff *skb)
100 {
101 	kfree_skb(skb);
102 	return -ENETDOWN;
103 }
104 
105 /*
106  * Returns a value uniformly distributed in the interval
107  * (1/2)*base ... (3/2)*base. This matches the default IPv6 behaviour
108  * and is not overridable, because it is a genuinely reasonable choice.
109  */
110 
111 unsigned long neigh_rand_reach_time(unsigned long base)
112 {
113 	return (base ? (net_random() % base) + (base >> 1) : 0);
114 }
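
/*
 * Editorial worked example: with base = 30 * HZ (a typical
 * base_reachable_time), (base >> 1) + (net_random() % base) is
 * uniform over [15 * HZ, 45 * HZ), i.e. 15 to 45 seconds.
 */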
115 
116 
117 static int neigh_forced_gc(struct neigh_table *tbl)
118 {
119 	int shrunk = 0;
120 	int i;
121 
122 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
123 
124 	write_lock_bh(&tbl->lock);
125 	for (i = 0; i <= tbl->hash_mask; i++) {
126 		struct neighbour *n, **np;
127 
128 		np = &tbl->hash_buckets[i];
129 		while ((n = *np) != NULL) {
130 			/* Neighbour record may be discarded if:
131 			 * - nobody refers to it.
132 			 * - it is not permanent
133 			 */
134 			write_lock(&n->lock);
135 			if (atomic_read(&n->refcnt) == 1 &&
136 			    !(n->nud_state & NUD_PERMANENT)) {
137 				*np	= n->next;
138 				n->dead = 1;
139 				shrunk	= 1;
140 				write_unlock(&n->lock);
141 				neigh_release(n);
142 				continue;
143 			}
144 			write_unlock(&n->lock);
145 			np = &n->next;
146 		}
147 	}
148 
149 	tbl->last_flush = jiffies;
150 
151 	write_unlock_bh(&tbl->lock);
152 
153 	return shrunk;
154 }
155 
156 static int neigh_del_timer(struct neighbour *n)
157 {
158 	if ((n->nud_state & NUD_IN_TIMER) &&
159 	    del_timer(&n->timer)) {
160 		neigh_release(n);
161 		return 1;
162 	}
163 	return 0;
164 }
165 
166 static void pneigh_queue_purge(struct sk_buff_head *list)
167 {
168 	struct sk_buff *skb;
169 
170 	while ((skb = skb_dequeue(list)) != NULL) {
171 		dev_put(skb->dev);
172 		kfree_skb(skb);
173 	}
174 }
175 
176 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
177 {
178 	int i;
179 
180 	write_lock_bh(&tbl->lock);
181 
182 	for (i = 0; i <= tbl->hash_mask; i++) {
183 		struct neighbour *n, **np;
184 
185 		np = &tbl->hash_buckets[i];
186 		while ((n = *np) != NULL) {
187 			if (dev && n->dev != dev) {
188 				np = &n->next;
189 				continue;
190 			}
191 			*np = n->next;
192 			write_lock(&n->lock);	/* BH already off: tbl->lock was taken with _bh */
193 			n->dead = 1;
194 			neigh_del_timer(n);
195 			write_unlock(&n->lock);
196 			neigh_release(n);
197 		}
198 	}
199 
200 	write_unlock_bh(&tbl->lock);
201 }
202 
203 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
204 {
205 	int i;
206 
207 	write_lock_bh(&tbl->lock);
208 
209 	for (i = 0; i <= tbl->hash_mask; i++) {
210 		struct neighbour *n, **np = &tbl->hash_buckets[i];
211 
212 		while ((n = *np) != NULL) {
213 			if (dev && n->dev != dev) {
214 				np = &n->next;
215 				continue;
216 			}
217 			*np = n->next;
218 			write_lock(&n->lock);
219 			neigh_del_timer(n);
220 			n->dead = 1;
221 
222 			if (atomic_read(&n->refcnt) != 1) {
223 				/* The most unpleasant situation:
224 				   we must destroy the neighbour entry,
225 				   but someone still uses it.
226 
227 				   Destruction will be delayed until
228 				   the last user releases the entry, but
229 				   we must kill timers etc. and move
230 				   it to a safe state now.
231 				 */
232 				skb_queue_purge(&n->arp_queue);
233 				n->output = neigh_blackhole;
234 				if (n->nud_state & NUD_VALID)
235 					n->nud_state = NUD_NOARP;
236 				else
237 					n->nud_state = NUD_NONE;
238 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
239 			}
240 			write_unlock(&n->lock);
241 			neigh_release(n);
242 		}
243 	}
244 
245 	pneigh_ifdown(tbl, dev);
246 	write_unlock_bh(&tbl->lock);
247 
248 	del_timer_sync(&tbl->proxy_timer);
249 	pneigh_queue_purge(&tbl->proxy_queue);
250 	return 0;
251 }
252 
253 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
254 {
255 	struct neighbour *n = NULL;
256 	unsigned long now = jiffies;
257 	int entries;
258 
259 	entries = atomic_inc_return(&tbl->entries) - 1;
260 	if (entries >= tbl->gc_thresh3 ||
261 	    (entries >= tbl->gc_thresh2 &&
262 	     time_after(now, tbl->last_flush + 5 * HZ))) {
263 		if (!neigh_forced_gc(tbl) &&
264 		    entries >= tbl->gc_thresh3)
265 			goto out_entries;
266 	}
267 
268 	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
269 	if (!n)
270 		goto out_entries;
271 
272 	memset(n, 0, tbl->entry_size);
273 
274 	skb_queue_head_init(&n->arp_queue);
275 	rwlock_init(&n->lock);
276 	n->updated	  = n->used = now;
277 	n->nud_state	  = NUD_NONE;
278 	n->output	  = neigh_blackhole;
279 	n->parms	  = neigh_parms_clone(&tbl->parms);
280 	init_timer(&n->timer);
281 	n->timer.function = neigh_timer_handler;
282 	n->timer.data	  = (unsigned long)n;
283 
284 	NEIGH_CACHE_STAT_INC(tbl, allocs);
285 	n->tbl		  = tbl;
286 	atomic_set(&n->refcnt, 1);
287 	n->dead		  = 1;
288 out:
289 	return n;
290 
291 out_entries:
292 	atomic_dec(&tbl->entries);
293 	goto out;
294 }
295 
296 static struct neighbour **neigh_hash_alloc(unsigned int entries)
297 {
298 	unsigned long size = entries * sizeof(struct neighbour *);
299 	struct neighbour **ret;
300 
301 	if (size <= PAGE_SIZE) {
302 		ret = kmalloc(size, GFP_ATOMIC);
303 	} else {
304 		ret = (struct neighbour **)
305 			__get_free_pages(GFP_ATOMIC, get_order(size));
306 	}
307 	if (ret)
308 		memset(ret, 0, size);
309 
310 	return ret;
311 }
312 
313 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 {
315 	unsigned long size = entries * sizeof(struct neighbour *);
316 
317 	if (size <= PAGE_SIZE)
318 		kfree(hash);
319 	else
320 		free_pages((unsigned long)hash, get_order(size));
321 }
322 
323 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
324 {
325 	struct neighbour **new_hash, **old_hash;
326 	unsigned int i, new_hash_mask, old_entries;
327 
328 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
329 
330 	BUG_ON(new_entries & (new_entries - 1));
331 	new_hash = neigh_hash_alloc(new_entries);
332 	if (!new_hash)
333 		return;
334 
335 	old_entries = tbl->hash_mask + 1;
336 	new_hash_mask = new_entries - 1;
337 	old_hash = tbl->hash_buckets;
338 
339 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
340 	for (i = 0; i < old_entries; i++) {
341 		struct neighbour *n, *next;
342 
343 		for (n = old_hash[i]; n; n = next) {
344 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
345 
346 			hash_val &= new_hash_mask;
347 			next = n->next;
348 
349 			n->next = new_hash[hash_val];
350 			new_hash[hash_val] = n;
351 		}
352 	}
353 	tbl->hash_buckets = new_hash;
354 	tbl->hash_mask = new_hash_mask;
355 
356 	neigh_hash_free(old_hash, old_entries);
357 }
358 
359 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
360 			       struct net_device *dev)
361 {
362 	struct neighbour *n;
363 	int key_len = tbl->key_len;
364 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
365 
366 	NEIGH_CACHE_STAT_INC(tbl, lookups);
367 
368 	read_lock_bh(&tbl->lock);
369 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
370 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
371 			neigh_hold(n);
372 			NEIGH_CACHE_STAT_INC(tbl, hits);
373 			break;
374 		}
375 	}
376 	read_unlock_bh(&tbl->lock);
377 	return n;
378 }
379 
380 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
381 {
382 	struct neighbour *n;
383 	int key_len = tbl->key_len;
384 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
385 
386 	NEIGH_CACHE_STAT_INC(tbl, lookups);
387 
388 	read_lock_bh(&tbl->lock);
389 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
390 		if (!memcmp(n->primary_key, pkey, key_len)) {
391 			neigh_hold(n);
392 			NEIGH_CACHE_STAT_INC(tbl, hits);
393 			break;
394 		}
395 	}
396 	read_unlock_bh(&tbl->lock);
397 	return n;
398 }
399 
400 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
401 			       struct net_device *dev)
402 {
403 	u32 hash_val;
404 	int key_len = tbl->key_len;
405 	int error;
406 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
407 
408 	if (!n) {
409 		rc = ERR_PTR(-ENOBUFS);
410 		goto out;
411 	}
412 
413 	memcpy(n->primary_key, pkey, key_len);
414 	n->dev = dev;
415 	dev_hold(dev);
416 
417 	/* Protocol specific setup. */
418 	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
419 		rc = ERR_PTR(error);
420 		goto out_neigh_release;
421 	}
422 
423 	/* Device specific setup. */
424 	if (n->parms->neigh_setup &&
425 	    (error = n->parms->neigh_setup(n)) < 0) {
426 		rc = ERR_PTR(error);
427 		goto out_neigh_release;
428 	}
429 
430 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
431 
432 	write_lock_bh(&tbl->lock);
433 
434 	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
435 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
436 
437 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
438 
439 	if (n->parms->dead) {
440 		rc = ERR_PTR(-EINVAL);
441 		goto out_tbl_unlock;
442 	}
443 
444 	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
445 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
446 			neigh_hold(n1);
447 			rc = n1;
448 			goto out_tbl_unlock;
449 		}
450 	}
451 
452 	n->next = tbl->hash_buckets[hash_val];
453 	tbl->hash_buckets[hash_val] = n;
454 	n->dead = 0;
455 	neigh_hold(n);
456 	write_unlock_bh(&tbl->lock);
457 	NEIGH_PRINTK2("neigh %p is created.\n", n);
458 	rc = n;
459 out:
460 	return rc;
461 out_tbl_unlock:
462 	write_unlock_bh(&tbl->lock);
463 out_neigh_release:
464 	neigh_release(n);
465 	goto out;
466 }
467 
468 struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *pkey,
469 				    struct net_device *dev, int creat)
470 {
471 	struct pneigh_entry *n;
472 	int key_len = tbl->key_len;
473 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
474 
475 	hash_val ^= (hash_val >> 16);
476 	hash_val ^= hash_val >> 8;
477 	hash_val ^= hash_val >> 4;
478 	hash_val &= PNEIGH_HASHMASK;
479 
480 	read_lock_bh(&tbl->lock);
481 
482 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
483 		if (!memcmp(n->key, pkey, key_len) &&
484 		    (n->dev == dev || !n->dev)) {
485 			read_unlock_bh(&tbl->lock);
486 			goto out;
487 		}
488 	}
489 	read_unlock_bh(&tbl->lock);
490 	n = NULL;
491 	if (!creat)
492 		goto out;
493 
494 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
495 	if (!n)
496 		goto out;
497 
498 	memcpy(n->key, pkey, key_len);
499 	n->dev = dev;
500 	if (dev)
501 		dev_hold(dev);
502 
503 	if (tbl->pconstructor && tbl->pconstructor(n)) {
504 		if (dev)
505 			dev_put(dev);
506 		kfree(n);
507 		n = NULL;
508 		goto out;
509 	}
510 
511 	write_lock_bh(&tbl->lock);
512 	n->next = tbl->phash_buckets[hash_val];
513 	tbl->phash_buckets[hash_val] = n;
514 	write_unlock_bh(&tbl->lock);
515 out:
516 	return n;
517 }
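
/*
 * Editorial sketch: how a hypothetical proxy setup would use the creat
 * flag above -- creat == 0 is a pure lookup, creat == 1 inserts a new
 * proxy entry, which is exactly what the NTF_PROXY branch of neigh_add()
 * below relies on:
 *
 *	if (!pneigh_lookup(&arp_tbl, &proxied_ip, dev, 1))
 *		err = -ENOBUFS;
 *
 * arp_tbl and proxied_ip are illustrative; any neigh_table and a key of
 * tbl->key_len bytes behave the same way. Note the GFP_KERNEL allocation
 * above: creation must happen in process context.
 */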
518 
519 
520 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
521 		  struct net_device *dev)
522 {
523 	struct pneigh_entry *n, **np;
524 	int key_len = tbl->key_len;
525 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
526 
527 	hash_val ^= (hash_val >> 16);
528 	hash_val ^= hash_val >> 8;
529 	hash_val ^= hash_val >> 4;
530 	hash_val &= PNEIGH_HASHMASK;
531 
532 	write_lock_bh(&tbl->lock);
533 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
534 	     np = &n->next) {
535 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
536 			*np = n->next;
537 			write_unlock_bh(&tbl->lock);
538 			if (tbl->pdestructor)
539 				tbl->pdestructor(n);
540 			if (n->dev)
541 				dev_put(n->dev);
542 			kfree(n);
543 			return 0;
544 		}
545 	}
546 	write_unlock_bh(&tbl->lock);
547 	return -ENOENT;
548 }
549 
550 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
551 {
552 	struct pneigh_entry *n, **np;
553 	u32 h;
554 
555 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
556 		np = &tbl->phash_buckets[h];
557 		while ((n = *np) != NULL) {
558 			if (!dev || n->dev == dev) {
559 				*np = n->next;
560 				if (tbl->pdestructor)
561 					tbl->pdestructor(n);
562 				if (n->dev)
563 					dev_put(n->dev);
564 				kfree(n);
565 				continue;
566 			}
567 			np = &n->next;
568 		}
569 	}
570 	return -ENOENT;
571 }
572 
573 
574 /*
575  *	The neighbour must already have been removed from
576  *	the table.
577  */
578 void neigh_destroy(struct neighbour *neigh)
579 {
580 	struct hh_cache *hh;
581 
582 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
583 
584 	if (!neigh->dead) {
585 		printk(KERN_WARNING
586 		       "Destroying alive neighbour %p\n", neigh);
587 		dump_stack();
588 		return;
589 	}
590 
591 	if (neigh_del_timer(neigh))
592 		printk(KERN_WARNING "Impossible event.\n");
593 
594 	while ((hh = neigh->hh) != NULL) {
595 		neigh->hh = hh->hh_next;
596 		hh->hh_next = NULL;
597 		write_lock_bh(&hh->hh_lock);
598 		hh->hh_output = neigh_blackhole;
599 		write_unlock_bh(&hh->hh_lock);
600 		if (atomic_dec_and_test(&hh->hh_refcnt))
601 			kfree(hh);
602 	}
603 
604 	if (neigh->ops && neigh->ops->destructor)
605 		(neigh->ops->destructor)(neigh);
606 
607 	skb_queue_purge(&neigh->arp_queue);
608 
609 	dev_put(neigh->dev);
610 	neigh_parms_put(neigh->parms);
611 
612 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
613 
614 	atomic_dec(&neigh->tbl->entries);
615 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
616 }
617 
618 /* Neighbour state is suspicious;
619    disable the fast path.
620 
621    Called with the neighbour write-locked.
622  */
623 static void neigh_suspect(struct neighbour *neigh)
624 {
625 	struct hh_cache *hh;
626 
627 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
628 
629 	neigh->output = neigh->ops->output;
630 
631 	for (hh = neigh->hh; hh; hh = hh->hh_next)
632 		hh->hh_output = neigh->ops->output;
633 }
634 
635 /* Neighbour state is OK;
636    enable the fast path.
637 
638    Called with the neighbour write-locked.
639  */
640 static void neigh_connect(struct neighbour *neigh)
641 {
642 	struct hh_cache *hh;
643 
644 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
645 
646 	neigh->output = neigh->ops->connected_output;
647 
648 	for (hh = neigh->hh; hh; hh = hh->hh_next)
649 		hh->hh_output = neigh->ops->hh_output;
650 }
651 
652 static void neigh_periodic_timer(unsigned long arg)
653 {
654 	struct neigh_table *tbl = (struct neigh_table *)arg;
655 	struct neighbour *n, **np;
656 	unsigned long expire, now = jiffies;
657 
658 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
659 
660 	write_lock(&tbl->lock);
661 
662 	/*
663 	 *	periodically recompute ReachableTime from random function
664 	 */
665 
666 	if (time_after(now, tbl->last_rand + 300 * HZ)) {
667 		struct neigh_parms *p;
668 		tbl->last_rand = now;
669 		for (p = &tbl->parms; p; p = p->next)
670 			p->reachable_time =
671 				neigh_rand_reach_time(p->base_reachable_time);
672 	}
673 
674 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
675 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
676 
677 	while ((n = *np) != NULL) {
678 		unsigned int state;
679 
680 		write_lock(&n->lock);
681 
682 		state = n->nud_state;
683 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
684 			write_unlock(&n->lock);
685 			goto next_elt;
686 		}
687 
688 		if (time_before(n->used, n->confirmed))
689 			n->used = n->confirmed;
690 
691 		if (atomic_read(&n->refcnt) == 1 &&
692 		    (state == NUD_FAILED ||
693 		     time_after(now, n->used + n->parms->gc_staletime))) {
694 			*np = n->next;
695 			n->dead = 1;
696 			write_unlock(&n->lock);
697 			neigh_release(n);
698 			continue;
699 		}
700 		write_unlock(&n->lock);
701 
702 next_elt:
703 		np = &n->next;
704 	}
705 
706 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
707 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
708 	 * base_reachable_time.
709 	 */
710 	expire = tbl->parms.base_reachable_time >> 1;
711 	expire /= (tbl->hash_mask + 1);
712 	if (!expire)
713 		expire = 1;
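	/* Editorial worked example: with base_reachable_time = 30 * HZ and
	 * 256 buckets, expire = (15 * HZ) / 256; at HZ = 1000 that is about
	 * 58 jiffies, so one bucket is scanned roughly every 58 ms and the
	 * whole table roughly every base_reachable_time / 2, i.e. ~15 s.
	 */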
714 
715 	mod_timer(&tbl->gc_timer, now + expire);
716 
717 	write_unlock(&tbl->lock);
718 }
719 
720 static __inline__ int neigh_max_probes(struct neighbour *n)
721 {
722 	struct neigh_parms *p = n->parms;
723 	return (n->nud_state & NUD_PROBE ?
724 		p->ucast_probes :
725 		p->ucast_probes + p->app_probes + p->mcast_probes);
726 }
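
/*
 * Editorial worked example: with the common ARP defaults ucast_probes = 3,
 * mcast_probes = 3 and app_probes = 0, an entry in NUD_INCOMPLETE may send
 * 3 + 0 + 3 = 6 probes, while one in NUD_PROBE is limited to the 3 unicast
 * probes before neigh_timer_handler() declares it failed.
 */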
727 
728 
729 /* Called when a timer expires for a neighbour entry. */
730 
731 static void neigh_timer_handler(unsigned long arg)
732 {
733 	unsigned long now, next;
734 	struct neighbour *neigh = (struct neighbour *)arg;
735 	unsigned state;
736 	int notify = 0;
737 
738 	write_lock(&neigh->lock);
739 
740 	state = neigh->nud_state;
741 	now = jiffies;
742 	next = now + HZ;
743 
744 	if (!(state & NUD_IN_TIMER)) {
745 #ifndef CONFIG_SMP
746 		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
747 #endif
748 		goto out;
749 	}
750 
751 	if (state & NUD_REACHABLE) {
752 		if (time_before_eq(now,
753 				   neigh->confirmed + neigh->parms->reachable_time)) {
754 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
755 			next = neigh->confirmed + neigh->parms->reachable_time;
756 		} else if (time_before_eq(now,
757 					  neigh->used + neigh->parms->delay_probe_time)) {
758 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
759 			neigh->nud_state = NUD_DELAY;
760 			neigh_suspect(neigh);
761 			next = now + neigh->parms->delay_probe_time;
762 		} else {
763 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
764 			neigh->nud_state = NUD_STALE;
765 			neigh_suspect(neigh);
766 		}
767 	} else if (state & NUD_DELAY) {
768 		if (time_before_eq(now,
769 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
770 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
771 			neigh->nud_state = NUD_REACHABLE;
772 			neigh_connect(neigh);
773 			next = neigh->confirmed + neigh->parms->reachable_time;
774 		} else {
775 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
776 			neigh->nud_state = NUD_PROBE;
777 			atomic_set(&neigh->probes, 0);
778 			next = now + neigh->parms->retrans_time;
779 		}
780 	} else {
781 		/* NUD_PROBE|NUD_INCOMPLETE */
782 		next = now + neigh->parms->retrans_time;
783 	}
784 
785 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
786 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
787 		struct sk_buff *skb;
788 
789 		neigh->nud_state = NUD_FAILED;
790 		notify = 1;
791 		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
792 		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
793 
794 		/* This is a very delicate spot: error_report() is a very
795 		   complicated routine and, in particular, can hit this same
796 		   neighbour entry again!
797 		   So we try to be careful and avoid a dead loop. --ANK
798 		 */
799 		while (neigh->nud_state == NUD_FAILED &&
800 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
801 			write_unlock(&neigh->lock);
802 			neigh->ops->error_report(neigh, skb);
803 			write_lock(&neigh->lock);
804 		}
805 		skb_queue_purge(&neigh->arp_queue);
806 	}
807 
808 	if (neigh->nud_state & NUD_IN_TIMER) {
809 		neigh_hold(neigh);
810 		if (time_before(next, jiffies + HZ/2))
811 			next = jiffies + HZ/2;
812 		neigh->timer.expires = next;
813 		add_timer(&neigh->timer);
814 	}
815 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
816 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
817 		/* keep skb alive even if arp_queue overflows */
818 		if (skb)
819 			skb_get(skb);
820 		write_unlock(&neigh->lock);
821 		neigh->ops->solicit(neigh, skb);
822 		atomic_inc(&neigh->probes);
823 		if (skb)
824 			kfree_skb(skb);
825 	} else {
826 out:
827 		write_unlock(&neigh->lock);
828 	}
829 
830 #ifdef CONFIG_ARPD
831 	if (notify && neigh->parms->app_probes)
832 		neigh_app_notify(neigh);
833 #endif
834 	neigh_release(neigh);
835 }
836 
837 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
838 {
839 	int rc;
840 	unsigned long now;
841 
842 	write_lock_bh(&neigh->lock);
843 
844 	rc = 0;
845 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
846 		goto out_unlock_bh;
847 
848 	now = jiffies;
849 
850 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
851 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
852 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
853 			neigh->nud_state     = NUD_INCOMPLETE;
854 			neigh_hold(neigh);
855 			neigh->timer.expires = now + 1;
856 			add_timer(&neigh->timer);
857 		} else {
858 			neigh->nud_state = NUD_FAILED;
859 			write_unlock_bh(&neigh->lock);
860 
861 			if (skb)
862 				kfree_skb(skb);
863 			return 1;
864 		}
865 	} else if (neigh->nud_state & NUD_STALE) {
866 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
867 		neigh_hold(neigh);
868 		neigh->nud_state = NUD_DELAY;
869 		neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
870 		add_timer(&neigh->timer);
871 	}
872 
873 	if (neigh->nud_state == NUD_INCOMPLETE) {
874 		if (skb) {
875 			if (skb_queue_len(&neigh->arp_queue) >=
876 			    neigh->parms->queue_len) {
877 				struct sk_buff *buff;
878 				buff = neigh->arp_queue.next;
879 				__skb_unlink(buff, &neigh->arp_queue);
880 				kfree_skb(buff);
881 			}
882 			__skb_queue_tail(&neigh->arp_queue, skb);
883 		}
884 		rc = 1;
885 	}
886 out_unlock_bh:
887 	write_unlock_bh(&neigh->lock);
888 	return rc;
889 }
890 
891 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
892 {
893 	struct hh_cache *hh;
894 	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
895 		neigh->dev->header_cache_update;
896 
897 	if (update) {
898 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
899 			write_lock_bh(&hh->hh_lock);
900 			update(hh, neigh->dev, neigh->ha);
901 			write_unlock_bh(&hh->hh_lock);
902 		}
903 	}
904 }
905 
906 
907 
908 /* Generic update routine.
909    -- lladdr is the new lladdr, or NULL if none is supplied.
910    -- new    is the new state.
911    -- flags
912 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
913 				if the new one differs.
914 	NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
915 				lladdr as suspect instead of
916 				overriding it if the new one differs.
917 				It also allows retaining the current
918 				state if the lladdr is unchanged.
919 	NEIGH_UPDATE_F_ADMIN	means the change is administrative.
920 
921 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
922 				NTF_ROUTER flag.
923 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known
924 				to be a router.
925 
926    The caller MUST hold a reference count on the entry.
927  */
928 
929 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
930 		 u32 flags)
931 {
932 	u8 old;
933 	int err;
934 #ifdef CONFIG_ARPD
935 	int notify = 0;
936 #endif
937 	struct net_device *dev;
938 	int update_isrouter = 0;
939 
940 	write_lock_bh(&neigh->lock);
941 
942 	dev    = neigh->dev;
943 	old    = neigh->nud_state;
944 	err    = -EPERM;
945 
946 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
947 	    (old & (NUD_NOARP | NUD_PERMANENT)))
948 		goto out;
949 
950 	if (!(new & NUD_VALID)) {
951 		neigh_del_timer(neigh);
952 		if (old & NUD_CONNECTED)
953 			neigh_suspect(neigh);
954 		neigh->nud_state = new;
955 		err = 0;
956 #ifdef CONFIG_ARPD
957 		notify = old & NUD_VALID;
958 #endif
959 		goto out;
960 	}
961 
962 	/* Compare new lladdr with cached one */
963 	if (!dev->addr_len) {
964 		/* First case: device needs no address. */
965 		lladdr = neigh->ha;
966 	} else if (lladdr) {
967 		/* The second case: if something is already cached
968 		   and a new address is proposed:
969 		   - compare new & old
970 		   - if they are different, check override flag
971 		 */
972 		if ((old & NUD_VALID) &&
973 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
974 			lladdr = neigh->ha;
975 	} else {
976 		/* No address is supplied; if we know something,
977 		   use it, otherwise discard the request.
978 		 */
979 		err = -EINVAL;
980 		if (!(old & NUD_VALID))
981 			goto out;
982 		lladdr = neigh->ha;
983 	}
984 
985 	if (new & NUD_CONNECTED)
986 		neigh->confirmed = jiffies;
987 	neigh->updated = jiffies;
988 
989 	/* If the entry was valid and the address has not changed,
990 	   do not change the entry state if the new state is STALE.
991 	 */
992 	err = 0;
993 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
994 	if (old & NUD_VALID) {
995 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
996 			update_isrouter = 0;
997 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
998 			    (old & NUD_CONNECTED)) {
999 				lladdr = neigh->ha;
1000 				new = NUD_STALE;
1001 			} else
1002 				goto out;
1003 		} else {
1004 			if (lladdr == neigh->ha && new == NUD_STALE &&
1005 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1006 			     (old & NUD_CONNECTED))
1007 			    )
1008 				new = old;
1009 		}
1010 	}
1011 
1012 	if (new != old) {
1013 		neigh_del_timer(neigh);
1014 		if (new & NUD_IN_TIMER) {
1015 			neigh_hold(neigh);
1016 			neigh->timer.expires = jiffies +
1017 						((new & NUD_REACHABLE) ?
1018 						 neigh->parms->reachable_time : 0);
1019 			add_timer(&neigh->timer);
1020 		}
1021 		neigh->nud_state = new;
1022 	}
1023 
1024 	if (lladdr != neigh->ha) {
1025 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1026 		neigh_update_hhs(neigh);
1027 		if (!(new & NUD_CONNECTED))
1028 			neigh->confirmed = jiffies -
1029 				      (neigh->parms->base_reachable_time << 1);
1030 #ifdef CONFIG_ARPD
1031 		notify = 1;
1032 #endif
1033 	}
1034 	if (new == old)
1035 		goto out;
1036 	if (new & NUD_CONNECTED)
1037 		neigh_connect(neigh);
1038 	else
1039 		neigh_suspect(neigh);
1040 	if (!(old & NUD_VALID)) {
1041 		struct sk_buff *skb;
1042 
1043 		/* Again: avoid dead loop if something went wrong */
1044 
1045 		while (neigh->nud_state & NUD_VALID &&
1046 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1047 			struct neighbour *n1 = neigh;
1048 			write_unlock_bh(&neigh->lock);
1049 			/* On shaper/eql skb->dst->neighbour != neigh :( */
1050 			if (skb->dst && skb->dst->neighbour)
1051 				n1 = skb->dst->neighbour;
1052 			n1->output(skb);
1053 			write_lock_bh(&neigh->lock);
1054 		}
1055 		skb_queue_purge(&neigh->arp_queue);
1056 	}
1057 out:
1058 	if (update_isrouter) {
1059 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1060 			(neigh->flags | NTF_ROUTER) :
1061 			(neigh->flags & ~NTF_ROUTER);
1062 	}
1063 	write_unlock_bh(&neigh->lock);
1064 #ifdef CONFIG_ARPD
1065 	if (notify && neigh->parms->app_probes)
1066 		neigh_app_notify(neigh);
1067 #endif
1068 	return err;
1069 }
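
/*
 * Editorial sketch (not in the original file): a minimal administrative
 * update, essentially what netlink's neigh_add() below performs.  The
 * caller must already hold a reference on n; NEIGH_UPDATE_F_ADMIN permits
 * touching NOARP and PERMANENT entries:
 *
 *	err = neigh_update(n, lladdr, NUD_PERMANENT,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN);
 */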
1070 
1071 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1072 				 u8 *lladdr, void *saddr,
1073 				 struct net_device *dev)
1074 {
1075 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1076 						 lladdr || !dev->addr_len);
1077 	if (neigh)
1078 		neigh_update(neigh, lladdr, NUD_STALE,
1079 			     NEIGH_UPDATE_F_OVERRIDE);
1080 	return neigh;
1081 }
1082 
1083 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1084 			  u16 protocol)
1085 {
1086 	struct hh_cache	*hh;
1087 	struct net_device *dev = dst->dev;
1088 
1089 	for (hh = n->hh; hh; hh = hh->hh_next)
1090 		if (hh->hh_type == protocol)
1091 			break;
1092 
1093 	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1094 		memset(hh, 0, sizeof(struct hh_cache));
1095 		rwlock_init(&hh->hh_lock);
1096 		hh->hh_type = protocol;
1097 		atomic_set(&hh->hh_refcnt, 0);
1098 		hh->hh_next = NULL;
1099 		if (dev->hard_header_cache(n, hh)) {
1100 			kfree(hh);
1101 			hh = NULL;
1102 		} else {
1103 			atomic_inc(&hh->hh_refcnt);
1104 			hh->hh_next = n->hh;
1105 			n->hh	    = hh;
1106 			if (n->nud_state & NUD_CONNECTED)
1107 				hh->hh_output = n->ops->hh_output;
1108 			else
1109 				hh->hh_output = n->ops->output;
1110 		}
1111 	}
1112 	if (hh)	{
1113 		atomic_inc(&hh->hh_refcnt);
1114 		dst->hh = hh;
1115 	}
1116 }
1117 
1118 /* This function can be used in contexts where only the old
1119    dev_queue_xmit worked, e.g. if you want to override the normal
1120    output path (eql, shaper) but resolution has not been made yet.
1121  */
1122 
1123 int neigh_compat_output(struct sk_buff *skb)
1124 {
1125 	struct net_device *dev = skb->dev;
1126 
1127 	__skb_pull(skb, skb->nh.raw - skb->data);
1128 
1129 	if (dev->hard_header &&
1130 	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1131 		    	     skb->len) < 0 &&
1132 	    dev->rebuild_header(skb))
1133 		return 0;
1134 
1135 	return dev_queue_xmit(skb);
1136 }
1137 
1138 /* Slow and careful. */
1139 
1140 int neigh_resolve_output(struct sk_buff *skb)
1141 {
1142 	struct dst_entry *dst = skb->dst;
1143 	struct neighbour *neigh;
1144 	int rc = 0;
1145 
1146 	if (!dst || !(neigh = dst->neighbour))
1147 		goto discard;
1148 
1149 	__skb_pull(skb, skb->nh.raw - skb->data);
1150 
1151 	if (!neigh_event_send(neigh, skb)) {
1152 		int err;
1153 		struct net_device *dev = neigh->dev;
1154 		if (dev->hard_header_cache && !dst->hh) {
1155 			write_lock_bh(&neigh->lock);
1156 			if (!dst->hh)
1157 				neigh_hh_init(neigh, dst, dst->ops->protocol);
1158 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1159 					       neigh->ha, NULL, skb->len);
1160 			write_unlock_bh(&neigh->lock);
1161 		} else {
1162 			read_lock_bh(&neigh->lock);
1163 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1164 					       neigh->ha, NULL, skb->len);
1165 			read_unlock_bh(&neigh->lock);
1166 		}
1167 		if (err >= 0)
1168 			rc = neigh->ops->queue_xmit(skb);
1169 		else
1170 			goto out_kfree_skb;
1171 	}
1172 out:
1173 	return rc;
1174 discard:
1175 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1176 		      dst, dst ? dst->neighbour : NULL);
1177 out_kfree_skb:
1178 	rc = -EINVAL;
1179 	kfree_skb(skb);
1180 	goto out;
1181 }
1182 
1183 /* As fast as possible without hh cache */
1184 
1185 int neigh_connected_output(struct sk_buff *skb)
1186 {
1187 	int err;
1188 	struct dst_entry *dst = skb->dst;
1189 	struct neighbour *neigh = dst->neighbour;
1190 	struct net_device *dev = neigh->dev;
1191 
1192 	__skb_pull(skb, skb->nh.raw - skb->data);
1193 
1194 	read_lock_bh(&neigh->lock);
1195 	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1196 			       neigh->ha, NULL, skb->len);
1197 	read_unlock_bh(&neigh->lock);
1198 	if (err >= 0)
1199 		err = neigh->ops->queue_xmit(skb);
1200 	else {
1201 		err = -EINVAL;
1202 		kfree_skb(skb);
1203 	}
1204 	return err;
1205 }
1206 
1207 static void neigh_proxy_process(unsigned long arg)
1208 {
1209 	struct neigh_table *tbl = (struct neigh_table *)arg;
1210 	long sched_next = 0;
1211 	unsigned long now = jiffies;
1212 	struct sk_buff *skb;
1213 
1214 	spin_lock(&tbl->proxy_queue.lock);
1215 
1216 	skb = tbl->proxy_queue.next;
1217 
1218 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1219 		struct sk_buff *back = skb;
1220 		long tdif = back->stamp.tv_usec - now;
1221 
1222 		skb = skb->next;
1223 		if (tdif <= 0) {
1224 			struct net_device *dev = back->dev;
1225 			__skb_unlink(back, &tbl->proxy_queue);
1226 			if (tbl->proxy_redo && netif_running(dev))
1227 				tbl->proxy_redo(back);
1228 			else
1229 				kfree_skb(back);
1230 
1231 			dev_put(dev);
1232 		} else if (!sched_next || tdif < sched_next)
1233 			sched_next = tdif;
1234 	}
1235 	del_timer(&tbl->proxy_timer);
1236 	if (sched_next)
1237 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1238 	spin_unlock(&tbl->proxy_queue.lock);
1239 }
1240 
1241 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1242 		    struct sk_buff *skb)
1243 {
1244 	unsigned long now = jiffies;
1245 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
1246 
1247 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1248 		kfree_skb(skb);
1249 		return;
1250 	}
1251 	skb->stamp.tv_sec  = LOCALLY_ENQUEUED;
1252 	skb->stamp.tv_usec = sched_next;
1253 
1254 	spin_lock(&tbl->proxy_queue.lock);
1255 	if (del_timer(&tbl->proxy_timer)) {
1256 		if (time_before(tbl->proxy_timer.expires, sched_next))
1257 			sched_next = tbl->proxy_timer.expires;
1258 	}
1259 	dst_release(skb->dst);
1260 	skb->dst = NULL;
1261 	dev_hold(skb->dev);
1262 	__skb_queue_tail(&tbl->proxy_queue, skb);
1263 	mod_timer(&tbl->proxy_timer, sched_next);
1264 	spin_unlock(&tbl->proxy_queue.lock);
1265 }
1266 
1267 
1268 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1269 				      struct neigh_table *tbl)
1270 {
1271 	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1272 
1273 	if (p) {
1274 		memcpy(p, &tbl->parms, sizeof(*p));
1275 		p->tbl		  = tbl;
1276 		atomic_set(&p->refcnt, 1);
1277 		INIT_RCU_HEAD(&p->rcu_head);
1278 		p->reachable_time =
1279 				neigh_rand_reach_time(p->base_reachable_time);
1280 		if (dev) {
1281 			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1282 				kfree(p);
1283 				return NULL;
1284 			}
1285 
1286 			dev_hold(dev);
1287 			p->dev = dev;
1288 		}
1289 		p->sysctl_table = NULL;
1290 		write_lock_bh(&tbl->lock);
1291 		p->next		= tbl->parms.next;
1292 		tbl->parms.next = p;
1293 		write_unlock_bh(&tbl->lock);
1294 	}
1295 	return p;
1296 }
1297 
1298 static void neigh_rcu_free_parms(struct rcu_head *head)
1299 {
1300 	struct neigh_parms *parms =
1301 		container_of(head, struct neigh_parms, rcu_head);
1302 
1303 	neigh_parms_put(parms);
1304 }
1305 
1306 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1307 {
1308 	struct neigh_parms **p;
1309 
1310 	if (!parms || parms == &tbl->parms)
1311 		return;
1312 	write_lock_bh(&tbl->lock);
1313 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1314 		if (*p == parms) {
1315 			*p = parms->next;
1316 			parms->dead = 1;
1317 			write_unlock_bh(&tbl->lock);
1318 			if (parms->dev)
1319 				dev_put(parms->dev);
1320 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1321 			return;
1322 		}
1323 	}
1324 	write_unlock_bh(&tbl->lock);
1325 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1326 }
1327 
1328 void neigh_parms_destroy(struct neigh_parms *parms)
1329 {
1330 	kfree(parms);
1331 }
1332 
1333 
1334 void neigh_table_init(struct neigh_table *tbl)
1335 {
1336 	unsigned long now = jiffies;
1337 	unsigned long phsize;
1338 
1339 	atomic_set(&tbl->parms.refcnt, 1);
1340 	INIT_RCU_HEAD(&tbl->parms.rcu_head);
1341 	tbl->parms.reachable_time =
1342 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1343 
1344 	if (!tbl->kmem_cachep)
1345 		tbl->kmem_cachep = kmem_cache_create(tbl->id,
1346 						     tbl->entry_size,
1347 						     0, SLAB_HWCACHE_ALIGN,
1348 						     NULL, NULL);
1349 
1350 	if (!tbl->kmem_cachep)
1351 		panic("cannot create neighbour cache");
1352 
1353 	tbl->stats = alloc_percpu(struct neigh_statistics);
1354 	if (!tbl->stats)
1355 		panic("cannot create neighbour cache statistics");
1356 
1357 #ifdef CONFIG_PROC_FS
1358 	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1359 	if (!tbl->pde)
1360 		panic("cannot create neighbour proc dir entry");
1361 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
1362 	tbl->pde->data = tbl;
1363 #endif
1364 
1365 	tbl->hash_mask = 1;
1366 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1367 
1368 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1369 	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
1370 
1371 	if (!tbl->hash_buckets || !tbl->phash_buckets)
1372 		panic("cannot allocate neighbour cache hashes");
1373 
1374 	memset(tbl->phash_buckets, 0, phsize);
1375 
1376 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1377 
1378 	rwlock_init(&tbl->lock);
1379 	init_timer(&tbl->gc_timer);
1380 	tbl->gc_timer.data     = (unsigned long)tbl;
1381 	tbl->gc_timer.function = neigh_periodic_timer;
1382 	tbl->gc_timer.expires  = now + 1;
1383 	add_timer(&tbl->gc_timer);
1384 
1385 	init_timer(&tbl->proxy_timer);
1386 	tbl->proxy_timer.data	  = (unsigned long)tbl;
1387 	tbl->proxy_timer.function = neigh_proxy_process;
1388 	skb_queue_head_init(&tbl->proxy_queue);
1389 
1390 	tbl->last_flush = now;
1391 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1392 	write_lock(&neigh_tbl_lock);
1393 	tbl->next	= neigh_tables;
1394 	neigh_tables	= tbl;
1395 	write_unlock(&neigh_tbl_lock);
1396 }
1397 
1398 int neigh_table_clear(struct neigh_table *tbl)
1399 {
1400 	struct neigh_table **tp;
1401 
1402 	/* This is not clean... Fix it so the IPv6 module can be unloaded safely. */
1403 	del_timer_sync(&tbl->gc_timer);
1404 	del_timer_sync(&tbl->proxy_timer);
1405 	pneigh_queue_purge(&tbl->proxy_queue);
1406 	neigh_ifdown(tbl, NULL);
1407 	if (atomic_read(&tbl->entries))
1408 		printk(KERN_CRIT "neighbour leakage\n");
1409 	write_lock(&neigh_tbl_lock);
1410 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1411 		if (*tp == tbl) {
1412 			*tp = tbl->next;
1413 			break;
1414 		}
1415 	}
1416 	write_unlock(&neigh_tbl_lock);
1417 
1418 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1419 	tbl->hash_buckets = NULL;
1420 
1421 	kfree(tbl->phash_buckets);
1422 	tbl->phash_buckets = NULL;
1423 
1424 	return 0;
1425 }
1426 
1427 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1428 {
1429 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1430 	struct rtattr **nda = arg;
1431 	struct neigh_table *tbl;
1432 	struct net_device *dev = NULL;
1433 	int err = -ENODEV;
1434 
1435 	if (ndm->ndm_ifindex &&
1436 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1437 		goto out;
1438 
1439 	read_lock(&neigh_tbl_lock);
1440 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1441 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1442 		struct neighbour *n;
1443 
1444 		if (tbl->family != ndm->ndm_family)
1445 			continue;
1446 		read_unlock(&neigh_tbl_lock);
1447 
1448 		err = -EINVAL;
1449 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1450 			goto out_dev_put;
1451 
1452 		if (ndm->ndm_flags & NTF_PROXY) {
1453 			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
1454 			goto out_dev_put;
1455 		}
1456 
1457 		if (!dev)
1458 			goto out;
1459 
1460 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1461 		if (n) {
1462 			err = neigh_update(n, NULL, NUD_FAILED,
1463 					   NEIGH_UPDATE_F_OVERRIDE|
1464 					   NEIGH_UPDATE_F_ADMIN);
1465 			neigh_release(n);
1466 		}
1467 		goto out_dev_put;
1468 	}
1469 	read_unlock(&neigh_tbl_lock);
1470 	err = -EADDRNOTAVAIL;
1471 out_dev_put:
1472 	if (dev)
1473 		dev_put(dev);
1474 out:
1475 	return err;
1476 }
1477 
1478 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1479 {
1480 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1481 	struct rtattr **nda = arg;
1482 	struct neigh_table *tbl;
1483 	struct net_device *dev = NULL;
1484 	int err = -ENODEV;
1485 
1486 	if (ndm->ndm_ifindex &&
1487 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1488 		goto out;
1489 
1490 	read_lock(&neigh_tbl_lock);
1491 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1492 		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
1493 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1494 		int override = 1;
1495 		struct neighbour *n;
1496 
1497 		if (tbl->family != ndm->ndm_family)
1498 			continue;
1499 		read_unlock(&neigh_tbl_lock);
1500 
1501 		err = -EINVAL;
1502 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1503 			goto out_dev_put;
1504 
1505 		if (ndm->ndm_flags & NTF_PROXY) {
1506 			err = -ENOBUFS;
1507 			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
1508 				err = 0;
1509 			goto out_dev_put;
1510 		}
1511 
1512 		err = -EINVAL;
1513 		if (!dev)
1514 			goto out;
1515 		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
1516 			goto out_dev_put;
1517 
1518 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1519 		if (n) {
1520 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1521 				err = -EEXIST;
1522 				neigh_release(n);
1523 				goto out_dev_put;
1524 			}
1525 
1526 			override = nlh->nlmsg_flags & NLM_F_REPLACE;
1527 		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1528 			err = -ENOENT;
1529 			goto out_dev_put;
1530 		} else {
1531 			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
1532 			if (IS_ERR(n)) {
1533 				err = PTR_ERR(n);
1534 				goto out_dev_put;
1535 			}
1536 		}
1537 
1538 		err = neigh_update(n,
1539 				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
1540 				   ndm->ndm_state,
1541 				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
1542 				   NEIGH_UPDATE_F_ADMIN);
1543 
1544 		neigh_release(n);
1545 		goto out_dev_put;
1546 	}
1547 
1548 	read_unlock(&neigh_tbl_lock);
1549 	err = -EADDRNOTAVAIL;
1550 out_dev_put:
1551 	if (dev)
1552 		dev_put(dev);
1553 out:
1554 	return err;
1555 }
1556 
1557 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1558 {
1559 	struct rtattr *nest = NULL;
1560 
1561 	nest = RTA_NEST(skb, NDTA_PARMS);
1562 
1563 	if (parms->dev)
1564 		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1565 
1566 	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1567 	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1568 	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1569 	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1570 	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1571 	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1572 	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1573 	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1574 		      parms->base_reachable_time);
1575 	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1576 	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1577 	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1578 	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1579 	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1580 	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1581 
1582 	return RTA_NEST_END(skb, nest);
1583 
1584 rtattr_failure:
1585 	return RTA_NEST_CANCEL(skb, nest);
1586 }
1587 
1588 static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1589 			      struct netlink_callback *cb)
1590 {
1591 	struct nlmsghdr *nlh;
1592 	struct ndtmsg *ndtmsg;
1593 
1594 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1595 			       NLM_F_MULTI);
1596 
1597 	ndtmsg = NLMSG_DATA(nlh);
1598 
1599 	read_lock_bh(&tbl->lock);
1600 	ndtmsg->ndtm_family = tbl->family;
1601 	ndtmsg->ndtm_pad1   = 0;
1602 	ndtmsg->ndtm_pad2   = 0;
1603 
1604 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1605 	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1606 	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1607 	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1608 	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1609 
1610 	{
1611 		unsigned long now = jiffies;
1612 		unsigned int flush_delta = now - tbl->last_flush;
1613 		unsigned int rand_delta = now - tbl->last_rand;
1614 
1615 		struct ndt_config ndc = {
1616 			.ndtc_key_len		= tbl->key_len,
1617 			.ndtc_entry_size	= tbl->entry_size,
1618 			.ndtc_entries		= atomic_read(&tbl->entries),
1619 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1620 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1621 			.ndtc_hash_rnd		= tbl->hash_rnd,
1622 			.ndtc_hash_mask		= tbl->hash_mask,
1623 			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
1624 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1625 		};
1626 
1627 		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1628 	}
1629 
1630 	{
1631 		int cpu;
1632 		struct ndt_stats ndst;
1633 
1634 		memset(&ndst, 0, sizeof(ndst));
1635 
1636 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
1637 			struct neigh_statistics	*st;
1638 
1639 			if (!cpu_possible(cpu))
1640 				continue;
1641 
1642 			st = per_cpu_ptr(tbl->stats, cpu);
1643 			ndst.ndts_allocs		+= st->allocs;
1644 			ndst.ndts_destroys		+= st->destroys;
1645 			ndst.ndts_hash_grows		+= st->hash_grows;
1646 			ndst.ndts_res_failed		+= st->res_failed;
1647 			ndst.ndts_lookups		+= st->lookups;
1648 			ndst.ndts_hits			+= st->hits;
1649 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1650 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1651 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1652 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1653 		}
1654 
1655 		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1656 	}
1657 
1658 	BUG_ON(tbl->parms.dev);
1659 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1660 		goto rtattr_failure;
1661 
1662 	read_unlock_bh(&tbl->lock);
1663 	return NLMSG_END(skb, nlh);
1664 
1665 rtattr_failure:
1666 	read_unlock_bh(&tbl->lock);
1667 	return NLMSG_CANCEL(skb, nlh);
1668 
1669 nlmsg_failure:
1670 	return -1;
1671 }
1672 
1673 static int neightbl_fill_param_info(struct neigh_table *tbl,
1674 				    struct neigh_parms *parms,
1675 				    struct sk_buff *skb,
1676 				    struct netlink_callback *cb)
1677 {
1678 	struct ndtmsg *ndtmsg;
1679 	struct nlmsghdr *nlh;
1680 
1681 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1682 			       NLM_F_MULTI);
1683 
1684 	ndtmsg = NLMSG_DATA(nlh);
1685 
1686 	read_lock_bh(&tbl->lock);
1687 	ndtmsg->ndtm_family = tbl->family;
1688 	ndtmsg->ndtm_pad1   = 0;
1689 	ndtmsg->ndtm_pad2   = 0;
1690 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1691 
1692 	if (neightbl_fill_parms(skb, parms) < 0)
1693 		goto rtattr_failure;
1694 
1695 	read_unlock_bh(&tbl->lock);
1696 	return NLMSG_END(skb, nlh);
1697 
1698 rtattr_failure:
1699 	read_unlock_bh(&tbl->lock);
1700 	return NLMSG_CANCEL(skb, nlh);
1701 
1702 nlmsg_failure:
1703 	return -1;
1704 }
1705 
1706 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1707 						      int ifindex)
1708 {
1709 	struct neigh_parms *p;
1710 
1711 	for (p = &tbl->parms; p; p = p->next)
1712 		if ((p->dev && p->dev->ifindex == ifindex) ||
1713 		    (!p->dev && !ifindex))
1714 			return p;
1715 
1716 	return NULL;
1717 }
1718 
1719 int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1720 {
1721 	struct neigh_table *tbl;
1722 	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
1723 	struct rtattr **tb = arg;
1724 	int err = -EINVAL;
1725 
1726 	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
1727 		return -EINVAL;
1728 
1729 	read_lock(&neigh_tbl_lock);
1730 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1731 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1732 			continue;
1733 
1734 		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
1735 			break;
1736 	}
1737 
1738 	if (tbl == NULL) {
1739 		err = -ENOENT;
1740 		goto errout;
1741 	}
1742 
1743 	/*
1744 	 * We acquire tbl->lock to be nice to the periodic timers and
1745 	 * make sure they always see a consistent set of values.
1746 	 */
1747 	write_lock_bh(&tbl->lock);
1748 
1749 	if (tb[NDTA_THRESH1 - 1])
1750 		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
1751 
1752 	if (tb[NDTA_THRESH2 - 1])
1753 		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
1754 
1755 	if (tb[NDTA_THRESH3 - 1])
1756 		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
1757 
1758 	if (tb[NDTA_GC_INTERVAL - 1])
1759 		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
1760 
1761 	if (tb[NDTA_PARMS - 1]) {
1762 		struct rtattr *tbp[NDTPA_MAX];
1763 		struct neigh_parms *p;
1764 		u32 ifindex = 0;
1765 
1766 		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
1767 			goto rtattr_failure;
1768 
1769 		if (tbp[NDTPA_IFINDEX - 1])
1770 			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
1771 
1772 		p = lookup_neigh_params(tbl, ifindex);
1773 		if (p == NULL) {
1774 			err = -ENOENT;
1775 			goto rtattr_failure;
1776 		}
1777 
1778 		if (tbp[NDTPA_QUEUE_LEN - 1])
1779 			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
1780 
1781 		if (tbp[NDTPA_PROXY_QLEN - 1])
1782 			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
1783 
1784 		if (tbp[NDTPA_APP_PROBES - 1])
1785 			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
1786 
1787 		if (tbp[NDTPA_UCAST_PROBES - 1])
1788 			p->ucast_probes =
1789 			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
1790 
1791 		if (tbp[NDTPA_MCAST_PROBES - 1])
1792 			p->mcast_probes =
1793 			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
1794 
1795 		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
1796 			p->base_reachable_time =
1797 			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
1798 
1799 		if (tbp[NDTPA_GC_STALETIME - 1])
1800 			p->gc_staletime =
1801 			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
1802 
1803 		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
1804 			p->delay_probe_time =
1805 			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
1806 
1807 		if (tbp[NDTPA_RETRANS_TIME - 1])
1808 			p->retrans_time =
1809 			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
1810 
1811 		if (tbp[NDTPA_ANYCAST_DELAY - 1])
1812 			p->anycast_delay =
1813 			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
1814 
1815 		if (tbp[NDTPA_PROXY_DELAY - 1])
1816 			p->proxy_delay =
1817 			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
1818 
1819 		if (tbp[NDTPA_LOCKTIME - 1])
1820 			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
1821 	}
1822 
1823 	err = 0;
1824 
1825 rtattr_failure:
1826 	write_unlock_bh(&tbl->lock);
1827 errout:
1828 	read_unlock(&neigh_tbl_lock);
1829 	return err;
1830 }
1831 
1832 int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1833 {
1834 	int idx, family;
1835 	int s_idx = cb->args[0];
1836 	struct neigh_table *tbl;
1837 
1838 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1839 
1840 	read_lock(&neigh_tbl_lock);
1841 	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
1842 		struct neigh_parms *p;
1843 
1844 		if (idx < s_idx || (family && tbl->family != family))
1845 			continue;
1846 
1847 		if (neightbl_fill_info(tbl, skb, cb) <= 0)
1848 			break;
1849 
1850 		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
1851 			if (idx < s_idx)
1852 				continue;
1853 
1854 			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
1855 				goto out;
1856 		}
1857 
1858 	}
1859 out:
1860 	read_unlock(&neigh_tbl_lock);
1861 	cb->args[0] = idx;
1862 
1863 	return skb->len;
1864 }
1865 
1866 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1867 			   u32 pid, u32 seq, int event, unsigned int flags)
1868 {
1869 	unsigned long now = jiffies;
1870 	unsigned char *b = skb->tail;
1871 	struct nda_cacheinfo ci;
1872 	int locked = 0;
1873 	u32 probes;
1874 	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
1875 					 sizeof(struct ndmsg), flags);
1876 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1877 
1878 	ndm->ndm_family	 = n->ops->family;
1879 	ndm->ndm_pad1    = 0;
1880 	ndm->ndm_pad2    = 0;
1881 	ndm->ndm_flags	 = n->flags;
1882 	ndm->ndm_type	 = n->type;
1883 	ndm->ndm_ifindex = n->dev->ifindex;
1884 	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1885 	read_lock_bh(&n->lock);
1886 	locked		 = 1;
1887 	ndm->ndm_state	 = n->nud_state;
1888 	if (n->nud_state & NUD_VALID)
1889 		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1890 	ci.ndm_used	 = now - n->used;
1891 	ci.ndm_confirmed = now - n->confirmed;
1892 	ci.ndm_updated	 = now - n->updated;
1893 	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
1894 	probes = atomic_read(&n->probes);
1895 	read_unlock_bh(&n->lock);
1896 	locked		 = 0;
1897 	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1898 	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
1899 	nlh->nlmsg_len	 = skb->tail - b;
1900 	return skb->len;
1901 
1902 nlmsg_failure:
1903 rtattr_failure:
1904 	if (locked)
1905 		read_unlock_bh(&n->lock);
1906 	skb_trim(skb, b - skb->data);
1907 	return -1;
1908 }
1909 
1910 
1911 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1912 			    struct netlink_callback *cb)
1913 {
1914 	struct neighbour *n;
1915 	int rc, h, s_h = cb->args[1];
1916 	int idx, s_idx = idx = cb->args[2];
1917 
1918 	for (h = 0; h <= tbl->hash_mask; h++) {
1919 		if (h < s_h)
1920 			continue;
1921 		if (h > s_h)
1922 			s_idx = 0;
1923 		read_lock_bh(&tbl->lock);
1924 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
1925 			if (idx < s_idx)
1926 				continue;
1927 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1928 					    cb->nlh->nlmsg_seq,
1929 					    RTM_NEWNEIGH,
1930 					    NLM_F_MULTI) <= 0) {
1931 				read_unlock_bh(&tbl->lock);
1932 				rc = -1;
1933 				goto out;
1934 			}
1935 		}
1936 		read_unlock_bh(&tbl->lock);
1937 	}
1938 	rc = skb->len;
1939 out:
1940 	cb->args[1] = h;
1941 	cb->args[2] = idx;
1942 	return rc;
1943 }
1944 
1945 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1946 {
1947 	struct neigh_table *tbl;
1948 	int t, family, s_t;
1949 
1950 	read_lock(&neigh_tbl_lock);
1951 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1952 	s_t = cb->args[0];
1953 
1954 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
1955 		if (t < s_t || (family && tbl->family != family))
1956 			continue;
1957 		if (t > s_t)
1958 			memset(&cb->args[1], 0, sizeof(cb->args) -
1959 						sizeof(cb->args[0]));
1960 		if (neigh_dump_table(tbl, skb, cb) < 0)
1961 			break;
1962 	}
1963 	read_unlock(&neigh_tbl_lock);
1964 
1965 	cb->args[0] = t;
1966 	return skb->len;
1967 }
1968 
1969 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1970 {
1971 	int chain;
1972 
1973 	read_lock_bh(&tbl->lock);
1974 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1975 		struct neighbour *n;
1976 
1977 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1978 			cb(n, cookie);
1979 	}
1980 	read_unlock_bh(&tbl->lock);
1981 }
1982 EXPORT_SYMBOL(neigh_for_each);
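
/*
 * Editorial sketch: a hypothetical caller counting reachable entries with
 * the iterator above.  The callback runs with tbl->lock read-held, so it
 * must not sleep or re-enter the table:
 *
 *	static void count_reachable(struct neighbour *n, void *cookie)
 *	{
 *		if (n->nud_state & NUD_REACHABLE)
 *			(*(int *)cookie)++;
 *	}
 *
 *	int reachable = 0;
 *	neigh_for_each(&arp_tbl, count_reachable, &reachable);
 *
 * arp_tbl stands in for whatever table the caller owns.
 */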
1983 
1984 /* The tbl->lock must be held as a writer and BH disabled. */
1985 void __neigh_for_each_release(struct neigh_table *tbl,
1986 			      int (*cb)(struct neighbour *))
1987 {
1988 	int chain;
1989 
1990 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1991 		struct neighbour *n, **np;
1992 
1993 		np = &tbl->hash_buckets[chain];
1994 		while ((n = *np) != NULL) {
1995 			int release;
1996 
1997 			write_lock(&n->lock);
1998 			release = cb(n);
1999 			if (release) {
2000 				*np = n->next;
2001 				n->dead = 1;
2002 			} else
2003 				np = &n->next;
2004 			write_unlock(&n->lock);
2005 			if (release)
2006 				neigh_release(n);
2007 		}
2008 	}
2009 }
2010 EXPORT_SYMBOL(__neigh_for_each_release);
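/*
 * A caller sketch for __neigh_for_each_release() (hypothetical, but
 * matching the locking contract stated above): drop every entry whose
 * device has gone down.
 *
 *	static int release_if_down(struct neighbour *n)
 *	{
 *		return !(n->dev->flags & IFF_UP);
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_if_down);
 *	write_unlock_bh(&tbl->lock);
 *
 * A nonzero return from the callback unlinks the entry, marks it dead
 * and drops the hash table's reference via neigh_release().
 */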
2011 
2012 #ifdef CONFIG_PROC_FS
2013 
2014 static struct neighbour *neigh_get_first(struct seq_file *seq)
2015 {
2016 	struct neigh_seq_state *state = seq->private;
2017 	struct neigh_table *tbl = state->tbl;
2018 	struct neighbour *n = NULL;
2019 	int bucket = state->bucket;
2020 
2021 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2022 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2023 		n = tbl->hash_buckets[bucket];
2024 
2025 		while (n) {
2026 			if (state->neigh_sub_iter) {
2027 				loff_t fakep = 0;
2028 				void *v;
2029 
2030 				v = state->neigh_sub_iter(state, n, &fakep);
2031 				if (!v)
2032 					goto next;
2033 			}
2034 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2035 				break;
2036 			if (n->nud_state & ~NUD_NOARP)
2037 				break;
2038 		next:
2039 			n = n->next;
2040 		}
2041 
2042 		if (n)
2043 			break;
2044 	}
2045 	state->bucket = bucket;
2046 
2047 	return n;
2048 }
2049 
2050 static struct neighbour *neigh_get_next(struct seq_file *seq,
2051 					struct neighbour *n,
2052 					loff_t *pos)
2053 {
2054 	struct neigh_seq_state *state = seq->private;
2055 	struct neigh_table *tbl = state->tbl;
2056 
2057 	if (state->neigh_sub_iter) {
2058 		void *v = state->neigh_sub_iter(state, n, pos);
2059 		if (v)
2060 			return n;
2061 	}
2062 	n = n->next;
2063 
2064 	while (1) {
2065 		while (n) {
2066 			if (state->neigh_sub_iter) {
2067 				void *v = state->neigh_sub_iter(state, n, pos);
2068 				if (v)
2069 					return n;
2070 				goto next;
2071 			}
2072 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2073 				break;
2074 
2075 			if (n->nud_state & ~NUD_NOARP)
2076 				break;
2077 		next:
2078 			n = n->next;
2079 		}
2080 
2081 		if (n)
2082 			break;
2083 
2084 		if (++state->bucket > tbl->hash_mask)
2085 			break;
2086 
2087 		n = tbl->hash_buckets[state->bucket];
2088 	}
2089 
2090 	if (n && pos)
2091 		--(*pos);
2092 	return n;
2093 }
2094 
2095 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2096 {
2097 	struct neighbour *n = neigh_get_first(seq);
2098 
2099 	if (n) {
2100 		while (*pos) {
2101 			n = neigh_get_next(seq, n, pos);
2102 			if (!n)
2103 				break;
2104 		}
2105 	}
2106 	return *pos ? NULL : n;
2107 }
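/*
 * The *pos handling above is subtle: when called with a non-NULL pos,
 * neigh_get_next() decrements *pos each time it successfully advances,
 * so neigh_get_idx() simply loops until *pos reaches zero, at which
 * point the entry in hand is the one the seq_file core asked for (or
 * NULL if the table is shorter than the requested offset).
 */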
2108 
2109 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2110 {
2111 	struct neigh_seq_state *state = seq->private;
2112 	struct neigh_table *tbl = state->tbl;
2113 	struct pneigh_entry *pn = NULL;
2114 	int bucket = state->bucket;
2115 
2116 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2117 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2118 		pn = tbl->phash_buckets[bucket];
2119 		if (pn)
2120 			break;
2121 	}
2122 	state->bucket = bucket;
2123 
2124 	return pn;
2125 }
2126 
2127 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2128 					    struct pneigh_entry *pn,
2129 					    loff_t *pos)
2130 {
2131 	struct neigh_seq_state *state = seq->private;
2132 	struct neigh_table *tbl = state->tbl;
2133 
2134 	pn = pn->next;
2135 	while (!pn) {
2136 		if (++state->bucket > PNEIGH_HASHMASK)
2137 			break;
2138 		pn = tbl->phash_buckets[state->bucket];
2139 		if (pn)
2140 			break;
2141 	}
2142 
2143 	if (pn && pos)
2144 		--(*pos);
2145 
2146 	return pn;
2147 }
2148 
2149 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2150 {
2151 	struct pneigh_entry *pn = pneigh_get_first(seq);
2152 
2153 	if (pn) {
2154 		while (*pos) {
2155 			pn = pneigh_get_next(seq, pn, pos);
2156 			if (!pn)
2157 				break;
2158 		}
2159 	}
2160 	return *pos ? NULL : pn;
2161 }
2162 
2163 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2164 {
2165 	struct neigh_seq_state *state = seq->private;
2166 	void *rc;
2167 
2168 	rc = neigh_get_idx(seq, pos);
2169 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2170 		rc = pneigh_get_idx(seq, pos);
2171 
2172 	return rc;
2173 }
2174 
2175 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2176 {
2177 	struct neigh_seq_state *state = seq->private;
2178 	loff_t pos_minus_one;
2179 
2180 	state->tbl = tbl;
2181 	state->bucket = 0;
2182 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2183 
2184 	read_lock_bh(&tbl->lock);
2185 
2186 	pos_minus_one = *pos - 1;
2187 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2188 }
2189 EXPORT_SYMBOL(neigh_seq_start);
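/*
 * The pos_minus_one dance accounts for SEQ_START_TOKEN occupying
 * logical position 0: entry N of the table appears at seq_file
 * position N + 1.
 */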
2190 
2191 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2192 {
2193 	struct neigh_seq_state *state;
2194 	void *rc;
2195 
2196 	if (v == SEQ_START_TOKEN) {
2197 		rc = neigh_get_idx(seq, pos);
2198 		goto out;
2199 	}
2200 
2201 	state = seq->private;
2202 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2203 		rc = neigh_get_next(seq, v, NULL);
2204 		if (rc)
2205 			goto out;
2206 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2207 			rc = pneigh_get_first(seq);
2208 	} else {
2209 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2210 		rc = pneigh_get_next(seq, v, NULL);
2211 	}
2212 out:
2213 	++(*pos);
2214 	return rc;
2215 }
2216 EXPORT_SYMBOL(neigh_seq_next);
2217 
2218 void neigh_seq_stop(struct seq_file *seq, void *v)
2219 {
2220 	struct neigh_seq_state *state = seq->private;
2221 	struct neigh_table *tbl = state->tbl;
2222 
2223 	read_unlock_bh(&tbl->lock);
2224 }
2225 EXPORT_SYMBOL(neigh_seq_stop);
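/*
 * How a protocol wires these helpers into its seq_file (a sketch
 * modelled on the arp_tbl usage in net/ipv4/arp.c; the names below
 * follow that file but are simplified here):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static struct seq_operations arp_seq_ops = {
 *		.start	= arp_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= arp_seq_show,
 *	};
 *
 * neigh_seq_start() takes tbl->lock for reading and neigh_seq_stop()
 * releases it, so ->show runs under the lock and must not sleep.
 */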
2226 
2227 /* Per-CPU neighbour cache statistics, exported via seq_file. */
2228 
2229 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2230 {
2231 	struct proc_dir_entry *pde = seq->private;
2232 	struct neigh_table *tbl = pde->data;
2233 	int cpu;
2234 
2235 	if (*pos == 0)
2236 		return SEQ_START_TOKEN;
2237 
2238 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2239 		if (!cpu_possible(cpu))
2240 			continue;
2241 		*pos = cpu+1;
2242 		return per_cpu_ptr(tbl->stats, cpu);
2243 	}
2244 	return NULL;
2245 }
2246 
2247 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2248 {
2249 	struct proc_dir_entry *pde = seq->private;
2250 	struct neigh_table *tbl = pde->data;
2251 	int cpu;
2252 
2253 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2254 		if (!cpu_possible(cpu))
2255 			continue;
2256 		*pos = cpu+1;
2257 		return per_cpu_ptr(tbl->stats, cpu);
2258 	}
2259 	return NULL;
2260 }
2261 
2262 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2263 {
2264 
2265 }
2266 
2267 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2268 {
2269 	struct proc_dir_entry *pde = seq->private;
2270 	struct neigh_table *tbl = pde->data;
2271 	struct neigh_statistics *st = v;
2272 
2273 	if (v == SEQ_START_TOKEN) {
2274 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2275 		return 0;
2276 	}
2277 
2278 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2279 			"%08lx %08lx  %08lx %08lx\n",
2280 		   atomic_read(&tbl->entries),
2281 
2282 		   st->allocs,
2283 		   st->destroys,
2284 		   st->hash_grows,
2285 
2286 		   st->lookups,
2287 		   st->hits,
2288 
2289 		   st->res_failed,
2290 
2291 		   st->rcv_probes_mcast,
2292 		   st->rcv_probes_ucast,
2293 
2294 		   st->periodic_gc_runs,
2295 		   st->forced_gc_runs
2296 		   );
2297 
2298 	return 0;
2299 }
2300 
2301 static struct seq_operations neigh_stat_seq_ops = {
2302 	.start	= neigh_stat_seq_start,
2303 	.next	= neigh_stat_seq_next,
2304 	.stop	= neigh_stat_seq_stop,
2305 	.show	= neigh_stat_seq_show,
2306 };
2307 
2308 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2309 {
2310 	int ret = seq_open(file, &neigh_stat_seq_ops);
2311 
2312 	if (!ret) {
2313 		struct seq_file *sf = file->private_data;
2314 		sf->private = PDE(inode);
2315 	}
2316 	return ret;
2317 }
2318 
2319 static struct file_operations neigh_stat_seq_fops = {
2320 	.owner	 = THIS_MODULE,
2321 	.open 	 = neigh_stat_seq_open,
2322 	.read	 = seq_read,
2323 	.llseek	 = seq_lseek,
2324 	.release = seq_release,
2325 };
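/*
 * These fops are hooked up by neigh_table_init(), which (in this
 * revision, roughly) creates a per-table /proc/net/stat/<tbl->id>
 * entry and points its data at the table:
 *
 *	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
 *	if (tbl->pde) {
 *		tbl->pde->data = tbl;
 *		tbl->pde->proc_fops = &neigh_stat_seq_fops;
 *	}
 *
 * which is how the seq handlers above recover the table through
 * PDE(inode)->data.
 */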
2326 
2327 #endif /* CONFIG_PROC_FS */
2328 
2329 #ifdef CONFIG_ARPD
2330 void neigh_app_ns(struct neighbour *n)
2331 {
2332 	struct nlmsghdr  *nlh;
2333 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2334 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2335 
2336 	if (!skb)
2337 		return;
2338 
2339 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2340 		kfree_skb(skb);
2341 		return;
2342 	}
2343 	nlh			   = (struct nlmsghdr *)skb->data;
2344 	nlh->nlmsg_flags	   = NLM_F_REQUEST;
2345 	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
2346 	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
2347 }
2348 
2349 static void neigh_app_notify(struct neighbour *n)
2350 {
2351 	struct nlmsghdr *nlh;
2352 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2353 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2354 
2355 	if (!skb)
2356 		return;
2357 
2358 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2359 		kfree_skb(skb);
2360 		return;
2361 	}
2362 	nlh			   = (struct nlmsghdr *)skb->data;
2363 	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
2364 	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
2365 }
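/*
 * Note the asymmetry between the two helpers above: neigh_app_ns()
 * broadcasts an RTM_GETNEIGH *request*, asking a user space daemon
 * (arpd) to resolve the address on the kernel's behalf, while
 * neigh_app_notify() broadcasts an RTM_NEWNEIGH event reporting a
 * state change.  Both go out on the RTMGRP_NEIGH multicast group.
 */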
2366 
2367 #endif /* CONFIG_ARPD */
2368 
2369 #ifdef CONFIG_SYSCTL
2370 
2371 static struct neigh_sysctl_table {
2372 	struct ctl_table_header *sysctl_header;
2373 	ctl_table		neigh_vars[__NET_NEIGH_MAX];
2374 	ctl_table		neigh_dev[2];
2375 	ctl_table		neigh_neigh_dir[2];
2376 	ctl_table		neigh_proto_dir[2];
2377 	ctl_table		neigh_root_dir[2];
2378 } neigh_sysctl_template = {
2379 	.neigh_vars = {
2380 		{
2381 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2382 			.procname	= "mcast_solicit",
2383 			.maxlen		= sizeof(int),
2384 			.mode		= 0644,
2385 			.proc_handler	= &proc_dointvec,
2386 		},
2387 		{
2388 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2389 			.procname	= "ucast_solicit",
2390 			.maxlen		= sizeof(int),
2391 			.mode		= 0644,
2392 			.proc_handler	= &proc_dointvec,
2393 		},
2394 		{
2395 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2396 			.procname	= "app_solicit",
2397 			.maxlen		= sizeof(int),
2398 			.mode		= 0644,
2399 			.proc_handler	= &proc_dointvec,
2400 		},
2401 		{
2402 			.ctl_name	= NET_NEIGH_RETRANS_TIME,
2403 			.procname	= "retrans_time",
2404 			.maxlen		= sizeof(int),
2405 			.mode		= 0644,
2406 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2407 		},
2408 		{
2409 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2410 			.procname	= "base_reachable_time",
2411 			.maxlen		= sizeof(int),
2412 			.mode		= 0644,
2413 			.proc_handler	= &proc_dointvec_jiffies,
2414 			.strategy	= &sysctl_jiffies,
2415 		},
2416 		{
2417 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2418 			.procname	= "delay_first_probe_time",
2419 			.maxlen		= sizeof(int),
2420 			.mode		= 0644,
2421 			.proc_handler	= &proc_dointvec_jiffies,
2422 			.strategy	= &sysctl_jiffies,
2423 		},
2424 		{
2425 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2426 			.procname	= "gc_stale_time",
2427 			.maxlen		= sizeof(int),
2428 			.mode		= 0644,
2429 			.proc_handler	= &proc_dointvec_jiffies,
2430 			.strategy	= &sysctl_jiffies,
2431 		},
2432 		{
2433 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2434 			.procname	= "unres_qlen",
2435 			.maxlen		= sizeof(int),
2436 			.mode		= 0644,
2437 			.proc_handler	= &proc_dointvec,
2438 		},
2439 		{
2440 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2441 			.procname	= "proxy_qlen",
2442 			.maxlen		= sizeof(int),
2443 			.mode		= 0644,
2444 			.proc_handler	= &proc_dointvec,
2445 		},
2446 		{
2447 			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
2448 			.procname	= "anycast_delay",
2449 			.maxlen		= sizeof(int),
2450 			.mode		= 0644,
2451 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2452 		},
2453 		{
2454 			.ctl_name	= NET_NEIGH_PROXY_DELAY,
2455 			.procname	= "proxy_delay",
2456 			.maxlen		= sizeof(int),
2457 			.mode		= 0644,
2458 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2459 		},
2460 		{
2461 			.ctl_name	= NET_NEIGH_LOCKTIME,
2462 			.procname	= "locktime",
2463 			.maxlen		= sizeof(int),
2464 			.mode		= 0644,
2465 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2466 		},
2467 		{
2468 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2469 			.procname	= "gc_interval",
2470 			.maxlen		= sizeof(int),
2471 			.mode		= 0644,
2472 			.proc_handler	= &proc_dointvec_jiffies,
2473 			.strategy	= &sysctl_jiffies,
2474 		},
2475 		{
2476 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2477 			.procname	= "gc_thresh1",
2478 			.maxlen		= sizeof(int),
2479 			.mode		= 0644,
2480 			.proc_handler	= &proc_dointvec,
2481 		},
2482 		{
2483 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2484 			.procname	= "gc_thresh2",
2485 			.maxlen		= sizeof(int),
2486 			.mode		= 0644,
2487 			.proc_handler	= &proc_dointvec,
2488 		},
2489 		{
2490 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2491 			.procname	= "gc_thresh3",
2492 			.maxlen		= sizeof(int),
2493 			.mode		= 0644,
2494 			.proc_handler	= &proc_dointvec,
2495 		},
2496 		{
2497 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2498 			.procname	= "retrans_time_ms",
2499 			.maxlen		= sizeof(int),
2500 			.mode		= 0644,
2501 			.proc_handler	= &proc_dointvec_ms_jiffies,
2502 			.strategy	= &sysctl_ms_jiffies,
2503 		},
2504 		{
2505 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2506 			.procname	= "base_reachable_time_ms",
2507 			.maxlen		= sizeof(int),
2508 			.mode		= 0644,
2509 			.proc_handler	= &proc_dointvec_ms_jiffies,
2510 			.strategy	= &sysctl_ms_jiffies,
2511 		},
2512 	},
2513 	.neigh_dev = {
2514 		{
2515 			.ctl_name	= NET_PROTO_CONF_DEFAULT,
2516 			.procname	= "default",
2517 			.mode		= 0555,
2518 		},
2519 	},
2520 	.neigh_neigh_dir = {
2521 		{
2522 			.procname	= "neigh",
2523 			.mode		= 0555,
2524 		},
2525 	},
2526 	.neigh_proto_dir = {
2527 		{
2528 			.mode		= 0555,
2529 		},
2530 	},
2531 	.neigh_root_dir = {
2532 		{
2533 			.ctl_name	= CTL_NET,
2534 			.procname	= "net",
2535 			.mode		= 0555,
2536 		},
2537 	},
2538 };
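/*
 * The slot positions in neigh_vars[] are load-bearing: indices 0-11
 * are bound to per-parms fields in neigh_sysctl_register() below,
 * indices 12-15 (the gc_* knobs) are kept only for the table-wide
 * "default" directory, and indices 16-17 are the millisecond aliases
 * of retrans_time and base_reachable_time.
 */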
2539 
2540 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2541 			  int p_id, int pdev_id, char *p_name,
2542 			  proc_handler *handler, ctl_handler *strategy)
2543 {
2544 	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2545 	const char *dev_name_source = NULL;
2546 	char *dev_name = NULL;
2547 	int err = 0;
2548 
2549 	if (!t)
2550 		return -ENOBUFS;
2551 	memcpy(t, &neigh_sysctl_template, sizeof(*t));
2552 	t->neigh_vars[0].data  = &p->mcast_probes;
2553 	t->neigh_vars[1].data  = &p->ucast_probes;
2554 	t->neigh_vars[2].data  = &p->app_probes;
2555 	t->neigh_vars[3].data  = &p->retrans_time;
2556 	t->neigh_vars[4].data  = &p->base_reachable_time;
2557 	t->neigh_vars[5].data  = &p->delay_probe_time;
2558 	t->neigh_vars[6].data  = &p->gc_staletime;
2559 	t->neigh_vars[7].data  = &p->queue_len;
2560 	t->neigh_vars[8].data  = &p->proxy_qlen;
2561 	t->neigh_vars[9].data  = &p->anycast_delay;
2562 	t->neigh_vars[10].data = &p->proxy_delay;
2563 	t->neigh_vars[11].data = &p->locktime;
2564 
2565 	if (dev) {
2566 		dev_name_source = dev->name;
2567 		t->neigh_dev[0].ctl_name = dev->ifindex;
2568 		t->neigh_vars[12].procname = NULL;
2569 		t->neigh_vars[13].procname = NULL;
2570 		t->neigh_vars[14].procname = NULL;
2571 		t->neigh_vars[15].procname = NULL;
2572 	} else {
2573 		dev_name_source = t->neigh_dev[0].procname;
2574 		t->neigh_vars[12].data = (int *)(p + 1);
2575 		t->neigh_vars[13].data = (int *)(p + 1) + 1;
2576 		t->neigh_vars[14].data = (int *)(p + 1) + 2;
2577 		t->neigh_vars[15].data = (int *)(p + 1) + 3;
2578 	}
2579 
2580 	t->neigh_vars[16].data  = &p->retrans_time;
2581 	t->neigh_vars[17].data  = &p->base_reachable_time;
2582 
2583 	if (handler || strategy) {
2584 		/* RetransTime */
2585 		t->neigh_vars[3].proc_handler = handler;
2586 		t->neigh_vars[3].strategy = strategy;
2587 		t->neigh_vars[3].extra1 = dev;
2588 		/* ReachableTime */
2589 		t->neigh_vars[4].proc_handler = handler;
2590 		t->neigh_vars[4].strategy = strategy;
2591 		t->neigh_vars[4].extra1 = dev;
2592 		/* RetransTime (in milliseconds)*/
2593 		t->neigh_vars[16].proc_handler = handler;
2594 		t->neigh_vars[16].strategy = strategy;
2595 		t->neigh_vars[16].extra1 = dev;
2596 		/* ReachableTime (in milliseconds) */
2597 		t->neigh_vars[17].proc_handler = handler;
2598 		t->neigh_vars[17].strategy = strategy;
2599 		t->neigh_vars[17].extra1 = dev;
2600 	}
2601 
2602 	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2603 	if (!dev_name) {
2604 		err = -ENOBUFS;
2605 		goto free;
2606 	}
2607 
2608 	t->neigh_dev[0].procname = dev_name;
2609 
2610 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
2611 
2612 	t->neigh_proto_dir[0].procname = p_name;
2613 	t->neigh_proto_dir[0].ctl_name = p_id;
2614 
2615 	t->neigh_dev[0].child	       = t->neigh_vars;
2616 	t->neigh_neigh_dir[0].child    = t->neigh_dev;
2617 	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2618 	t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2619 
2620 	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2621 	if (!t->sysctl_header) {
2622 		err = -ENOBUFS;
2623 		goto free_procname;
2624 	}
2625 	p->sysctl_table = t;
2626 	return 0;
2627 
2628 	/* error path */
2629  free_procname:
2630 	kfree(dev_name);
2631  free:
2632 	kfree(t);
2633 
2634 	return err;
2635 }
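/*
 * A registration sketch (modelled on the ipv4 ARP usage; the exact
 * in-tree call sites may differ slightly): each neigh_parms gets a
 * subtree under /proc/sys/net/<p_name>/neigh/<ifname>/, or under
 * .../neigh/default/ when dev is NULL.
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 *
 * Passing dev == NULL also enables the table-wide gc_* entries
 * (slots 12-15 above); with a real device those slots are masked out
 * by clearing their procnames.
 */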
2636 
2637 void neigh_sysctl_unregister(struct neigh_parms *p)
2638 {
2639 	if (p->sysctl_table) {
2640 		struct neigh_sysctl_table *t = p->sysctl_table;
2641 		p->sysctl_table = NULL;
2642 		unregister_sysctl_table(t->sysctl_header);
2643 		kfree(t->neigh_dev[0].procname);
2644 		kfree(t);
2645 	}
2646 }
2647 
2648 #endif	/* CONFIG_SYSCTL */
2649 
2650 EXPORT_SYMBOL(__neigh_event_send);
2651 EXPORT_SYMBOL(neigh_add);
2652 EXPORT_SYMBOL(neigh_changeaddr);
2653 EXPORT_SYMBOL(neigh_compat_output);
2654 EXPORT_SYMBOL(neigh_connected_output);
2655 EXPORT_SYMBOL(neigh_create);
2656 EXPORT_SYMBOL(neigh_delete);
2657 EXPORT_SYMBOL(neigh_destroy);
2658 EXPORT_SYMBOL(neigh_dump_info);
2659 EXPORT_SYMBOL(neigh_event_ns);
2660 EXPORT_SYMBOL(neigh_ifdown);
2661 EXPORT_SYMBOL(neigh_lookup);
2662 EXPORT_SYMBOL(neigh_lookup_nodev);
2663 EXPORT_SYMBOL(neigh_parms_alloc);
2664 EXPORT_SYMBOL(neigh_parms_release);
2665 EXPORT_SYMBOL(neigh_rand_reach_time);
2666 EXPORT_SYMBOL(neigh_resolve_output);
2667 EXPORT_SYMBOL(neigh_table_clear);
2668 EXPORT_SYMBOL(neigh_table_init);
2669 EXPORT_SYMBOL(neigh_update);
2670 EXPORT_SYMBOL(neigh_update_hhs);
2671 EXPORT_SYMBOL(pneigh_enqueue);
2672 EXPORT_SYMBOL(pneigh_lookup);
2673 EXPORT_SYMBOL(neightbl_dump_info);
2674 EXPORT_SYMBOL(neightbl_set);
2675 
2676 #ifdef CONFIG_ARPD
2677 EXPORT_SYMBOL(neigh_app_ns);
2678 #endif
2679 #ifdef CONFIG_SYSCTL
2680 EXPORT_SYMBOL(neigh_sysctl_register);
2681 EXPORT_SYMBOL(neigh_sysctl_unregister);
2682 #endif
2683