xref: /linux/net/core/neighbour.c (revision 6e8331ac6973435b1e7604c30f2ad394035b46e1)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/sched.h>
23 #include <linux/netdevice.h>
24 #include <linux/proc_fs.h>
25 #ifdef CONFIG_SYSCTL
26 #include <linux/sysctl.h>
27 #endif
28 #include <linux/times.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 
37 #define NEIGH_DEBUG 1
38 
39 #define NEIGH_PRINTK(x...) printk(x)
40 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
41 #define NEIGH_PRINTK0 NEIGH_PRINTK
42 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
43 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
44 
45 #if NEIGH_DEBUG >= 1
46 #undef NEIGH_PRINTK1
47 #define NEIGH_PRINTK1 NEIGH_PRINTK
48 #endif
49 #if NEIGH_DEBUG >= 2
50 #undef NEIGH_PRINTK2
51 #define NEIGH_PRINTK2 NEIGH_PRINTK
52 #endif
53 
54 #define PNEIGH_HASHMASK		0xF
55 
56 static void neigh_timer_handler(unsigned long arg);
57 #ifdef CONFIG_ARPD
58 static void neigh_app_notify(struct neighbour *n);
59 #endif
60 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62 
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static struct file_operations neigh_stat_seq_fops;
66 #endif
67 
68 /*
69    Neighbour hash table buckets are protected by the rwlock tbl->lock.
70 
71    - All scans/updates of the hash buckets MUST be done under this lock.
72    - NOTHING clever should be done under this lock: no callbacks into
73      protocol backends, no attempts to send anything to the network.
74      Doing so will deadlock if the backend/driver wants to use the
75      neighbour cache.
76    - If an entry requires some non-trivial actions, increase
77      its reference count and release the table lock.
78 
79    Neighbour entries are protected:
80    - by their reference count.
81    - by the rwlock neigh->lock.
82 
83    The reference count prevents destruction.
84 
85    neigh->lock mainly serializes the ll address data and its validity state.
86    However, the same lock is also used to protect other entry fields:
87     - the timer
88     - the resolution queue
89 
90    Again, nothing clever shall be done under neigh->lock;
91    the most complicated procedure we allow is dev->hard_header.
92    dev->hard_header is assumed to be simple and not to make callbacks
93    into the neighbour tables.
94 
95    The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting the
96    list of neighbour tables.  This list is used only in process context.
97  */
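/* Illustrative sketch (editorial, not part of the original file): the safe
 * pattern the rules above prescribe when an entry needs non-trivial work,
 * such as transmitting a probe.  Take a reference under the table lock,
 * drop the lock, and only then call into the backend:
 *
 *	read_lock_bh(&tbl->lock);
 *	n = find_entry_in_bucket(tbl, pkey, dev);	// hypothetical helper
 *	if (n)
 *		neigh_hold(n);			// pin the entry...
 *	read_unlock_bh(&tbl->lock);		// ...then drop the table lock
 *	if (n) {
 *		n->ops->solicit(n, skb);	// callbacks are now safe
 *		neigh_release(n);
 *	}
 */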
98 
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100 
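/* Output method installed on dead or unresolvable entries: drop the packet
 * and report the network as down. */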
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103 	kfree_skb(skb);
104 	return -ENETDOWN;
105 }
106 
107 /*
108  * Returns a value uniformly distributed over the interval
109  * (1/2)*base ... (3/2)*base.  This matches the default IPv6 settings
110  * and is not overridable, because it is a really reasonable choice.
111  */
112 
113 unsigned long neigh_rand_reach_time(unsigned long base)
114 {
115 	return (base ? (net_random() % base) + (base >> 1) : 0);
116 }
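/* Example (editorial): with base = 30 * HZ the result is uniformly
 * distributed over [15 * HZ, 45 * HZ):
 *
 *	unsigned long t = neigh_rand_reach_time(30 * HZ);
 *	t == (net_random() % (30 * HZ)) + ((30 * HZ) >> 1)
 */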
117 
118 
119 static int neigh_forced_gc(struct neigh_table *tbl)
120 {
121 	int shrunk = 0;
122 	int i;
123 
124 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
125 
126 	write_lock_bh(&tbl->lock);
127 	for (i = 0; i <= tbl->hash_mask; i++) {
128 		struct neighbour *n, **np;
129 
130 		np = &tbl->hash_buckets[i];
131 		while ((n = *np) != NULL) {
132 			/* A neighbour record may be discarded if:
133 			 * - nobody refers to it, and
134 			 * - it is not permanent.
135 			 */
136 			write_lock(&n->lock);
137 			if (atomic_read(&n->refcnt) == 1 &&
138 			    !(n->nud_state & NUD_PERMANENT)) {
139 				*np	= n->next;
140 				n->dead = 1;
141 				shrunk	= 1;
142 				write_unlock(&n->lock);
143 				neigh_release(n);
144 				continue;
145 			}
146 			write_unlock(&n->lock);
147 			np = &n->next;
148 		}
149 	}
150 
151 	tbl->last_flush = jiffies;
152 
153 	write_unlock_bh(&tbl->lock);
154 
155 	return shrunk;
156 }
157 
158 static int neigh_del_timer(struct neighbour *n)
159 {
160 	if ((n->nud_state & NUD_IN_TIMER) &&
161 	    del_timer(&n->timer)) {
162 		neigh_release(n);
163 		return 1;
164 	}
165 	return 0;
166 }
167 
168 static void pneigh_queue_purge(struct sk_buff_head *list)
169 {
170 	struct sk_buff *skb;
171 
172 	while ((skb = skb_dequeue(list)) != NULL) {
173 		dev_put(skb->dev);
174 		kfree_skb(skb);
175 	}
176 }
177 
178 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
179 {
180 	int i;
181 
182 	for (i = 0; i <= tbl->hash_mask; i++) {
183 		struct neighbour *n, **np = &tbl->hash_buckets[i];
184 
185 		while ((n = *np) != NULL) {
186 			if (dev && n->dev != dev) {
187 				np = &n->next;
188 				continue;
189 			}
190 			*np = n->next;
191 			write_lock(&n->lock);
192 			neigh_del_timer(n);
193 			n->dead = 1;
194 
195 			if (atomic_read(&n->refcnt) != 1) {
196 				/* The most unpleasant situation:
197 				   we must destroy the neighbour entry,
198 				   but someone still uses it.
199 
200 				   The destroy will be delayed until
201 				   the last user releases us, but
202 				   we must kill the timers etc. and
203 				   move it to a safe state.
204 				 */
205 				skb_queue_purge(&n->arp_queue);
206 				n->output = neigh_blackhole;
207 				if (n->nud_state & NUD_VALID)
208 					n->nud_state = NUD_NOARP;
209 				else
210 					n->nud_state = NUD_NONE;
211 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
212 			}
213 			write_unlock(&n->lock);
214 			neigh_release(n);
215 		}
216 	}
217 }
218 
219 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
220 {
221 	write_lock_bh(&tbl->lock);
222 	neigh_flush_dev(tbl, dev);
223 	write_unlock_bh(&tbl->lock);
224 }
225 
226 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
227 {
228 	write_lock_bh(&tbl->lock);
229 	neigh_flush_dev(tbl, dev);
230 	pneigh_ifdown(tbl, dev);
231 	write_unlock_bh(&tbl->lock);
232 
233 	del_timer_sync(&tbl->proxy_timer);
234 	pneigh_queue_purge(&tbl->proxy_queue);
235 	return 0;
236 }
237 
238 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
239 {
240 	struct neighbour *n = NULL;
241 	unsigned long now = jiffies;
242 	int entries;
243 
244 	entries = atomic_inc_return(&tbl->entries) - 1;
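	/* Synchronous GC policy (editorial note): a forced GC run is
	 * attempted once gc_thresh2 is exceeded and the last flush was more
	 * than 5 seconds ago; above gc_thresh3 the allocation fails outright
	 * if forced GC frees nothing. */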
245 	if (entries >= tbl->gc_thresh3 ||
246 	    (entries >= tbl->gc_thresh2 &&
247 	     time_after(now, tbl->last_flush + 5 * HZ))) {
248 		if (!neigh_forced_gc(tbl) &&
249 		    entries >= tbl->gc_thresh3)
250 			goto out_entries;
251 	}
252 
253 	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
254 	if (!n)
255 		goto out_entries;
256 
257 	memset(n, 0, tbl->entry_size);
258 
259 	skb_queue_head_init(&n->arp_queue);
260 	rwlock_init(&n->lock);
261 	n->updated	  = n->used = now;
262 	n->nud_state	  = NUD_NONE;
263 	n->output	  = neigh_blackhole;
264 	n->parms	  = neigh_parms_clone(&tbl->parms);
265 	init_timer(&n->timer);
266 	n->timer.function = neigh_timer_handler;
267 	n->timer.data	  = (unsigned long)n;
268 
269 	NEIGH_CACHE_STAT_INC(tbl, allocs);
270 	n->tbl		  = tbl;
271 	atomic_set(&n->refcnt, 1);
272 	n->dead		  = 1;
273 out:
274 	return n;
275 
276 out_entries:
277 	atomic_dec(&tbl->entries);
278 	goto out;
279 }
280 
281 static struct neighbour **neigh_hash_alloc(unsigned int entries)
282 {
283 	unsigned long size = entries * sizeof(struct neighbour *);
284 	struct neighbour **ret;
285 
286 	if (size <= PAGE_SIZE) {
287 		ret = kzalloc(size, GFP_ATOMIC);
288 	} else {
289 		ret = (struct neighbour **)
290 		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
291 	}
292 	return ret;
293 }
294 
295 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
296 {
297 	unsigned long size = entries * sizeof(struct neighbour *);
298 
299 	if (size <= PAGE_SIZE)
300 		kfree(hash);
301 	else
302 		free_pages((unsigned long)hash, get_order(size));
303 }
304 
305 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
306 {
307 	struct neighbour **new_hash, **old_hash;
308 	unsigned int i, new_hash_mask, old_entries;
309 
310 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
311 
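	/* The new size must be a power of two so that new_entries - 1 works
	 * as a hash mask. */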
312 	BUG_ON(new_entries & (new_entries - 1));
313 	new_hash = neigh_hash_alloc(new_entries);
314 	if (!new_hash)
315 		return;
316 
317 	old_entries = tbl->hash_mask + 1;
318 	new_hash_mask = new_entries - 1;
319 	old_hash = tbl->hash_buckets;
320 
321 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
322 	for (i = 0; i < old_entries; i++) {
323 		struct neighbour *n, *next;
324 
325 		for (n = old_hash[i]; n; n = next) {
326 			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
327 
328 			hash_val &= new_hash_mask;
329 			next = n->next;
330 
331 			n->next = new_hash[hash_val];
332 			new_hash[hash_val] = n;
333 		}
334 	}
335 	tbl->hash_buckets = new_hash;
336 	tbl->hash_mask = new_hash_mask;
337 
338 	neigh_hash_free(old_hash, old_entries);
339 }
340 
341 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
342 			       struct net_device *dev)
343 {
344 	struct neighbour *n;
345 	int key_len = tbl->key_len;
346 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
347 
348 	NEIGH_CACHE_STAT_INC(tbl, lookups);
349 
350 	read_lock_bh(&tbl->lock);
351 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
352 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
353 			neigh_hold(n);
354 			NEIGH_CACHE_STAT_INC(tbl, hits);
355 			break;
356 		}
357 	}
358 	read_unlock_bh(&tbl->lock);
359 	return n;
360 }
361 
362 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
363 {
364 	struct neighbour *n;
365 	int key_len = tbl->key_len;
366 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
367 
368 	NEIGH_CACHE_STAT_INC(tbl, lookups);
369 
370 	read_lock_bh(&tbl->lock);
371 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
372 		if (!memcmp(n->primary_key, pkey, key_len)) {
373 			neigh_hold(n);
374 			NEIGH_CACHE_STAT_INC(tbl, hits);
375 			break;
376 		}
377 	}
378 	read_unlock_bh(&tbl->lock);
379 	return n;
380 }
381 
382 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
383 			       struct net_device *dev)
384 {
385 	u32 hash_val;
386 	int key_len = tbl->key_len;
387 	int error;
388 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
389 
390 	if (!n) {
391 		rc = ERR_PTR(-ENOBUFS);
392 		goto out;
393 	}
394 
395 	memcpy(n->primary_key, pkey, key_len);
396 	n->dev = dev;
397 	dev_hold(dev);
398 
399 	/* Protocol specific setup. */
400 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
401 		rc = ERR_PTR(error);
402 		goto out_neigh_release;
403 	}
404 
405 	/* Device specific setup. */
406 	if (n->parms->neigh_setup &&
407 	    (error = n->parms->neigh_setup(n)) < 0) {
408 		rc = ERR_PTR(error);
409 		goto out_neigh_release;
410 	}
411 
412 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
413 
414 	write_lock_bh(&tbl->lock);
415 
416 	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
417 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
418 
419 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
420 
421 	if (n->parms->dead) {
422 		rc = ERR_PTR(-EINVAL);
423 		goto out_tbl_unlock;
424 	}
425 
426 	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
427 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
428 			neigh_hold(n1);
429 			rc = n1;
430 			goto out_tbl_unlock;
431 		}
432 	}
433 
434 	n->next = tbl->hash_buckets[hash_val];
435 	tbl->hash_buckets[hash_val] = n;
436 	n->dead = 0;
437 	neigh_hold(n);
438 	write_unlock_bh(&tbl->lock);
439 	NEIGH_PRINTK2("neigh %p is created.\n", n);
440 	rc = n;
441 out:
442 	return rc;
443 out_tbl_unlock:
444 	write_unlock_bh(&tbl->lock);
445 out_neigh_release:
446 	neigh_release(n);
447 	goto out;
448 }
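/* Typical usage (editorial sketch): protocols resolve-or-create through the
 * __neigh_lookup_errno() helper, which combines the two calls above:
 *
 *	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
 *	if (!n)
 *		n = neigh_create(tbl, pkey, dev);	// may return ERR_PTR()
 */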
449 
450 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
451 				    struct net_device *dev, int creat)
452 {
453 	struct pneigh_entry *n;
454 	int key_len = tbl->key_len;
455 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
456 
457 	hash_val ^= (hash_val >> 16);
458 	hash_val ^= hash_val >> 8;
459 	hash_val ^= hash_val >> 4;
460 	hash_val &= PNEIGH_HASHMASK;
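	/* The xor folds above mix the high bits of the key tail down into
	 * the low 4 bits used as the bucket index (PNEIGH_HASHMASK == 0xF). */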
461 
462 	read_lock_bh(&tbl->lock);
463 
464 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
465 		if (!memcmp(n->key, pkey, key_len) &&
466 		    (n->dev == dev || !n->dev)) {
467 			read_unlock_bh(&tbl->lock);
468 			goto out;
469 		}
470 	}
471 	read_unlock_bh(&tbl->lock);
472 	n = NULL;
473 	if (!creat)
474 		goto out;
475 
476 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
477 	if (!n)
478 		goto out;
479 
480 	memcpy(n->key, pkey, key_len);
481 	n->dev = dev;
482 	if (dev)
483 		dev_hold(dev);
484 
485 	if (tbl->pconstructor && tbl->pconstructor(n)) {
486 		if (dev)
487 			dev_put(dev);
488 		kfree(n);
489 		n = NULL;
490 		goto out;
491 	}
492 
493 	write_lock_bh(&tbl->lock);
494 	n->next = tbl->phash_buckets[hash_val];
495 	tbl->phash_buckets[hash_val] = n;
496 	write_unlock_bh(&tbl->lock);
497 out:
498 	return n;
499 }
500 
501 
502 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
503 		  struct net_device *dev)
504 {
505 	struct pneigh_entry *n, **np;
506 	int key_len = tbl->key_len;
507 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
508 
509 	hash_val ^= (hash_val >> 16);
510 	hash_val ^= hash_val >> 8;
511 	hash_val ^= hash_val >> 4;
512 	hash_val &= PNEIGH_HASHMASK;
513 
514 	write_lock_bh(&tbl->lock);
515 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
516 	     np = &n->next) {
517 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
518 			*np = n->next;
519 			write_unlock_bh(&tbl->lock);
520 			if (tbl->pdestructor)
521 				tbl->pdestructor(n);
522 			if (n->dev)
523 				dev_put(n->dev);
524 			kfree(n);
525 			return 0;
526 		}
527 	}
528 	write_unlock_bh(&tbl->lock);
529 	return -ENOENT;
530 }
531 
532 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
533 {
534 	struct pneigh_entry *n, **np;
535 	u32 h;
536 
537 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
538 		np = &tbl->phash_buckets[h];
539 		while ((n = *np) != NULL) {
540 			if (!dev || n->dev == dev) {
541 				*np = n->next;
542 				if (tbl->pdestructor)
543 					tbl->pdestructor(n);
544 				if (n->dev)
545 					dev_put(n->dev);
546 				kfree(n);
547 				continue;
548 			}
549 			np = &n->next;
550 		}
551 	}
552 	return -ENOENT;
553 }
554 
555 
556 /*
557  *	The neighbour must already be out of the table
558  *	when this is called.
559  */
560 void neigh_destroy(struct neighbour *neigh)
561 {
562 	struct hh_cache *hh;
563 
564 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
565 
566 	if (!neigh->dead) {
567 		printk(KERN_WARNING
568 		       "Destroying alive neighbour %p\n", neigh);
569 		dump_stack();
570 		return;
571 	}
572 
573 	if (neigh_del_timer(neigh))
574 		printk(KERN_WARNING "Impossible event.\n");
575 
576 	while ((hh = neigh->hh) != NULL) {
577 		neigh->hh = hh->hh_next;
578 		hh->hh_next = NULL;
579 		write_lock_bh(&hh->hh_lock);
580 		hh->hh_output = neigh_blackhole;
581 		write_unlock_bh(&hh->hh_lock);
582 		if (atomic_dec_and_test(&hh->hh_refcnt))
583 			kfree(hh);
584 	}
585 
586 	if (neigh->parms->neigh_destructor)
587 		(neigh->parms->neigh_destructor)(neigh);
588 
589 	skb_queue_purge(&neigh->arp_queue);
590 
591 	dev_put(neigh->dev);
592 	neigh_parms_put(neigh->parms);
593 
594 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
595 
596 	atomic_dec(&neigh->tbl->entries);
597 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
598 }
599 
600 /* Neighbour state is suspicious;
601    disable the fast path.
602 
603    Called with the neigh entry write-locked.
604  */
605 static void neigh_suspect(struct neighbour *neigh)
606 {
607 	struct hh_cache *hh;
608 
609 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
610 
611 	neigh->output = neigh->ops->output;
612 
613 	for (hh = neigh->hh; hh; hh = hh->hh_next)
614 		hh->hh_output = neigh->ops->output;
615 }
616 
617 /* Neighbour state is OK;
618    enable the fast path.
619 
620    Called with the neigh entry write-locked.
621  */
622 static void neigh_connect(struct neighbour *neigh)
623 {
624 	struct hh_cache *hh;
625 
626 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
627 
628 	neigh->output = neigh->ops->connected_output;
629 
630 	for (hh = neigh->hh; hh; hh = hh->hh_next)
631 		hh->hh_output = neigh->ops->hh_output;
632 }
633 
634 static void neigh_periodic_timer(unsigned long arg)
635 {
636 	struct neigh_table *tbl = (struct neigh_table *)arg;
637 	struct neighbour *n, **np;
638 	unsigned long expire, now = jiffies;
639 
640 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
641 
642 	write_lock(&tbl->lock);
643 
644 	/*
645 	 *	periodically recompute ReachableTime from random function
646 	 */
647 
648 	if (time_after(now, tbl->last_rand + 300 * HZ)) {
649 		struct neigh_parms *p;
650 		tbl->last_rand = now;
651 		for (p = &tbl->parms; p; p = p->next)
652 			p->reachable_time =
653 				neigh_rand_reach_time(p->base_reachable_time);
654 	}
655 
656 	np = &tbl->hash_buckets[tbl->hash_chain_gc];
657 	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
658 
659 	while ((n = *np) != NULL) {
660 		unsigned int state;
661 
662 		write_lock(&n->lock);
663 
664 		state = n->nud_state;
665 		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
666 			write_unlock(&n->lock);
667 			goto next_elt;
668 		}
669 
670 		if (time_before(n->used, n->confirmed))
671 			n->used = n->confirmed;
672 
673 		if (atomic_read(&n->refcnt) == 1 &&
674 		    (state == NUD_FAILED ||
675 		     time_after(now, n->used + n->parms->gc_staletime))) {
676 			*np = n->next;
677 			n->dead = 1;
678 			write_unlock(&n->lock);
679 			neigh_release(n);
680 			continue;
681 		}
682 		write_unlock(&n->lock);
683 
684 next_elt:
685 		np = &n->next;
686 	}
687 
688 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
689 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
690 	 * base_reachable_time.
691 	 */
692 	expire = tbl->parms.base_reachable_time >> 1;
693 	expire /= (tbl->hash_mask + 1);
694 	if (!expire)
695 		expire = 1;
696 
697 	mod_timer(&tbl->gc_timer, now + expire);
698 
699 	write_unlock(&tbl->lock);
700 }
701 
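/* Maximum number of probes before the entry fails: in NUD_PROBE only
 * unicast probes are sent, so only ucast_probes counts; in NUD_INCOMPLETE
 * the unicast, application (arpd) and multicast probes all apply. */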
702 static __inline__ int neigh_max_probes(struct neighbour *n)
703 {
704 	struct neigh_parms *p = n->parms;
705 	return (n->nud_state & NUD_PROBE ?
706 		p->ucast_probes :
707 		p->ucast_probes + p->app_probes + p->mcast_probes);
708 }
709 
710 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
711 {
712 	if (unlikely(mod_timer(&n->timer, when))) {
713 		printk("NEIGH: BUG, double timer add, state is %x\n",
714 		       n->nud_state);
715 		dump_stack();
716 	}
717 }
718 
719 /* Called when a timer expires for a neighbour entry. */
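/* Editorial summary of the timer-driven NUD transitions implemented below:
 *
 *	REACHABLE -> DELAY	reachable_time elapsed, entry used recently
 *	REACHABLE -> STALE	reachable_time elapsed, entry idle
 *	DELAY     -> REACHABLE	confirmed within delay_probe_time
 *	DELAY     -> PROBE	no confirmation; start unicast probing
 *	PROBE/INCOMPLETE -> FAILED	neigh_max_probes() exceeded
 */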
720 
721 static void neigh_timer_handler(unsigned long arg)
722 {
723 	unsigned long now, next;
724 	struct neighbour *neigh = (struct neighbour *)arg;
725 	unsigned state;
726 	int notify = 0;
727 
728 	write_lock(&neigh->lock);
729 
730 	state = neigh->nud_state;
731 	now = jiffies;
732 	next = now + HZ;
733 
734 	if (!(state & NUD_IN_TIMER)) {
735 #ifndef CONFIG_SMP
736 		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
737 #endif
738 		goto out;
739 	}
740 
741 	if (state & NUD_REACHABLE) {
742 		if (time_before_eq(now,
743 				   neigh->confirmed + neigh->parms->reachable_time)) {
744 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
745 			next = neigh->confirmed + neigh->parms->reachable_time;
746 		} else if (time_before_eq(now,
747 					  neigh->used + neigh->parms->delay_probe_time)) {
748 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
749 			neigh->nud_state = NUD_DELAY;
750 			neigh->updated = jiffies;
751 			neigh_suspect(neigh);
752 			next = now + neigh->parms->delay_probe_time;
753 		} else {
754 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
755 			neigh->nud_state = NUD_STALE;
756 			neigh->updated = jiffies;
757 			neigh_suspect(neigh);
758 			notify = 1;
759 		}
760 	} else if (state & NUD_DELAY) {
761 		if (time_before_eq(now,
762 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
763 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
764 			neigh->nud_state = NUD_REACHABLE;
765 			neigh->updated = jiffies;
766 			neigh_connect(neigh);
767 			notify = 1;
768 			next = neigh->confirmed + neigh->parms->reachable_time;
769 		} else {
770 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
771 			neigh->nud_state = NUD_PROBE;
772 			neigh->updated = jiffies;
773 			atomic_set(&neigh->probes, 0);
774 			next = now + neigh->parms->retrans_time;
775 		}
776 	} else {
777 		/* NUD_PROBE|NUD_INCOMPLETE */
778 		next = now + neigh->parms->retrans_time;
779 	}
780 
781 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
782 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
783 		struct sk_buff *skb;
784 
785 		neigh->nud_state = NUD_FAILED;
786 		neigh->updated = jiffies;
787 		notify = 1;
788 		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
789 		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
790 
791 		/* This is a delicate spot: the error_report routine is very
792 		   complicated and, in particular, can hit this same entry again!
793 
794 		   So we try to be careful and avoid an endless loop. --ANK
795 		 */
796 		while (neigh->nud_state == NUD_FAILED &&
797 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
798 			write_unlock(&neigh->lock);
799 			neigh->ops->error_report(neigh, skb);
800 			write_lock(&neigh->lock);
801 		}
802 		skb_queue_purge(&neigh->arp_queue);
803 	}
804 
805 	if (neigh->nud_state & NUD_IN_TIMER) {
806 		if (time_before(next, jiffies + HZ/2))
807 			next = jiffies + HZ/2;
808 		if (!mod_timer(&neigh->timer, next))
809 			neigh_hold(neigh);
810 	}
811 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
812 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
813 		/* keep skb alive even if arp_queue overflows */
814 		if (skb)
815 			skb_get(skb);
816 		write_unlock(&neigh->lock);
817 		neigh->ops->solicit(neigh, skb);
818 		atomic_inc(&neigh->probes);
819 		if (skb)
820 			kfree_skb(skb);
821 	} else {
822 out:
823 		write_unlock(&neigh->lock);
824 	}
825 	if (notify)
826 		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
827 
828 #ifdef CONFIG_ARPD
829 	if (notify && neigh->parms->app_probes)
830 		neigh_app_notify(neigh);
831 #endif
832 	neigh_release(neigh);
833 }
834 
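/* Start resolution for a packet if needed.  Returns 0 when the caller may
 * transmit immediately, 1 when the skb has been queued (or dropped, on
 * failure) pending resolution. */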
835 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
836 {
837 	int rc;
838 	unsigned long now;
839 
840 	write_lock_bh(&neigh->lock);
841 
842 	rc = 0;
843 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
844 		goto out_unlock_bh;
845 
846 	now = jiffies;
847 
848 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
849 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
850 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
851 			neigh->nud_state     = NUD_INCOMPLETE;
852 			neigh->updated = jiffies;
853 			neigh_hold(neigh);
854 			neigh_add_timer(neigh, now + 1);
855 		} else {
856 			neigh->nud_state = NUD_FAILED;
857 			neigh->updated = jiffies;
858 			write_unlock_bh(&neigh->lock);
859 
860 			if (skb)
861 				kfree_skb(skb);
862 			return 1;
863 		}
864 	} else if (neigh->nud_state & NUD_STALE) {
865 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
866 		neigh_hold(neigh);
867 		neigh->nud_state = NUD_DELAY;
868 		neigh->updated = jiffies;
869 		neigh_add_timer(neigh,
870 				jiffies + neigh->parms->delay_probe_time);
871 	}
872 
873 	if (neigh->nud_state == NUD_INCOMPLETE) {
874 		if (skb) {
875 			if (skb_queue_len(&neigh->arp_queue) >=
876 			    neigh->parms->queue_len) {
877 				struct sk_buff *buff;
878 				buff = neigh->arp_queue.next;
879 				__skb_unlink(buff, &neigh->arp_queue);
880 				kfree_skb(buff);
881 			}
882 			__skb_queue_tail(&neigh->arp_queue, skb);
883 		}
884 		rc = 1;
885 	}
886 out_unlock_bh:
887 	write_unlock_bh(&neigh->lock);
888 	return rc;
889 }
890 
891 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
892 {
893 	struct hh_cache *hh;
894 	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
895 		neigh->dev->header_cache_update;
896 
897 	if (update) {
898 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
899 			write_lock_bh(&hh->hh_lock);
900 			update(hh, neigh->dev, neigh->ha);
901 			write_unlock_bh(&hh->hh_lock);
902 		}
903 	}
904 }
905 
906 
907 
908 /* Generic update routine.
909    -- lladdr is the new lladdr, or NULL if none is supplied.
910    -- new    is the new state.
911    -- flags
912 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
913 				if it is different.
914 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
915 				lladdr instead of overriding it
916 				if it is different.
917 				It also allows the current state to be
918 				retained if lladdr is unchanged.
919 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
920 
921 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
922 				NTF_ROUTER flag.
923 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
924 				be a router.
925 
926    The caller MUST hold a reference count on the entry.
927  */
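/* Example (editorial): neigh_event_ns() below refreshes an entry from a
 * received neighbour solicitation with
 *
 *	neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE);
 *
 * while the netlink paths (neigh_add/neigh_delete below) additionally pass
 * NEIGH_UPDATE_F_ADMIN for administrative changes.
 */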
928 
929 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
930 		 u32 flags)
931 {
932 	u8 old;
933 	int err;
934 	int notify = 0;
935 	struct net_device *dev;
936 	int update_isrouter = 0;
937 
938 	write_lock_bh(&neigh->lock);
939 
940 	dev    = neigh->dev;
941 	old    = neigh->nud_state;
942 	err    = -EPERM;
943 
944 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
945 	    (old & (NUD_NOARP | NUD_PERMANENT)))
946 		goto out;
947 
948 	if (!(new & NUD_VALID)) {
949 		neigh_del_timer(neigh);
950 		if (old & NUD_CONNECTED)
951 			neigh_suspect(neigh);
952 		neigh->nud_state = new;
953 		err = 0;
954 		notify = old & NUD_VALID;
955 		goto out;
956 	}
957 
958 	/* Compare new lladdr with cached one */
959 	if (!dev->addr_len) {
960 		/* First case: device needs no address. */
961 		lladdr = neigh->ha;
962 	} else if (lladdr) {
963 		/* The second case: something is already cached
964 		   and a new address is proposed:
965 		   - compare the new and old addresses
966 		   - if they differ, check the override flag
967 		 */
968 		if ((old & NUD_VALID) &&
969 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
970 			lladdr = neigh->ha;
971 	} else {
972 		/* No address is supplied; if we know something,
973 		   use it, otherwise discard the request.
974 		 */
975 		err = -EINVAL;
976 		if (!(old & NUD_VALID))
977 			goto out;
978 		lladdr = neigh->ha;
979 	}
980 
981 	if (new & NUD_CONNECTED)
982 		neigh->confirmed = jiffies;
983 	neigh->updated = jiffies;
984 
985 	/* If the entry was valid and the address has not changed,
986 	   do not change the entry state if the new one is STALE.
987 	 */
988 	err = 0;
989 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
990 	if (old & NUD_VALID) {
991 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
992 			update_isrouter = 0;
993 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
994 			    (old & NUD_CONNECTED)) {
995 				lladdr = neigh->ha;
996 				new = NUD_STALE;
997 			} else
998 				goto out;
999 		} else {
1000 			if (lladdr == neigh->ha && new == NUD_STALE &&
1001 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1002 			     (old & NUD_CONNECTED))
1003 			    )
1004 				new = old;
1005 		}
1006 	}
1007 
1008 	if (new != old) {
1009 		neigh_del_timer(neigh);
1010 		if (new & NUD_IN_TIMER) {
1011 			neigh_hold(neigh);
1012 			neigh_add_timer(neigh, (jiffies +
1013 						((new & NUD_REACHABLE) ?
1014 						 neigh->parms->reachable_time :
1015 						 0)));
1016 		}
1017 		neigh->nud_state = new;
1018 	}
1019 
1020 	if (lladdr != neigh->ha) {
1021 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1022 		neigh_update_hhs(neigh);
1023 		if (!(new & NUD_CONNECTED))
1024 			neigh->confirmed = jiffies -
1025 				      (neigh->parms->base_reachable_time << 1);
1026 		notify = 1;
1027 	}
1028 	if (new == old)
1029 		goto out;
1030 	if (new & NUD_CONNECTED)
1031 		neigh_connect(neigh);
1032 	else
1033 		neigh_suspect(neigh);
1034 	if (!(old & NUD_VALID)) {
1035 		struct sk_buff *skb;
1036 
1037 		/* Again: avoid an endless loop if something goes wrong */
1038 
1039 		while (neigh->nud_state & NUD_VALID &&
1040 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1041 			struct neighbour *n1 = neigh;
1042 			write_unlock_bh(&neigh->lock);
1043 			/* On shaper/eql skb->dst->neighbour != neigh :( */
1044 			if (skb->dst && skb->dst->neighbour)
1045 				n1 = skb->dst->neighbour;
1046 			n1->output(skb);
1047 			write_lock_bh(&neigh->lock);
1048 		}
1049 		skb_queue_purge(&neigh->arp_queue);
1050 	}
1051 out:
1052 	if (update_isrouter) {
1053 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1054 			(neigh->flags | NTF_ROUTER) :
1055 			(neigh->flags & ~NTF_ROUTER);
1056 	}
1057 	write_unlock_bh(&neigh->lock);
1058 
1059 	if (notify)
1060 		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
1061 #ifdef CONFIG_ARPD
1062 	if (notify && neigh->parms->app_probes)
1063 		neigh_app_notify(neigh);
1064 #endif
1065 	return err;
1066 }
1067 
1068 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1069 				 u8 *lladdr, void *saddr,
1070 				 struct net_device *dev)
1071 {
1072 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1073 						 lladdr || !dev->addr_len);
1074 	if (neigh)
1075 		neigh_update(neigh, lladdr, NUD_STALE,
1076 			     NEIGH_UPDATE_F_OVERRIDE);
1077 	return neigh;
1078 }
1079 
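/* Attach (or reuse) a cached hardware header for the given protocol on this
 * dst; hh entries are chained per neighbour, one per protocol type. */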
1080 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1081 			  u16 protocol)
1082 {
1083 	struct hh_cache	*hh;
1084 	struct net_device *dev = dst->dev;
1085 
1086 	for (hh = n->hh; hh; hh = hh->hh_next)
1087 		if (hh->hh_type == protocol)
1088 			break;
1089 
1090 	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1091 		rwlock_init(&hh->hh_lock);
1092 		hh->hh_type = protocol;
1093 		atomic_set(&hh->hh_refcnt, 0);
1094 		hh->hh_next = NULL;
1095 		if (dev->hard_header_cache(n, hh)) {
1096 			kfree(hh);
1097 			hh = NULL;
1098 		} else {
1099 			atomic_inc(&hh->hh_refcnt);
1100 			hh->hh_next = n->hh;
1101 			n->hh	    = hh;
1102 			if (n->nud_state & NUD_CONNECTED)
1103 				hh->hh_output = n->ops->hh_output;
1104 			else
1105 				hh->hh_output = n->ops->output;
1106 		}
1107 	}
1108 	if (hh)	{
1109 		atomic_inc(&hh->hh_refcnt);
1110 		dst->hh = hh;
1111 	}
1112 }
1113 
1114 /* This function can be used in contexts where only the old dev_queue_xmit
1115    worked, e.g. if you want to override the normal output path (eql, shaper)
1116    but resolution has not been done yet.
1117  */
1118 
1119 int neigh_compat_output(struct sk_buff *skb)
1120 {
1121 	struct net_device *dev = skb->dev;
1122 
1123 	__skb_pull(skb, skb->nh.raw - skb->data);
1124 
1125 	if (dev->hard_header &&
1126 	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1127 		    	     skb->len) < 0 &&
1128 	    dev->rebuild_header(skb))
1129 		return 0;
1130 
1131 	return dev_queue_xmit(skb);
1132 }
1133 
1134 /* Slow and careful. */
1135 
1136 int neigh_resolve_output(struct sk_buff *skb)
1137 {
1138 	struct dst_entry *dst = skb->dst;
1139 	struct neighbour *neigh;
1140 	int rc = 0;
1141 
1142 	if (!dst || !(neigh = dst->neighbour))
1143 		goto discard;
1144 
1145 	__skb_pull(skb, skb->nh.raw - skb->data);
1146 
1147 	if (!neigh_event_send(neigh, skb)) {
1148 		int err;
1149 		struct net_device *dev = neigh->dev;
1150 		if (dev->hard_header_cache && !dst->hh) {
1151 			write_lock_bh(&neigh->lock);
1152 			if (!dst->hh)
1153 				neigh_hh_init(neigh, dst, dst->ops->protocol);
1154 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1155 					       neigh->ha, NULL, skb->len);
1156 			write_unlock_bh(&neigh->lock);
1157 		} else {
1158 			read_lock_bh(&neigh->lock);
1159 			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1160 					       neigh->ha, NULL, skb->len);
1161 			read_unlock_bh(&neigh->lock);
1162 		}
1163 		if (err >= 0)
1164 			rc = neigh->ops->queue_xmit(skb);
1165 		else
1166 			goto out_kfree_skb;
1167 	}
1168 out:
1169 	return rc;
1170 discard:
1171 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1172 		      dst, dst ? dst->neighbour : NULL);
1173 out_kfree_skb:
1174 	rc = -EINVAL;
1175 	kfree_skb(skb);
1176 	goto out;
1177 }
1178 
1179 /* As fast as possible without hh cache */
1180 
1181 int neigh_connected_output(struct sk_buff *skb)
1182 {
1183 	int err;
1184 	struct dst_entry *dst = skb->dst;
1185 	struct neighbour *neigh = dst->neighbour;
1186 	struct net_device *dev = neigh->dev;
1187 
1188 	__skb_pull(skb, skb->nh.raw - skb->data);
1189 
1190 	read_lock_bh(&neigh->lock);
1191 	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1192 			       neigh->ha, NULL, skb->len);
1193 	read_unlock_bh(&neigh->lock);
1194 	if (err >= 0)
1195 		err = neigh->ops->queue_xmit(skb);
1196 	else {
1197 		err = -EINVAL;
1198 		kfree_skb(skb);
1199 	}
1200 	return err;
1201 }
1202 
1203 static void neigh_proxy_process(unsigned long arg)
1204 {
1205 	struct neigh_table *tbl = (struct neigh_table *)arg;
1206 	long sched_next = 0;
1207 	unsigned long now = jiffies;
1208 	struct sk_buff *skb;
1209 
1210 	spin_lock(&tbl->proxy_queue.lock);
1211 
1212 	skb = tbl->proxy_queue.next;
1213 
1214 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1215 		struct sk_buff *back = skb;
1216 		long tdif = NEIGH_CB(back)->sched_next - now;
1217 
1218 		skb = skb->next;
1219 		if (tdif <= 0) {
1220 			struct net_device *dev = back->dev;
1221 			__skb_unlink(back, &tbl->proxy_queue);
1222 			if (tbl->proxy_redo && netif_running(dev))
1223 				tbl->proxy_redo(back);
1224 			else
1225 				kfree_skb(back);
1226 
1227 			dev_put(dev);
1228 		} else if (!sched_next || tdif < sched_next)
1229 			sched_next = tdif;
1230 	}
1231 	del_timer(&tbl->proxy_timer);
1232 	if (sched_next)
1233 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1234 	spin_unlock(&tbl->proxy_queue.lock);
1235 }
1236 
1237 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1238 		    struct sk_buff *skb)
1239 {
1240 	unsigned long now = jiffies;
1241 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
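	/* The reply is deliberately scheduled at a random point in
	 * [now, now + proxy_delay) instead of being sent immediately. */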
1242 
1243 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1244 		kfree_skb(skb);
1245 		return;
1246 	}
1247 
1248 	NEIGH_CB(skb)->sched_next = sched_next;
1249 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1250 
1251 	spin_lock(&tbl->proxy_queue.lock);
1252 	if (del_timer(&tbl->proxy_timer)) {
1253 		if (time_before(tbl->proxy_timer.expires, sched_next))
1254 			sched_next = tbl->proxy_timer.expires;
1255 	}
1256 	dst_release(skb->dst);
1257 	skb->dst = NULL;
1258 	dev_hold(skb->dev);
1259 	__skb_queue_tail(&tbl->proxy_queue, skb);
1260 	mod_timer(&tbl->proxy_timer, sched_next);
1261 	spin_unlock(&tbl->proxy_queue.lock);
1262 }
1263 
1264 
1265 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1266 				      struct neigh_table *tbl)
1267 {
1268 	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1269 
1270 	if (p) {
1271 		memcpy(p, &tbl->parms, sizeof(*p));
1272 		p->tbl		  = tbl;
1273 		atomic_set(&p->refcnt, 1);
1274 		INIT_RCU_HEAD(&p->rcu_head);
1275 		p->reachable_time =
1276 				neigh_rand_reach_time(p->base_reachable_time);
1277 		if (dev) {
1278 			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1279 				kfree(p);
1280 				return NULL;
1281 			}
1282 
1283 			dev_hold(dev);
1284 			p->dev = dev;
1285 		}
1286 		p->sysctl_table = NULL;
1287 		write_lock_bh(&tbl->lock);
1288 		p->next		= tbl->parms.next;
1289 		tbl->parms.next = p;
1290 		write_unlock_bh(&tbl->lock);
1291 	}
1292 	return p;
1293 }
1294 
1295 static void neigh_rcu_free_parms(struct rcu_head *head)
1296 {
1297 	struct neigh_parms *parms =
1298 		container_of(head, struct neigh_parms, rcu_head);
1299 
1300 	neigh_parms_put(parms);
1301 }
1302 
1303 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1304 {
1305 	struct neigh_parms **p;
1306 
1307 	if (!parms || parms == &tbl->parms)
1308 		return;
1309 	write_lock_bh(&tbl->lock);
1310 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1311 		if (*p == parms) {
1312 			*p = parms->next;
1313 			parms->dead = 1;
1314 			write_unlock_bh(&tbl->lock);
1315 			if (parms->dev)
1316 				dev_put(parms->dev);
1317 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1318 			return;
1319 		}
1320 	}
1321 	write_unlock_bh(&tbl->lock);
1322 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1323 }
1324 
1325 void neigh_parms_destroy(struct neigh_parms *parms)
1326 {
1327 	kfree(parms);
1328 }
1329 
1330 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1331 {
1332 	unsigned long now = jiffies;
1333 	unsigned long phsize;
1334 
1335 	atomic_set(&tbl->parms.refcnt, 1);
1336 	INIT_RCU_HEAD(&tbl->parms.rcu_head);
1337 	tbl->parms.reachable_time =
1338 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1339 
1340 	if (!tbl->kmem_cachep)
1341 		tbl->kmem_cachep = kmem_cache_create(tbl->id,
1342 						     tbl->entry_size,
1343 						     0, SLAB_HWCACHE_ALIGN,
1344 						     NULL, NULL);
1345 
1346 	if (!tbl->kmem_cachep)
1347 		panic("cannot create neighbour cache");
1348 
1349 	tbl->stats = alloc_percpu(struct neigh_statistics);
1350 	if (!tbl->stats)
1351 		panic("cannot create neighbour cache statistics");
1352 
1353 #ifdef CONFIG_PROC_FS
1354 	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1355 	if (!tbl->pde)
1356 		panic("cannot create neighbour proc dir entry");
1357 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
1358 	tbl->pde->data = tbl;
1359 #endif
1360 
1361 	tbl->hash_mask = 1;
1362 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1363 
1364 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1365 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1366 
1367 	if (!tbl->hash_buckets || !tbl->phash_buckets)
1368 		panic("cannot allocate neighbour cache hashes");
1369 
1370 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1371 
1372 	rwlock_init(&tbl->lock);
1373 	init_timer(&tbl->gc_timer);
1374 	tbl->gc_timer.data     = (unsigned long)tbl;
1375 	tbl->gc_timer.function = neigh_periodic_timer;
1376 	tbl->gc_timer.expires  = now + 1;
1377 	add_timer(&tbl->gc_timer);
1378 
1379 	init_timer(&tbl->proxy_timer);
1380 	tbl->proxy_timer.data	  = (unsigned long)tbl;
1381 	tbl->proxy_timer.function = neigh_proxy_process;
1382 	skb_queue_head_init(&tbl->proxy_queue);
1383 
1384 	tbl->last_flush = now;
1385 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1386 }
1387 
1388 void neigh_table_init(struct neigh_table *tbl)
1389 {
1390 	struct neigh_table *tmp;
1391 
1392 	neigh_table_init_no_netlink(tbl);
1393 	write_lock(&neigh_tbl_lock);
1394 	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1395 		if (tmp->family == tbl->family)
1396 			break;
1397 	}
1398 	tbl->next	= neigh_tables;
1399 	neigh_tables	= tbl;
1400 	write_unlock(&neigh_tbl_lock);
1401 
1402 	if (unlikely(tmp)) {
1403 		printk(KERN_ERR "NEIGH: Registering multiple tables for "
1404 		       "family %d\n", tbl->family);
1405 		dump_stack();
1406 	}
1407 }
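/* Illustrative sketch (editorial): a protocol registers its table once at
 * init time.  ARP, for example, does roughly this (fields abridged):
 *
 *	static struct neigh_table arp_tbl = {
 *		.family		= AF_INET,
 *		.entry_size	= sizeof(struct neighbour) + 4,
 *		.key_len	= 4,
 *		.hash		= arp_hash,
 *		.constructor	= arp_constructor,
 *		.id		= "arp_cache",
 *		...
 *	};
 *	neigh_table_init(&arp_tbl);
 */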
1408 
1409 int neigh_table_clear(struct neigh_table *tbl)
1410 {
1411 	struct neigh_table **tp;
1412 
1413 	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1414 	del_timer_sync(&tbl->gc_timer);
1415 	del_timer_sync(&tbl->proxy_timer);
1416 	pneigh_queue_purge(&tbl->proxy_queue);
1417 	neigh_ifdown(tbl, NULL);
1418 	if (atomic_read(&tbl->entries))
1419 		printk(KERN_CRIT "neighbour leakage\n");
1420 	write_lock(&neigh_tbl_lock);
1421 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1422 		if (*tp == tbl) {
1423 			*tp = tbl->next;
1424 			break;
1425 		}
1426 	}
1427 	write_unlock(&neigh_tbl_lock);
1428 
1429 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1430 	tbl->hash_buckets = NULL;
1431 
1432 	kfree(tbl->phash_buckets);
1433 	tbl->phash_buckets = NULL;
1434 
1435 	return 0;
1436 }
1437 
1438 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1439 {
1440 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1441 	struct rtattr **nda = arg;
1442 	struct neigh_table *tbl;
1443 	struct net_device *dev = NULL;
1444 	int err = -ENODEV;
1445 
1446 	if (ndm->ndm_ifindex &&
1447 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1448 		goto out;
1449 
1450 	read_lock(&neigh_tbl_lock);
1451 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1452 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1453 		struct neighbour *n;
1454 
1455 		if (tbl->family != ndm->ndm_family)
1456 			continue;
1457 		read_unlock(&neigh_tbl_lock);
1458 
1459 		err = -EINVAL;
1460 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1461 			goto out_dev_put;
1462 
1463 		if (ndm->ndm_flags & NTF_PROXY) {
1464 			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
1465 			goto out_dev_put;
1466 		}
1467 
1468 		if (!dev)
1469 			goto out;
1470 
1471 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1472 		if (n) {
1473 			err = neigh_update(n, NULL, NUD_FAILED,
1474 					   NEIGH_UPDATE_F_OVERRIDE|
1475 					   NEIGH_UPDATE_F_ADMIN);
1476 			neigh_release(n);
1477 		}
1478 		goto out_dev_put;
1479 	}
1480 	read_unlock(&neigh_tbl_lock);
1481 	err = -EADDRNOTAVAIL;
1482 out_dev_put:
1483 	if (dev)
1484 		dev_put(dev);
1485 out:
1486 	return err;
1487 }
1488 
1489 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1490 {
1491 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1492 	struct rtattr **nda = arg;
1493 	struct neigh_table *tbl;
1494 	struct net_device *dev = NULL;
1495 	int err = -ENODEV;
1496 
1497 	if (ndm->ndm_ifindex &&
1498 	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1499 		goto out;
1500 
1501 	read_lock(&neigh_tbl_lock);
1502 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1503 		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
1504 		struct rtattr *dst_attr = nda[NDA_DST - 1];
1505 		int override = 1;
1506 		struct neighbour *n;
1507 
1508 		if (tbl->family != ndm->ndm_family)
1509 			continue;
1510 		read_unlock(&neigh_tbl_lock);
1511 
1512 		err = -EINVAL;
1513 		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
1514 			goto out_dev_put;
1515 
1516 		if (ndm->ndm_flags & NTF_PROXY) {
1517 			err = -ENOBUFS;
1518 			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
1519 				err = 0;
1520 			goto out_dev_put;
1521 		}
1522 
1523 		err = -EINVAL;
1524 		if (!dev)
1525 			goto out;
1526 		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
1527 			goto out_dev_put;
1528 
1529 		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
1530 		if (n) {
1531 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1532 				err = -EEXIST;
1533 				neigh_release(n);
1534 				goto out_dev_put;
1535 			}
1536 
1537 			override = nlh->nlmsg_flags & NLM_F_REPLACE;
1538 		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1539 			err = -ENOENT;
1540 			goto out_dev_put;
1541 		} else {
1542 			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
1543 			if (IS_ERR(n)) {
1544 				err = PTR_ERR(n);
1545 				goto out_dev_put;
1546 			}
1547 		}
1548 
1549 		err = neigh_update(n,
1550 				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
1551 				   ndm->ndm_state,
1552 				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
1553 				   NEIGH_UPDATE_F_ADMIN);
1554 
1555 		neigh_release(n);
1556 		goto out_dev_put;
1557 	}
1558 
1559 	read_unlock(&neigh_tbl_lock);
1560 	err = -EADDRNOTAVAIL;
1561 out_dev_put:
1562 	if (dev)
1563 		dev_put(dev);
1564 out:
1565 	return err;
1566 }
1567 
1568 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1569 {
1570 	struct rtattr *nest = NULL;
1571 
1572 	nest = RTA_NEST(skb, NDTA_PARMS);
1573 
1574 	if (parms->dev)
1575 		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1576 
1577 	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1578 	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1579 	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1580 	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1581 	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1582 	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1583 	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1584 	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1585 		      parms->base_reachable_time);
1586 	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1587 	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1588 	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1589 	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1590 	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1591 	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1592 
1593 	return RTA_NEST_END(skb, nest);
1594 
1595 rtattr_failure:
1596 	return RTA_NEST_CANCEL(skb, nest);
1597 }
1598 
1599 static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1600 			      struct netlink_callback *cb)
1601 {
1602 	struct nlmsghdr *nlh;
1603 	struct ndtmsg *ndtmsg;
1604 
1605 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1606 			       NLM_F_MULTI);
1607 
1608 	ndtmsg = NLMSG_DATA(nlh);
1609 
1610 	read_lock_bh(&tbl->lock);
1611 	ndtmsg->ndtm_family = tbl->family;
1612 	ndtmsg->ndtm_pad1   = 0;
1613 	ndtmsg->ndtm_pad2   = 0;
1614 
1615 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1616 	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1617 	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1618 	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1619 	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1620 
1621 	{
1622 		unsigned long now = jiffies;
1623 		unsigned int flush_delta = now - tbl->last_flush;
1624 		unsigned int rand_delta = now - tbl->last_rand;
1625 
1626 		struct ndt_config ndc = {
1627 			.ndtc_key_len		= tbl->key_len,
1628 			.ndtc_entry_size	= tbl->entry_size,
1629 			.ndtc_entries		= atomic_read(&tbl->entries),
1630 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1631 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1632 			.ndtc_hash_rnd		= tbl->hash_rnd,
1633 			.ndtc_hash_mask		= tbl->hash_mask,
1634 			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
1635 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1636 		};
1637 
1638 		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1639 	}
1640 
1641 	{
1642 		int cpu;
1643 		struct ndt_stats ndst;
1644 
1645 		memset(&ndst, 0, sizeof(ndst));
1646 
1647 		for_each_possible_cpu(cpu) {
1648 			struct neigh_statistics	*st;
1649 
1650 			st = per_cpu_ptr(tbl->stats, cpu);
1651 			ndst.ndts_allocs		+= st->allocs;
1652 			ndst.ndts_destroys		+= st->destroys;
1653 			ndst.ndts_hash_grows		+= st->hash_grows;
1654 			ndst.ndts_res_failed		+= st->res_failed;
1655 			ndst.ndts_lookups		+= st->lookups;
1656 			ndst.ndts_hits			+= st->hits;
1657 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1658 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1659 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1660 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1661 		}
1662 
1663 		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1664 	}
1665 
1666 	BUG_ON(tbl->parms.dev);
1667 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1668 		goto rtattr_failure;
1669 
1670 	read_unlock_bh(&tbl->lock);
1671 	return NLMSG_END(skb, nlh);
1672 
1673 rtattr_failure:
1674 	read_unlock_bh(&tbl->lock);
1675 	return NLMSG_CANCEL(skb, nlh);
1676 
1677 nlmsg_failure:
1678 	return -1;
1679 }
1680 
1681 static int neightbl_fill_param_info(struct neigh_table *tbl,
1682 				    struct neigh_parms *parms,
1683 				    struct sk_buff *skb,
1684 				    struct netlink_callback *cb)
1685 {
1686 	struct ndtmsg *ndtmsg;
1687 	struct nlmsghdr *nlh;
1688 
1689 	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
1690 			       NLM_F_MULTI);
1691 
1692 	ndtmsg = NLMSG_DATA(nlh);
1693 
1694 	read_lock_bh(&tbl->lock);
1695 	ndtmsg->ndtm_family = tbl->family;
1696 	ndtmsg->ndtm_pad1   = 0;
1697 	ndtmsg->ndtm_pad2   = 0;
1698 	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1699 
1700 	if (neightbl_fill_parms(skb, parms) < 0)
1701 		goto rtattr_failure;
1702 
1703 	read_unlock_bh(&tbl->lock);
1704 	return NLMSG_END(skb, nlh);
1705 
1706 rtattr_failure:
1707 	read_unlock_bh(&tbl->lock);
1708 	return NLMSG_CANCEL(skb, nlh);
1709 
1710 nlmsg_failure:
1711 	return -1;
1712 }
1713 
1714 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1715 						      int ifindex)
1716 {
1717 	struct neigh_parms *p;
1718 
1719 	for (p = &tbl->parms; p; p = p->next)
1720 		if ((p->dev && p->dev->ifindex == ifindex) ||
1721 		    (!p->dev && !ifindex))
1722 			return p;
1723 
1724 	return NULL;
1725 }
1726 
1727 int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1728 {
1729 	struct neigh_table *tbl;
1730 	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
1731 	struct rtattr **tb = arg;
1732 	int err = -EINVAL;
1733 
1734 	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
1735 		return -EINVAL;
1736 
1737 	read_lock(&neigh_tbl_lock);
1738 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1739 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1740 			continue;
1741 
1742 		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
1743 			break;
1744 	}
1745 
1746 	if (tbl == NULL) {
1747 		err = -ENOENT;
1748 		goto errout;
1749 	}
1750 
1751 	/*
1752 	 * We acquire tbl->lock to be nice to the periodic timers and
1753 	 * make sure they always see a consistent set of values.
1754 	 */
1755 	write_lock_bh(&tbl->lock);
1756 
1757 	if (tb[NDTA_THRESH1 - 1])
1758 		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
1759 
1760 	if (tb[NDTA_THRESH2 - 1])
1761 		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
1762 
1763 	if (tb[NDTA_THRESH3 - 1])
1764 		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
1765 
1766 	if (tb[NDTA_GC_INTERVAL - 1])
1767 		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
1768 
1769 	if (tb[NDTA_PARMS - 1]) {
1770 		struct rtattr *tbp[NDTPA_MAX];
1771 		struct neigh_parms *p;
1772 		u32 ifindex = 0;
1773 
1774 		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
1775 			goto rtattr_failure;
1776 
1777 		if (tbp[NDTPA_IFINDEX - 1])
1778 			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
1779 
1780 		p = lookup_neigh_params(tbl, ifindex);
1781 		if (p == NULL) {
1782 			err = -ENOENT;
1783 			goto rtattr_failure;
1784 		}
1785 
1786 		if (tbp[NDTPA_QUEUE_LEN - 1])
1787 			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
1788 
1789 		if (tbp[NDTPA_PROXY_QLEN - 1])
1790 			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
1791 
1792 		if (tbp[NDTPA_APP_PROBES - 1])
1793 			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
1794 
1795 		if (tbp[NDTPA_UCAST_PROBES - 1])
1796 			p->ucast_probes =
1797 			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
1798 
1799 		if (tbp[NDTPA_MCAST_PROBES - 1])
1800 			p->mcast_probes =
1801 			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
1802 
1803 		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
1804 			p->base_reachable_time =
1805 			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
1806 
1807 		if (tbp[NDTPA_GC_STALETIME - 1])
1808 			p->gc_staletime =
1809 			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
1810 
1811 		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
1812 			p->delay_probe_time =
1813 			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
1814 
1815 		if (tbp[NDTPA_RETRANS_TIME - 1])
1816 			p->retrans_time =
1817 			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
1818 
1819 		if (tbp[NDTPA_ANYCAST_DELAY - 1])
1820 			p->anycast_delay =
1821 			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
1822 
1823 		if (tbp[NDTPA_PROXY_DELAY - 1])
1824 			p->proxy_delay =
1825 			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
1826 
1827 		if (tbp[NDTPA_LOCKTIME - 1])
1828 			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
1829 	}
1830 
1831 	err = 0;
1832 
1833 rtattr_failure:
1834 	write_unlock_bh(&tbl->lock);
1835 errout:
1836 	read_unlock(&neigh_tbl_lock);
1837 	return err;
1838 }
1839 
1840 int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1841 {
1842 	int idx, family;
1843 	int s_idx = cb->args[0];
1844 	struct neigh_table *tbl;
1845 
1846 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1847 
1848 	read_lock(&neigh_tbl_lock);
1849 	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
1850 		struct neigh_parms *p;
1851 
1852 		if (idx < s_idx || (family && tbl->family != family))
1853 			continue;
1854 
1855 		if (neightbl_fill_info(tbl, skb, cb) <= 0)
1856 			break;
1857 
1858 		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
1859 			if (idx < s_idx)
1860 				continue;
1861 
1862 			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
1863 				goto out;
1864 		}
1865 
1866 	}
1867 out:
1868 	read_unlock(&neigh_tbl_lock);
1869 	cb->args[0] = idx;
1870 
1871 	return skb->len;
1872 }
1873 
1874 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1875 			   u32 pid, u32 seq, int event, unsigned int flags)
1876 {
1877 	unsigned long now = jiffies;
1878 	unsigned char *b = skb->tail;
1879 	struct nda_cacheinfo ci;
1880 	int locked = 0;
1881 	u32 probes;
1882 	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
1883 					 sizeof(struct ndmsg), flags);
1884 	struct ndmsg *ndm = NLMSG_DATA(nlh);
1885 
1886 	ndm->ndm_family	 = n->ops->family;
1887 	ndm->ndm_pad1    = 0;
1888 	ndm->ndm_pad2    = 0;
1889 	ndm->ndm_flags	 = n->flags;
1890 	ndm->ndm_type	 = n->type;
1891 	ndm->ndm_ifindex = n->dev->ifindex;
1892 	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1893 	read_lock_bh(&n->lock);
1894 	locked		 = 1;
1895 	ndm->ndm_state	 = n->nud_state;
1896 	if (n->nud_state & NUD_VALID)
1897 		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1898 	ci.ndm_used	 = now - n->used;
1899 	ci.ndm_confirmed = now - n->confirmed;
1900 	ci.ndm_updated	 = now - n->updated;
1901 	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
1902 	probes = atomic_read(&n->probes);
1903 	read_unlock_bh(&n->lock);
1904 	locked		 = 0;
1905 	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1906 	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
1907 	nlh->nlmsg_len	 = skb->tail - b;
1908 	return skb->len;
1909 
1910 nlmsg_failure:
1911 rtattr_failure:
1912 	if (locked)
1913 		read_unlock_bh(&n->lock);
1914 	skb_trim(skb, b - skb->data);
1915 	return -1;
1916 }
1917 
1918 
1919 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1920 			    struct netlink_callback *cb)
1921 {
1922 	struct neighbour *n;
1923 	int rc, h, s_h = cb->args[1];
1924 	int idx, s_idx = idx = cb->args[2];
1925 
1926 	for (h = 0; h <= tbl->hash_mask; h++) {
1927 		if (h < s_h)
1928 			continue;
1929 		if (h > s_h)
1930 			s_idx = 0;
1931 		read_lock_bh(&tbl->lock);
1932 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
1933 			if (idx < s_idx)
1934 				continue;
1935 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1936 					    cb->nlh->nlmsg_seq,
1937 					    RTM_NEWNEIGH,
1938 					    NLM_F_MULTI) <= 0) {
1939 				read_unlock_bh(&tbl->lock);
1940 				rc = -1;
1941 				goto out;
1942 			}
1943 		}
1944 		read_unlock_bh(&tbl->lock);
1945 	}
1946 	rc = skb->len;
1947 out:
1948 	cb->args[1] = h;
1949 	cb->args[2] = idx;
1950 	return rc;
1951 }
1952 
1953 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1954 {
1955 	struct neigh_table *tbl;
1956 	int t, family, s_t;
1957 
1958 	read_lock(&neigh_tbl_lock);
1959 	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
1960 	s_t = cb->args[0];
1961 
1962 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
1963 		if (t < s_t || (family && tbl->family != family))
1964 			continue;
1965 		if (t > s_t)
1966 			memset(&cb->args[1], 0, sizeof(cb->args) -
1967 						sizeof(cb->args[0]));
1968 		if (neigh_dump_table(tbl, skb, cb) < 0)
1969 			break;
1970 	}
1971 	read_unlock(&neigh_tbl_lock);
1972 
1973 	cb->args[0] = t;
1974 	return skb->len;
1975 }
1976 
1977 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1978 {
1979 	int chain;
1980 
1981 	read_lock_bh(&tbl->lock);
1982 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1983 		struct neighbour *n;
1984 
1985 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1986 			cb(n, cookie);
1987 	}
1988 	read_unlock_bh(&tbl->lock);
1989 }
1990 EXPORT_SYMBOL(neigh_for_each);
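/*
 * A typical neigh_for_each() callback, counting the entries bound to one
 * device (hypothetical example).  The walk holds tbl->lock for reading
 * with BH disabled, so the callback must not sleep and must not take
 * tbl->lock itself:
 *
 *	struct count_arg {
 *		struct net_device *dev;
 *		int count;
 *	};
 *
 *	static void count_on_dev(struct neighbour *n, void *cookie)
 *	{
 *		struct count_arg *arg = cookie;
 *
 *		if (n->dev == arg->dev)
 *			arg->count++;
 *	}
 *
 *	struct count_arg arg = { .dev = dev };
 *	neigh_for_each(tbl, count_on_dev, &arg);
 */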
1991 
1992 /* The tbl->lock must be held as a writer and BH disabled. */
1993 void __neigh_for_each_release(struct neigh_table *tbl,
1994 			      int (*cb)(struct neighbour *))
1995 {
1996 	int chain;
1997 
1998 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1999 		struct neighbour *n, **np;
2000 
2001 		np = &tbl->hash_buckets[chain];
2002 		while ((n = *np) != NULL) {
2003 			int release;
2004 
2005 			write_lock(&n->lock);
2006 			release = cb(n);
2007 			if (release) {
2008 				*np = n->next;
2009 				n->dead = 1;
2010 			} else
2011 				np = &n->next;
2012 			write_unlock(&n->lock);
2013 			if (release)
2014 				neigh_release(n);
2015 		}
2016 	}
2017 }
2018 EXPORT_SYMBOL(__neigh_for_each_release);
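/*
 * A sketch of a release callback.  The caller takes tbl->lock as a
 * writer with BH disabled; each entry is offered to the callback under
 * its own write-held n->lock, and a nonzero return unlinks it and drops
 * the hash table's reference.  Hypothetical example evicting entries in
 * NUD_FAILED state:
 *
 *	static int drop_failed(struct neighbour *n)
 *	{
 *		return n->nud_state & NUD_FAILED;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, drop_failed);
 *	write_unlock_bh(&tbl->lock);
 */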
2019 
2020 #ifdef CONFIG_PROC_FS
2021 
2022 static struct neighbour *neigh_get_first(struct seq_file *seq)
2023 {
2024 	struct neigh_seq_state *state = seq->private;
2025 	struct neigh_table *tbl = state->tbl;
2026 	struct neighbour *n = NULL;
2027 	int bucket;
2028 
2029 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2030 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2031 		n = tbl->hash_buckets[bucket];
2032 
2033 		while (n) {
2034 			if (state->neigh_sub_iter) {
2035 				loff_t fakep = 0;
2036 				void *v;
2037 
2038 				v = state->neigh_sub_iter(state, n, &fakep);
2039 				if (!v)
2040 					goto next;
2041 			}
2042 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2043 				break;
2044 			if (n->nud_state & ~NUD_NOARP)
2045 				break;
2046 		next:
2047 			n = n->next;
2048 		}
2049 
2050 		if (n)
2051 			break;
2052 	}
2053 	state->bucket = bucket;
2054 
2055 	return n;
2056 }
2057 
2058 static struct neighbour *neigh_get_next(struct seq_file *seq,
2059 					struct neighbour *n,
2060 					loff_t *pos)
2061 {
2062 	struct neigh_seq_state *state = seq->private;
2063 	struct neigh_table *tbl = state->tbl;
2064 
2065 	if (state->neigh_sub_iter) {
2066 		void *v = state->neigh_sub_iter(state, n, pos);
2067 		if (v)
2068 			return n;
2069 	}
2070 	n = n->next;
2071 
2072 	while (1) {
2073 		while (n) {
2074 			if (state->neigh_sub_iter) {
2075 				void *v = state->neigh_sub_iter(state, n, pos);
2076 				if (v)
2077 					return n;
2078 				goto next;
2079 			}
2080 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2081 				break;
2082 
2083 			if (n->nud_state & ~NUD_NOARP)
2084 				break;
2085 		next:
2086 			n = n->next;
2087 		}
2088 
2089 		if (n)
2090 			break;
2091 
2092 		if (++state->bucket > tbl->hash_mask)
2093 			break;
2094 
2095 		n = tbl->hash_buckets[state->bucket];
2096 	}
2097 
2098 	if (n && pos)
2099 		--(*pos);
2100 	return n;
2101 }
2102 
2103 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2104 {
2105 	struct neighbour *n = neigh_get_first(seq);
2106 
2107 	if (n) {
2108 		while (*pos) {
2109 			n = neigh_get_next(seq, n, pos);
2110 			if (!n)
2111 				break;
2112 		}
2113 	}
2114 	return *pos ? NULL : n;
2115 }
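/*
 * Note on the *pos convention: neigh_get_next() decrements *pos each
 * time it successfully advances, so neigh_get_idx() simply steps forward
 * until *pos reaches zero; pneigh_get_idx() below follows the same
 * convention.  A nonzero *pos on exit means the table was exhausted
 * before the requested position, and NULL is returned.
 */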
2116 
2117 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2118 {
2119 	struct neigh_seq_state *state = seq->private;
2120 	struct neigh_table *tbl = state->tbl;
2121 	struct pneigh_entry *pn = NULL;
2122 	int bucket;
2123 
2124 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2125 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2126 		pn = tbl->phash_buckets[bucket];
2127 		if (pn)
2128 			break;
2129 	}
2130 	state->bucket = bucket;
2131 
2132 	return pn;
2133 }
2134 
2135 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2136 					    struct pneigh_entry *pn,
2137 					    loff_t *pos)
2138 {
2139 	struct neigh_seq_state *state = seq->private;
2140 	struct neigh_table *tbl = state->tbl;
2141 
2142 	pn = pn->next;
2143 	while (!pn) {
2144 		if (++state->bucket > PNEIGH_HASHMASK)
2145 			break;
2146 		pn = tbl->phash_buckets[state->bucket];
2147 		if (pn)
2148 			break;
2149 	}
2150 
2151 	if (pn && pos)
2152 		--(*pos);
2153 
2154 	return pn;
2155 }
2156 
2157 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2158 {
2159 	struct pneigh_entry *pn = pneigh_get_first(seq);
2160 
2161 	if (pn) {
2162 		while (*pos) {
2163 			pn = pneigh_get_next(seq, pn, pos);
2164 			if (!pn)
2165 				break;
2166 		}
2167 	}
2168 	return *pos ? NULL : pn;
2169 }
2170 
2171 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2172 {
2173 	struct neigh_seq_state *state = seq->private;
2174 	void *rc;
2175 
2176 	rc = neigh_get_idx(seq, pos);
2177 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2178 		rc = pneigh_get_idx(seq, pos);
2179 
2180 	return rc;
2181 }
2182 
2183 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2184 {
2185 	struct neigh_seq_state *state = seq->private;
2186 	loff_t pos_minus_one;
2187 
2188 	state->tbl = tbl;
2189 	state->bucket = 0;
2190 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2191 
2192 	read_lock_bh(&tbl->lock);
2193 
2194 	pos_minus_one = *pos - 1;
2195 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2196 }
2197 EXPORT_SYMBOL(neigh_seq_start);
2198 
2199 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2200 {
2201 	struct neigh_seq_state *state;
2202 	void *rc;
2203 
2204 	if (v == SEQ_START_TOKEN) {
2205 		rc = neigh_get_idx(seq, pos);
2206 		goto out;
2207 	}
2208 
2209 	state = seq->private;
2210 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2211 		rc = neigh_get_next(seq, v, NULL);
2212 		if (rc)
2213 			goto out;
2214 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2215 			rc = pneigh_get_first(seq);
2216 	} else {
2217 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2218 		rc = pneigh_get_next(seq, v, NULL);
2219 	}
2220 out:
2221 	++(*pos);
2222 	return rc;
2223 }
2224 EXPORT_SYMBOL(neigh_seq_next);
2225 
2226 void neigh_seq_stop(struct seq_file *seq, void *v)
2227 {
2228 	struct neigh_seq_state *state = seq->private;
2229 	struct neigh_table *tbl = state->tbl;
2230 
2231 	read_unlock_bh(&tbl->lock);
2232 }
2233 EXPORT_SYMBOL(neigh_seq_stop);
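/*
 * Protocols plug these helpers into their own seq_file iterators.  A
 * sketch in the style of the ARP /proc code (hypothetical names; the
 * real user lives in net/ipv4/arp.c).  seq->private must point to a
 * struct neigh_seq_state, allocated by the protocol's open routine:
 *
 *	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &example_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static struct seq_operations example_seq_ops = {
 *		.start = example_seq_start,
 *		.next  = neigh_seq_next,
 *		.stop  = neigh_seq_stop,
 *		.show  = example_seq_show,
 *	};
 */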
2234 
2235 /* statistics via seq_file */
2236 
2237 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2238 {
2239 	struct proc_dir_entry *pde = seq->private;
2240 	struct neigh_table *tbl = pde->data;
2241 	int cpu;
2242 
2243 	if (*pos == 0)
2244 		return SEQ_START_TOKEN;
2245 
2246 	for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
2247 		if (!cpu_possible(cpu))
2248 			continue;
2249 		*pos = cpu+1;
2250 		return per_cpu_ptr(tbl->stats, cpu);
2251 	}
2252 	return NULL;
2253 }
2254 
2255 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2256 {
2257 	struct proc_dir_entry *pde = seq->private;
2258 	struct neigh_table *tbl = pde->data;
2259 	int cpu;
2260 
2261 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2262 		if (!cpu_possible(cpu))
2263 			continue;
2264 		*pos = cpu+1;
2265 		return per_cpu_ptr(tbl->stats, cpu);
2266 	}
2267 	return NULL;
2268 }
2269 
2270 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2271 {
2272 
2273 }
2274 
2275 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2276 {
2277 	struct proc_dir_entry *pde = seq->private;
2278 	struct neigh_table *tbl = pde->data;
2279 	struct neigh_statistics *st = v;
2280 
2281 	if (v == SEQ_START_TOKEN) {
2282 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2283 		return 0;
2284 	}
2285 
2286 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2287 			"%08lx %08lx  %08lx %08lx\n",
2288 		   atomic_read(&tbl->entries),
2289 
2290 		   st->allocs,
2291 		   st->destroys,
2292 		   st->hash_grows,
2293 
2294 		   st->lookups,
2295 		   st->hits,
2296 
2297 		   st->res_failed,
2298 
2299 		   st->rcv_probes_mcast,
2300 		   st->rcv_probes_ucast,
2301 
2302 		   st->periodic_gc_runs,
2303 		   st->forced_gc_runs
2304 		   );
2305 
2306 	return 0;
2307 }
2308 
2309 static struct seq_operations neigh_stat_seq_ops = {
2310 	.start	= neigh_stat_seq_start,
2311 	.next	= neigh_stat_seq_next,
2312 	.stop	= neigh_stat_seq_stop,
2313 	.show	= neigh_stat_seq_show,
2314 };
2315 
2316 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2317 {
2318 	int ret = seq_open(file, &neigh_stat_seq_ops);
2319 
2320 	if (!ret) {
2321 		struct seq_file *sf = file->private_data;
2322 		sf->private = PDE(inode);
2323 	}
2324 	return ret;
2325 }
2326 
2327 static struct file_operations neigh_stat_seq_fops = {
2328 	.owner	 = THIS_MODULE,
2329 	.open 	 = neigh_stat_seq_open,
2330 	.read	 = seq_read,
2331 	.llseek	 = seq_lseek,
2332 	.release = seq_release,
2333 };
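/*
 * The stats file is wired up when a table is initialised (see
 * neigh_table_init() earlier in this file): the proc entry's ->data is
 * pointed at the table so the handlers above can recover it through
 * PDE(inode).  Roughly:
 *
 *	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
 *	if (tbl->pde) {
 *		tbl->pde->data = tbl;
 *		tbl->pde->proc_fops = &neigh_stat_seq_fops;
 *	}
 */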
2334 
2335 #endif /* CONFIG_PROC_FS */
2336 
2337 #ifdef CONFIG_ARPD
2338 void neigh_app_ns(struct neighbour *n)
2339 {
2340 	struct nlmsghdr  *nlh;
2341 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2342 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2343 
2344 	if (!skb)
2345 		return;
2346 
2347 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2348 		kfree_skb(skb);
2349 		return;
2350 	}
2351 	nlh			   = (struct nlmsghdr *)skb->data;
2352 	nlh->nlmsg_flags	   = NLM_F_REQUEST;
2353 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2354 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2355 }
2356 
2357 static void neigh_app_notify(struct neighbour *n)
2358 {
2359 	struct nlmsghdr *nlh;
2360 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2361 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2362 
2363 	if (!skb)
2364 		return;
2365 
2366 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2367 		kfree_skb(skb);
2368 		return;
2369 	}
2370 	nlh			   = (struct nlmsghdr *)skb->data;
2371 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2372 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2373 }
2374 
2375 #endif /* CONFIG_ARPD */
2376 
2377 #ifdef CONFIG_SYSCTL
2378 
2379 static struct neigh_sysctl_table {
2380 	struct ctl_table_header *sysctl_header;
2381 	ctl_table		neigh_vars[__NET_NEIGH_MAX];
2382 	ctl_table		neigh_dev[2];
2383 	ctl_table		neigh_neigh_dir[2];
2384 	ctl_table		neigh_proto_dir[2];
2385 	ctl_table		neigh_root_dir[2];
2386 } neigh_sysctl_template = {
2387 	.neigh_vars = {
2388 		{
2389 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2390 			.procname	= "mcast_solicit",
2391 			.maxlen		= sizeof(int),
2392 			.mode		= 0644,
2393 			.proc_handler	= &proc_dointvec,
2394 		},
2395 		{
2396 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2397 			.procname	= "ucast_solicit",
2398 			.maxlen		= sizeof(int),
2399 			.mode		= 0644,
2400 			.proc_handler	= &proc_dointvec,
2401 		},
2402 		{
2403 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2404 			.procname	= "app_solicit",
2405 			.maxlen		= sizeof(int),
2406 			.mode		= 0644,
2407 			.proc_handler	= &proc_dointvec,
2408 		},
2409 		{
2410 			.ctl_name	= NET_NEIGH_RETRANS_TIME,
2411 			.procname	= "retrans_time",
2412 			.maxlen		= sizeof(int),
2413 			.mode		= 0644,
2414 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2415 		},
2416 		{
2417 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2418 			.procname	= "base_reachable_time",
2419 			.maxlen		= sizeof(int),
2420 			.mode		= 0644,
2421 			.proc_handler	= &proc_dointvec_jiffies,
2422 			.strategy	= &sysctl_jiffies,
2423 		},
2424 		{
2425 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2426 			.procname	= "delay_first_probe_time",
2427 			.maxlen		= sizeof(int),
2428 			.mode		= 0644,
2429 			.proc_handler	= &proc_dointvec_jiffies,
2430 			.strategy	= &sysctl_jiffies,
2431 		},
2432 		{
2433 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2434 			.procname	= "gc_stale_time",
2435 			.maxlen		= sizeof(int),
2436 			.mode		= 0644,
2437 			.proc_handler	= &proc_dointvec_jiffies,
2438 			.strategy	= &sysctl_jiffies,
2439 		},
2440 		{
2441 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2442 			.procname	= "unres_qlen",
2443 			.maxlen		= sizeof(int),
2444 			.mode		= 0644,
2445 			.proc_handler	= &proc_dointvec,
2446 		},
2447 		{
2448 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2449 			.procname	= "proxy_qlen",
2450 			.maxlen		= sizeof(int),
2451 			.mode		= 0644,
2452 			.proc_handler	= &proc_dointvec,
2453 		},
2454 		{
2455 			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
2456 			.procname	= "anycast_delay",
2457 			.maxlen		= sizeof(int),
2458 			.mode		= 0644,
2459 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2460 		},
2461 		{
2462 			.ctl_name	= NET_NEIGH_PROXY_DELAY,
2463 			.procname	= "proxy_delay",
2464 			.maxlen		= sizeof(int),
2465 			.mode		= 0644,
2466 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2467 		},
2468 		{
2469 			.ctl_name	= NET_NEIGH_LOCKTIME,
2470 			.procname	= "locktime",
2471 			.maxlen		= sizeof(int),
2472 			.mode		= 0644,
2473 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2474 		},
2475 		{
2476 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2477 			.procname	= "gc_interval",
2478 			.maxlen		= sizeof(int),
2479 			.mode		= 0644,
2480 			.proc_handler	= &proc_dointvec_jiffies,
2481 			.strategy	= &sysctl_jiffies,
2482 		},
2483 		{
2484 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2485 			.procname	= "gc_thresh1",
2486 			.maxlen		= sizeof(int),
2487 			.mode		= 0644,
2488 			.proc_handler	= &proc_dointvec,
2489 		},
2490 		{
2491 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2492 			.procname	= "gc_thresh2",
2493 			.maxlen		= sizeof(int),
2494 			.mode		= 0644,
2495 			.proc_handler	= &proc_dointvec,
2496 		},
2497 		{
2498 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2499 			.procname	= "gc_thresh3",
2500 			.maxlen		= sizeof(int),
2501 			.mode		= 0644,
2502 			.proc_handler	= &proc_dointvec,
2503 		},
2504 		{
2505 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2506 			.procname	= "retrans_time_ms",
2507 			.maxlen		= sizeof(int),
2508 			.mode		= 0644,
2509 			.proc_handler	= &proc_dointvec_ms_jiffies,
2510 			.strategy	= &sysctl_ms_jiffies,
2511 		},
2512 		{
2513 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2514 			.procname	= "base_reachable_time_ms",
2515 			.maxlen		= sizeof(int),
2516 			.mode		= 0644,
2517 			.proc_handler	= &proc_dointvec_ms_jiffies,
2518 			.strategy	= &sysctl_ms_jiffies,
2519 		},
2520 	},
2521 	.neigh_dev = {
2522 		{
2523 			.ctl_name	= NET_PROTO_CONF_DEFAULT,
2524 			.procname	= "default",
2525 			.mode		= 0555,
2526 		},
2527 	},
2528 	.neigh_neigh_dir = {
2529 		{
2530 			.procname	= "neigh",
2531 			.mode		= 0555,
2532 		},
2533 	},
2534 	.neigh_proto_dir = {
2535 		{
2536 			.mode		= 0555,
2537 		},
2538 	},
2539 	.neigh_root_dir = {
2540 		{
2541 			.ctl_name	= CTL_NET,
2542 			.procname	= "net",
2543 			.mode		= 0555,
2544 		},
2545 	},
2546 };
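/*
 * neigh_sysctl_register() below addresses neigh_vars[] by position, so
 * the order of the template entries is part of its contract: indices 0-2
 * are the solicit/probe counts, 3-11 the per-parms timers and queue
 * lengths, 12-15 the table-wide gc_interval and gc_thresh1-3 knobs, and
 * 16-17 the millisecond views of retrans_time and base_reachable_time.
 */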
2547 
2548 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2549 			  int p_id, int pdev_id, char *p_name,
2550 			  proc_handler *handler, ctl_handler *strategy)
2551 {
2552 	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
2553 	const char *dev_name_source = NULL;
2554 	char *dev_name = NULL;
2555 	int err = 0;
2556 
2557 	if (!t)
2558 		return -ENOBUFS;
2559 	memcpy(t, &neigh_sysctl_template, sizeof(*t));
2560 	t->neigh_vars[0].data  = &p->mcast_probes;
2561 	t->neigh_vars[1].data  = &p->ucast_probes;
2562 	t->neigh_vars[2].data  = &p->app_probes;
2563 	t->neigh_vars[3].data  = &p->retrans_time;
2564 	t->neigh_vars[4].data  = &p->base_reachable_time;
2565 	t->neigh_vars[5].data  = &p->delay_probe_time;
2566 	t->neigh_vars[6].data  = &p->gc_staletime;
2567 	t->neigh_vars[7].data  = &p->queue_len;
2568 	t->neigh_vars[8].data  = &p->proxy_qlen;
2569 	t->neigh_vars[9].data  = &p->anycast_delay;
2570 	t->neigh_vars[10].data = &p->proxy_delay;
2571 	t->neigh_vars[11].data = &p->locktime;
2572 
2573 	if (dev) {
2574 		dev_name_source = dev->name;
2575 		t->neigh_dev[0].ctl_name = dev->ifindex;
2576 		t->neigh_vars[12].procname = NULL;	/* gc_interval: table-wide only */
2577 		t->neigh_vars[13].procname = NULL;	/* gc_thresh1 */
2578 		t->neigh_vars[14].procname = NULL;	/* gc_thresh2 */
2579 		t->neigh_vars[15].procname = NULL;	/* gc_thresh3 */
2580 	} else {
2581 		dev_name_source = t->neigh_dev[0].procname;
2582 		t->neigh_vars[12].data = (int *)(p + 1);	/* tbl->gc_interval */
2583 		t->neigh_vars[13].data = (int *)(p + 1) + 1;	/* tbl->gc_thresh1 */
2584 		t->neigh_vars[14].data = (int *)(p + 1) + 2;	/* tbl->gc_thresh2 */
2585 		t->neigh_vars[15].data = (int *)(p + 1) + 3;	/* tbl->gc_thresh3 */
2586 	}
2587 
2588 	t->neigh_vars[16].data  = &p->retrans_time;
2589 	t->neigh_vars[17].data  = &p->base_reachable_time;
2590 
2591 	if (handler || strategy) {
2592 		/* RetransTime */
2593 		t->neigh_vars[3].proc_handler = handler;
2594 		t->neigh_vars[3].strategy = strategy;
2595 		t->neigh_vars[3].extra1 = dev;
2596 		/* ReachableTime */
2597 		t->neigh_vars[4].proc_handler = handler;
2598 		t->neigh_vars[4].strategy = strategy;
2599 		t->neigh_vars[4].extra1 = dev;
2600 		/* RetransTime (in milliseconds)*/
2601 		t->neigh_vars[16].proc_handler = handler;
2602 		t->neigh_vars[16].strategy = strategy;
2603 		t->neigh_vars[16].extra1 = dev;
2604 		/* ReachableTime (in milliseconds) */
2605 		t->neigh_vars[17].proc_handler = handler;
2606 		t->neigh_vars[17].strategy = strategy;
2607 		t->neigh_vars[17].extra1 = dev;
2608 	}
2609 
2610 	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2611 	if (!dev_name) {
2612 		err = -ENOBUFS;
2613 		goto free;
2614 	}
2615 
2616 	t->neigh_dev[0].procname = dev_name;
2617 
2618 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
2619 
2620 	t->neigh_proto_dir[0].procname = p_name;
2621 	t->neigh_proto_dir[0].ctl_name = p_id;
2622 
2623 	t->neigh_dev[0].child	       = t->neigh_vars;
2624 	t->neigh_neigh_dir[0].child    = t->neigh_dev;
2625 	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
2626 	t->neigh_root_dir[0].child     = t->neigh_proto_dir;
2627 
2628 	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2629 	if (!t->sysctl_header) {
2630 		err = -ENOBUFS;
2631 		goto free_procname;
2632 	}
2633 	p->sysctl_table = t;
2634 	return 0;
2635 
2636 	/* error path */
2637  free_procname:
2638 	kfree(dev_name);
2639  free:
2640 	kfree(t);
2641 
2642 	return err;
2643 }
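/*
 * A protocol registers one sysctl tree per neigh_parms: once for the
 * table default (dev == NULL) and once per device clone.  A sketch in
 * the style of the IPv4 ARP caller (see net/ipv4/arp.c for the real
 * call sites):
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4,
 *			      NET_IPV4_NEIGH, "ipv4", NULL, NULL);
 *
 * and, when a device acquires its own parms:
 *
 *	neigh_sysctl_register(dev, parms, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 */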
2644 
2645 void neigh_sysctl_unregister(struct neigh_parms *p)
2646 {
2647 	if (p->sysctl_table) {
2648 		struct neigh_sysctl_table *t = p->sysctl_table;
2649 		p->sysctl_table = NULL;
2650 		unregister_sysctl_table(t->sysctl_header);
2651 		kfree(t->neigh_dev[0].procname);
2652 		kfree(t);
2653 	}
2654 }
2655 
2656 #endif	/* CONFIG_SYSCTL */
2657 
2658 EXPORT_SYMBOL(__neigh_event_send);
2659 EXPORT_SYMBOL(neigh_add);
2660 EXPORT_SYMBOL(neigh_changeaddr);
2661 EXPORT_SYMBOL(neigh_compat_output);
2662 EXPORT_SYMBOL(neigh_connected_output);
2663 EXPORT_SYMBOL(neigh_create);
2664 EXPORT_SYMBOL(neigh_delete);
2665 EXPORT_SYMBOL(neigh_destroy);
2666 EXPORT_SYMBOL(neigh_dump_info);
2667 EXPORT_SYMBOL(neigh_event_ns);
2668 EXPORT_SYMBOL(neigh_ifdown);
2669 EXPORT_SYMBOL(neigh_lookup);
2670 EXPORT_SYMBOL(neigh_lookup_nodev);
2671 EXPORT_SYMBOL(neigh_parms_alloc);
2672 EXPORT_SYMBOL(neigh_parms_release);
2673 EXPORT_SYMBOL(neigh_rand_reach_time);
2674 EXPORT_SYMBOL(neigh_resolve_output);
2675 EXPORT_SYMBOL(neigh_table_clear);
2676 EXPORT_SYMBOL(neigh_table_init);
2677 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2678 EXPORT_SYMBOL(neigh_update);
2679 EXPORT_SYMBOL(neigh_update_hhs);
2680 EXPORT_SYMBOL(pneigh_enqueue);
2681 EXPORT_SYMBOL(pneigh_lookup);
2682 EXPORT_SYMBOL(neightbl_dump_info);
2683 EXPORT_SYMBOL(neightbl_set);
2684 
2685 #ifdef CONFIG_ARPD
2686 EXPORT_SYMBOL(neigh_app_ns);
2687 #endif
2688 #ifdef CONFIG_SYSCTL
2689 EXPORT_SYMBOL(neigh_sysctl_register);
2690 EXPORT_SYMBOL(neigh_sysctl_unregister);
2691 #endif
2692