xref: /linux/net/core/neighbour.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(unsigned long arg);
55 static void __neigh_notify(struct neighbour *n, int type, int flags);
56 static void neigh_update_notify(struct neighbour *neigh);
57 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
58 
59 static struct neigh_table *neigh_tables;
60 #ifdef CONFIG_PROC_FS
61 static const struct file_operations neigh_stat_seq_fops;
62 #endif
63 
64 /*
65    Neighbour hash table buckets are protected with rwlock tbl->lock.
66 
67    - All the scans/updates to hash buckets MUST be made under this lock.
68    - NOTHING clever should be made under this lock: no callbacks
69      to protocol backends, no attempts to send something to network.
70      It will result in deadlocks, if backend/driver wants to use neighbour
71      cache.
72    - If the entry requires some non-trivial actions, increase
73      its reference count and release table lock.
74 
75    Neighbour entries are protected:
76    - with reference count.
77    - with rwlock neigh->lock
78 
79    Reference count prevents destruction.
80 
81    neigh->lock mainly serializes ll address data and its validity state.
82    However, the same lock is used to protect another entry fields:
83     - timer
84     - resolution queue
85 
86    Again, nothing clever shall be made under neigh->lock,
87    the most complicated procedure, which we allow is dev->hard_header.
88    It is supposed, that dev->hard_header is simplistic and does
89    not make callbacks to neighbour tables.
90 
91    The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
92    list of neighbour tables. This list is used only in process context,
93  */
94 
95 static DEFINE_RWLOCK(neigh_tbl_lock);
96 
97 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
98 {
99 	kfree_skb(skb);
100 	return -ENETDOWN;
101 }
102 
103 static void neigh_cleanup_and_release(struct neighbour *neigh)
104 {
105 	if (neigh->parms->neigh_cleanup)
106 		neigh->parms->neigh_cleanup(neigh);
107 
108 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
109 	neigh_release(neigh);
110 }
111 
112 /*
113  * It is random distribution in the interval (1/2)*base...(3/2)*base.
114  * It corresponds to default IPv6 settings and is not overridable,
115  * because it is really reasonable choice.
116  */
117 
118 unsigned long neigh_rand_reach_time(unsigned long base)
119 {
120 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
121 }
122 EXPORT_SYMBOL(neigh_rand_reach_time);
123 
124 
125 static int neigh_forced_gc(struct neigh_table *tbl)
126 {
127 	int shrunk = 0;
128 	int i;
129 	struct neigh_hash_table *nht;
130 
131 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
132 
133 	write_lock_bh(&tbl->lock);
134 	nht = rcu_dereference_protected(tbl->nht,
135 					lockdep_is_held(&tbl->lock));
136 	for (i = 0; i < (1 << nht->hash_shift); i++) {
137 		struct neighbour *n;
138 		struct neighbour __rcu **np;
139 
140 		np = &nht->hash_buckets[i];
141 		while ((n = rcu_dereference_protected(*np,
142 					lockdep_is_held(&tbl->lock))) != NULL) {
143 			/* Neighbour record may be discarded if:
144 			 * - nobody refers to it.
145 			 * - it is not permanent
146 			 */
147 			write_lock(&n->lock);
148 			if (atomic_read(&n->refcnt) == 1 &&
149 			    !(n->nud_state & NUD_PERMANENT)) {
150 				rcu_assign_pointer(*np,
151 					rcu_dereference_protected(n->next,
152 						  lockdep_is_held(&tbl->lock)));
153 				n->dead = 1;
154 				shrunk	= 1;
155 				write_unlock(&n->lock);
156 				neigh_cleanup_and_release(n);
157 				continue;
158 			}
159 			write_unlock(&n->lock);
160 			np = &n->next;
161 		}
162 	}
163 
164 	tbl->last_flush = jiffies;
165 
166 	write_unlock_bh(&tbl->lock);
167 
168 	return shrunk;
169 }
170 
171 static void neigh_add_timer(struct neighbour *n, unsigned long when)
172 {
173 	neigh_hold(n);
174 	if (unlikely(mod_timer(&n->timer, when))) {
175 		printk("NEIGH: BUG, double timer add, state is %x\n",
176 		       n->nud_state);
177 		dump_stack();
178 	}
179 }
180 
181 static int neigh_del_timer(struct neighbour *n)
182 {
183 	if ((n->nud_state & NUD_IN_TIMER) &&
184 	    del_timer(&n->timer)) {
185 		neigh_release(n);
186 		return 1;
187 	}
188 	return 0;
189 }
190 
191 static void pneigh_queue_purge(struct sk_buff_head *list)
192 {
193 	struct sk_buff *skb;
194 
195 	while ((skb = skb_dequeue(list)) != NULL) {
196 		dev_put(skb->dev);
197 		kfree_skb(skb);
198 	}
199 }
200 
201 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
202 {
203 	int i;
204 	struct neigh_hash_table *nht;
205 
206 	nht = rcu_dereference_protected(tbl->nht,
207 					lockdep_is_held(&tbl->lock));
208 
209 	for (i = 0; i < (1 << nht->hash_shift); i++) {
210 		struct neighbour *n;
211 		struct neighbour __rcu **np = &nht->hash_buckets[i];
212 
213 		while ((n = rcu_dereference_protected(*np,
214 					lockdep_is_held(&tbl->lock))) != NULL) {
215 			if (dev && n->dev != dev) {
216 				np = &n->next;
217 				continue;
218 			}
219 			rcu_assign_pointer(*np,
220 				   rcu_dereference_protected(n->next,
221 						lockdep_is_held(&tbl->lock)));
222 			write_lock(&n->lock);
223 			neigh_del_timer(n);
224 			n->dead = 1;
225 
226 			if (atomic_read(&n->refcnt) != 1) {
227 				/* The most unpleasant situation.
228 				   We must destroy neighbour entry,
229 				   but someone still uses it.
230 
231 				   The destroy will be delayed until
232 				   the last user releases us, but
233 				   we must kill timers etc. and move
234 				   it to safe state.
235 				 */
236 				__skb_queue_purge(&n->arp_queue);
237 				n->arp_queue_len_bytes = 0;
238 				n->output = neigh_blackhole;
239 				if (n->nud_state & NUD_VALID)
240 					n->nud_state = NUD_NOARP;
241 				else
242 					n->nud_state = NUD_NONE;
243 				neigh_dbg(2, "neigh %p is stray\n", n);
244 			}
245 			write_unlock(&n->lock);
246 			neigh_cleanup_and_release(n);
247 		}
248 	}
249 }
250 
251 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
252 {
253 	write_lock_bh(&tbl->lock);
254 	neigh_flush_dev(tbl, dev);
255 	write_unlock_bh(&tbl->lock);
256 }
257 EXPORT_SYMBOL(neigh_changeaddr);
258 
259 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
260 {
261 	write_lock_bh(&tbl->lock);
262 	neigh_flush_dev(tbl, dev);
263 	pneigh_ifdown(tbl, dev);
264 	write_unlock_bh(&tbl->lock);
265 
266 	del_timer_sync(&tbl->proxy_timer);
267 	pneigh_queue_purge(&tbl->proxy_queue);
268 	return 0;
269 }
270 EXPORT_SYMBOL(neigh_ifdown);
271 
272 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
273 {
274 	struct neighbour *n = NULL;
275 	unsigned long now = jiffies;
276 	int entries;
277 
278 	entries = atomic_inc_return(&tbl->entries) - 1;
279 	if (entries >= tbl->gc_thresh3 ||
280 	    (entries >= tbl->gc_thresh2 &&
281 	     time_after(now, tbl->last_flush + 5 * HZ))) {
282 		if (!neigh_forced_gc(tbl) &&
283 		    entries >= tbl->gc_thresh3)
284 			goto out_entries;
285 	}
286 
287 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
288 	if (!n)
289 		goto out_entries;
290 
291 	__skb_queue_head_init(&n->arp_queue);
292 	rwlock_init(&n->lock);
293 	seqlock_init(&n->ha_lock);
294 	n->updated	  = n->used = now;
295 	n->nud_state	  = NUD_NONE;
296 	n->output	  = neigh_blackhole;
297 	seqlock_init(&n->hh.hh_lock);
298 	n->parms	  = neigh_parms_clone(&tbl->parms);
299 	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
300 
301 	NEIGH_CACHE_STAT_INC(tbl, allocs);
302 	n->tbl		  = tbl;
303 	atomic_set(&n->refcnt, 1);
304 	n->dead		  = 1;
305 out:
306 	return n;
307 
308 out_entries:
309 	atomic_dec(&tbl->entries);
310 	goto out;
311 }
312 
313 static void neigh_get_hash_rnd(u32 *x)
314 {
315 	get_random_bytes(x, sizeof(*x));
316 	*x |= 1;
317 }
318 
319 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
320 {
321 	size_t size = (1 << shift) * sizeof(struct neighbour *);
322 	struct neigh_hash_table *ret;
323 	struct neighbour __rcu **buckets;
324 	int i;
325 
326 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
327 	if (!ret)
328 		return NULL;
329 	if (size <= PAGE_SIZE)
330 		buckets = kzalloc(size, GFP_ATOMIC);
331 	else
332 		buckets = (struct neighbour __rcu **)
333 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
334 					   get_order(size));
335 	if (!buckets) {
336 		kfree(ret);
337 		return NULL;
338 	}
339 	ret->hash_buckets = buckets;
340 	ret->hash_shift = shift;
341 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
342 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
343 	return ret;
344 }
345 
346 static void neigh_hash_free_rcu(struct rcu_head *head)
347 {
348 	struct neigh_hash_table *nht = container_of(head,
349 						    struct neigh_hash_table,
350 						    rcu);
351 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
352 	struct neighbour __rcu **buckets = nht->hash_buckets;
353 
354 	if (size <= PAGE_SIZE)
355 		kfree(buckets);
356 	else
357 		free_pages((unsigned long)buckets, get_order(size));
358 	kfree(nht);
359 }
360 
361 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
362 						unsigned long new_shift)
363 {
364 	unsigned int i, hash;
365 	struct neigh_hash_table *new_nht, *old_nht;
366 
367 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
368 
369 	old_nht = rcu_dereference_protected(tbl->nht,
370 					    lockdep_is_held(&tbl->lock));
371 	new_nht = neigh_hash_alloc(new_shift);
372 	if (!new_nht)
373 		return old_nht;
374 
375 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
376 		struct neighbour *n, *next;
377 
378 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
379 						   lockdep_is_held(&tbl->lock));
380 		     n != NULL;
381 		     n = next) {
382 			hash = tbl->hash(n->primary_key, n->dev,
383 					 new_nht->hash_rnd);
384 
385 			hash >>= (32 - new_nht->hash_shift);
386 			next = rcu_dereference_protected(n->next,
387 						lockdep_is_held(&tbl->lock));
388 
389 			rcu_assign_pointer(n->next,
390 					   rcu_dereference_protected(
391 						new_nht->hash_buckets[hash],
392 						lockdep_is_held(&tbl->lock)));
393 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
394 		}
395 	}
396 
397 	rcu_assign_pointer(tbl->nht, new_nht);
398 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
399 	return new_nht;
400 }
401 
402 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
403 			       struct net_device *dev)
404 {
405 	struct neighbour *n;
406 	int key_len = tbl->key_len;
407 	u32 hash_val;
408 	struct neigh_hash_table *nht;
409 
410 	NEIGH_CACHE_STAT_INC(tbl, lookups);
411 
412 	rcu_read_lock_bh();
413 	nht = rcu_dereference_bh(tbl->nht);
414 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
415 
416 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
417 	     n != NULL;
418 	     n = rcu_dereference_bh(n->next)) {
419 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
420 			if (!atomic_inc_not_zero(&n->refcnt))
421 				n = NULL;
422 			NEIGH_CACHE_STAT_INC(tbl, hits);
423 			break;
424 		}
425 	}
426 
427 	rcu_read_unlock_bh();
428 	return n;
429 }
430 EXPORT_SYMBOL(neigh_lookup);
431 
432 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
433 				     const void *pkey)
434 {
435 	struct neighbour *n;
436 	int key_len = tbl->key_len;
437 	u32 hash_val;
438 	struct neigh_hash_table *nht;
439 
440 	NEIGH_CACHE_STAT_INC(tbl, lookups);
441 
442 	rcu_read_lock_bh();
443 	nht = rcu_dereference_bh(tbl->nht);
444 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
445 
446 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
447 	     n != NULL;
448 	     n = rcu_dereference_bh(n->next)) {
449 		if (!memcmp(n->primary_key, pkey, key_len) &&
450 		    net_eq(dev_net(n->dev), net)) {
451 			if (!atomic_inc_not_zero(&n->refcnt))
452 				n = NULL;
453 			NEIGH_CACHE_STAT_INC(tbl, hits);
454 			break;
455 		}
456 	}
457 
458 	rcu_read_unlock_bh();
459 	return n;
460 }
461 EXPORT_SYMBOL(neigh_lookup_nodev);
462 
463 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
464 				 struct net_device *dev, bool want_ref)
465 {
466 	u32 hash_val;
467 	int key_len = tbl->key_len;
468 	int error;
469 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
470 	struct neigh_hash_table *nht;
471 
472 	if (!n) {
473 		rc = ERR_PTR(-ENOBUFS);
474 		goto out;
475 	}
476 
477 	memcpy(n->primary_key, pkey, key_len);
478 	n->dev = dev;
479 	dev_hold(dev);
480 
481 	/* Protocol specific setup. */
482 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
483 		rc = ERR_PTR(error);
484 		goto out_neigh_release;
485 	}
486 
487 	if (dev->netdev_ops->ndo_neigh_construct) {
488 		error = dev->netdev_ops->ndo_neigh_construct(n);
489 		if (error < 0) {
490 			rc = ERR_PTR(error);
491 			goto out_neigh_release;
492 		}
493 	}
494 
495 	/* Device specific setup. */
496 	if (n->parms->neigh_setup &&
497 	    (error = n->parms->neigh_setup(n)) < 0) {
498 		rc = ERR_PTR(error);
499 		goto out_neigh_release;
500 	}
501 
502 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
503 
504 	write_lock_bh(&tbl->lock);
505 	nht = rcu_dereference_protected(tbl->nht,
506 					lockdep_is_held(&tbl->lock));
507 
508 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
509 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
510 
511 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
512 
513 	if (n->parms->dead) {
514 		rc = ERR_PTR(-EINVAL);
515 		goto out_tbl_unlock;
516 	}
517 
518 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
519 					    lockdep_is_held(&tbl->lock));
520 	     n1 != NULL;
521 	     n1 = rcu_dereference_protected(n1->next,
522 			lockdep_is_held(&tbl->lock))) {
523 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
524 			if (want_ref)
525 				neigh_hold(n1);
526 			rc = n1;
527 			goto out_tbl_unlock;
528 		}
529 	}
530 
531 	n->dead = 0;
532 	if (want_ref)
533 		neigh_hold(n);
534 	rcu_assign_pointer(n->next,
535 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
536 						     lockdep_is_held(&tbl->lock)));
537 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
538 	write_unlock_bh(&tbl->lock);
539 	neigh_dbg(2, "neigh %p is created\n", n);
540 	rc = n;
541 out:
542 	return rc;
543 out_tbl_unlock:
544 	write_unlock_bh(&tbl->lock);
545 out_neigh_release:
546 	neigh_release(n);
547 	goto out;
548 }
549 EXPORT_SYMBOL(__neigh_create);
550 
551 static u32 pneigh_hash(const void *pkey, int key_len)
552 {
553 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
554 	hash_val ^= (hash_val >> 16);
555 	hash_val ^= hash_val >> 8;
556 	hash_val ^= hash_val >> 4;
557 	hash_val &= PNEIGH_HASHMASK;
558 	return hash_val;
559 }
560 
561 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
562 					      struct net *net,
563 					      const void *pkey,
564 					      int key_len,
565 					      struct net_device *dev)
566 {
567 	while (n) {
568 		if (!memcmp(n->key, pkey, key_len) &&
569 		    net_eq(pneigh_net(n), net) &&
570 		    (n->dev == dev || !n->dev))
571 			return n;
572 		n = n->next;
573 	}
574 	return NULL;
575 }
576 
577 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
578 		struct net *net, const void *pkey, struct net_device *dev)
579 {
580 	int key_len = tbl->key_len;
581 	u32 hash_val = pneigh_hash(pkey, key_len);
582 
583 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
584 				 net, pkey, key_len, dev);
585 }
586 EXPORT_SYMBOL_GPL(__pneigh_lookup);
587 
588 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
589 				    struct net *net, const void *pkey,
590 				    struct net_device *dev, int creat)
591 {
592 	struct pneigh_entry *n;
593 	int key_len = tbl->key_len;
594 	u32 hash_val = pneigh_hash(pkey, key_len);
595 
596 	read_lock_bh(&tbl->lock);
597 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
598 			      net, pkey, key_len, dev);
599 	read_unlock_bh(&tbl->lock);
600 
601 	if (n || !creat)
602 		goto out;
603 
604 	ASSERT_RTNL();
605 
606 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
607 	if (!n)
608 		goto out;
609 
610 	write_pnet(&n->net, hold_net(net));
611 	memcpy(n->key, pkey, key_len);
612 	n->dev = dev;
613 	if (dev)
614 		dev_hold(dev);
615 
616 	if (tbl->pconstructor && tbl->pconstructor(n)) {
617 		if (dev)
618 			dev_put(dev);
619 		release_net(net);
620 		kfree(n);
621 		n = NULL;
622 		goto out;
623 	}
624 
625 	write_lock_bh(&tbl->lock);
626 	n->next = tbl->phash_buckets[hash_val];
627 	tbl->phash_buckets[hash_val] = n;
628 	write_unlock_bh(&tbl->lock);
629 out:
630 	return n;
631 }
632 EXPORT_SYMBOL(pneigh_lookup);
633 
634 
635 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
636 		  struct net_device *dev)
637 {
638 	struct pneigh_entry *n, **np;
639 	int key_len = tbl->key_len;
640 	u32 hash_val = pneigh_hash(pkey, key_len);
641 
642 	write_lock_bh(&tbl->lock);
643 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
644 	     np = &n->next) {
645 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
646 		    net_eq(pneigh_net(n), net)) {
647 			*np = n->next;
648 			write_unlock_bh(&tbl->lock);
649 			if (tbl->pdestructor)
650 				tbl->pdestructor(n);
651 			if (n->dev)
652 				dev_put(n->dev);
653 			release_net(pneigh_net(n));
654 			kfree(n);
655 			return 0;
656 		}
657 	}
658 	write_unlock_bh(&tbl->lock);
659 	return -ENOENT;
660 }
661 
662 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
663 {
664 	struct pneigh_entry *n, **np;
665 	u32 h;
666 
667 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
668 		np = &tbl->phash_buckets[h];
669 		while ((n = *np) != NULL) {
670 			if (!dev || n->dev == dev) {
671 				*np = n->next;
672 				if (tbl->pdestructor)
673 					tbl->pdestructor(n);
674 				if (n->dev)
675 					dev_put(n->dev);
676 				release_net(pneigh_net(n));
677 				kfree(n);
678 				continue;
679 			}
680 			np = &n->next;
681 		}
682 	}
683 	return -ENOENT;
684 }
685 
686 static void neigh_parms_destroy(struct neigh_parms *parms);
687 
688 static inline void neigh_parms_put(struct neigh_parms *parms)
689 {
690 	if (atomic_dec_and_test(&parms->refcnt))
691 		neigh_parms_destroy(parms);
692 }
693 
694 /*
695  *	neighbour must already be out of the table;
696  *
697  */
698 void neigh_destroy(struct neighbour *neigh)
699 {
700 	struct net_device *dev = neigh->dev;
701 
702 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
703 
704 	if (!neigh->dead) {
705 		pr_warn("Destroying alive neighbour %p\n", neigh);
706 		dump_stack();
707 		return;
708 	}
709 
710 	if (neigh_del_timer(neigh))
711 		pr_warn("Impossible event\n");
712 
713 	write_lock_bh(&neigh->lock);
714 	__skb_queue_purge(&neigh->arp_queue);
715 	write_unlock_bh(&neigh->lock);
716 	neigh->arp_queue_len_bytes = 0;
717 
718 	if (dev->netdev_ops->ndo_neigh_destroy)
719 		dev->netdev_ops->ndo_neigh_destroy(neigh);
720 
721 	dev_put(dev);
722 	neigh_parms_put(neigh->parms);
723 
724 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
725 
726 	atomic_dec(&neigh->tbl->entries);
727 	kfree_rcu(neigh, rcu);
728 }
729 EXPORT_SYMBOL(neigh_destroy);
730 
731 /* Neighbour state is suspicious;
732    disable fast path.
733 
734    Called with write_locked neigh.
735  */
736 static void neigh_suspect(struct neighbour *neigh)
737 {
738 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
739 
740 	neigh->output = neigh->ops->output;
741 }
742 
743 /* Neighbour state is OK;
744    enable fast path.
745 
746    Called with write_locked neigh.
747  */
748 static void neigh_connect(struct neighbour *neigh)
749 {
750 	neigh_dbg(2, "neigh %p is connected\n", neigh);
751 
752 	neigh->output = neigh->ops->connected_output;
753 }
754 
755 static void neigh_periodic_work(struct work_struct *work)
756 {
757 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
758 	struct neighbour *n;
759 	struct neighbour __rcu **np;
760 	unsigned int i;
761 	struct neigh_hash_table *nht;
762 
763 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
764 
765 	write_lock_bh(&tbl->lock);
766 	nht = rcu_dereference_protected(tbl->nht,
767 					lockdep_is_held(&tbl->lock));
768 
769 	/*
770 	 *	periodically recompute ReachableTime from random function
771 	 */
772 
773 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
774 		struct neigh_parms *p;
775 		tbl->last_rand = jiffies;
776 		for (p = &tbl->parms; p; p = p->next)
777 			p->reachable_time =
778 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
779 	}
780 
781 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
782 		goto out;
783 
784 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
785 		np = &nht->hash_buckets[i];
786 
787 		while ((n = rcu_dereference_protected(*np,
788 				lockdep_is_held(&tbl->lock))) != NULL) {
789 			unsigned int state;
790 
791 			write_lock(&n->lock);
792 
793 			state = n->nud_state;
794 			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
795 				write_unlock(&n->lock);
796 				goto next_elt;
797 			}
798 
799 			if (time_before(n->used, n->confirmed))
800 				n->used = n->confirmed;
801 
802 			if (atomic_read(&n->refcnt) == 1 &&
803 			    (state == NUD_FAILED ||
804 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
805 				*np = n->next;
806 				n->dead = 1;
807 				write_unlock(&n->lock);
808 				neigh_cleanup_and_release(n);
809 				continue;
810 			}
811 			write_unlock(&n->lock);
812 
813 next_elt:
814 			np = &n->next;
815 		}
816 		/*
817 		 * It's fine to release lock here, even if hash table
818 		 * grows while we are preempted.
819 		 */
820 		write_unlock_bh(&tbl->lock);
821 		cond_resched();
822 		write_lock_bh(&tbl->lock);
823 		nht = rcu_dereference_protected(tbl->nht,
824 						lockdep_is_held(&tbl->lock));
825 	}
826 out:
827 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
828 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
829 	 * BASE_REACHABLE_TIME.
830 	 */
831 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
832 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
833 	write_unlock_bh(&tbl->lock);
834 }
835 
836 static __inline__ int neigh_max_probes(struct neighbour *n)
837 {
838 	struct neigh_parms *p = n->parms;
839 	int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES);
840 	if (!(n->nud_state & NUD_PROBE))
841 		max_probes += NEIGH_VAR(p, MCAST_PROBES);
842 	return max_probes;
843 }
844 
845 static void neigh_invalidate(struct neighbour *neigh)
846 	__releases(neigh->lock)
847 	__acquires(neigh->lock)
848 {
849 	struct sk_buff *skb;
850 
851 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
852 	neigh_dbg(2, "neigh %p is failed\n", neigh);
853 	neigh->updated = jiffies;
854 
855 	/* It is very thin place. report_unreachable is very complicated
856 	   routine. Particularly, it can hit the same neighbour entry!
857 
858 	   So that, we try to be accurate and avoid dead loop. --ANK
859 	 */
860 	while (neigh->nud_state == NUD_FAILED &&
861 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
862 		write_unlock(&neigh->lock);
863 		neigh->ops->error_report(neigh, skb);
864 		write_lock(&neigh->lock);
865 	}
866 	__skb_queue_purge(&neigh->arp_queue);
867 	neigh->arp_queue_len_bytes = 0;
868 }
869 
870 static void neigh_probe(struct neighbour *neigh)
871 	__releases(neigh->lock)
872 {
873 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
874 	/* keep skb alive even if arp_queue overflows */
875 	if (skb)
876 		skb = skb_copy(skb, GFP_ATOMIC);
877 	write_unlock(&neigh->lock);
878 	neigh->ops->solicit(neigh, skb);
879 	atomic_inc(&neigh->probes);
880 	kfree_skb(skb);
881 }
882 
883 /* Called when a timer expires for a neighbour entry. */
884 
885 static void neigh_timer_handler(unsigned long arg)
886 {
887 	unsigned long now, next;
888 	struct neighbour *neigh = (struct neighbour *)arg;
889 	unsigned int state;
890 	int notify = 0;
891 
892 	write_lock(&neigh->lock);
893 
894 	state = neigh->nud_state;
895 	now = jiffies;
896 	next = now + HZ;
897 
898 	if (!(state & NUD_IN_TIMER))
899 		goto out;
900 
901 	if (state & NUD_REACHABLE) {
902 		if (time_before_eq(now,
903 				   neigh->confirmed + neigh->parms->reachable_time)) {
904 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
905 			next = neigh->confirmed + neigh->parms->reachable_time;
906 		} else if (time_before_eq(now,
907 					  neigh->used +
908 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
909 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
910 			neigh->nud_state = NUD_DELAY;
911 			neigh->updated = jiffies;
912 			neigh_suspect(neigh);
913 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
914 		} else {
915 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
916 			neigh->nud_state = NUD_STALE;
917 			neigh->updated = jiffies;
918 			neigh_suspect(neigh);
919 			notify = 1;
920 		}
921 	} else if (state & NUD_DELAY) {
922 		if (time_before_eq(now,
923 				   neigh->confirmed +
924 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
925 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
926 			neigh->nud_state = NUD_REACHABLE;
927 			neigh->updated = jiffies;
928 			neigh_connect(neigh);
929 			notify = 1;
930 			next = neigh->confirmed + neigh->parms->reachable_time;
931 		} else {
932 			neigh_dbg(2, "neigh %p is probed\n", neigh);
933 			neigh->nud_state = NUD_PROBE;
934 			neigh->updated = jiffies;
935 			atomic_set(&neigh->probes, 0);
936 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
937 		}
938 	} else {
939 		/* NUD_PROBE|NUD_INCOMPLETE */
940 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
941 	}
942 
943 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
944 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
945 		neigh->nud_state = NUD_FAILED;
946 		notify = 1;
947 		neigh_invalidate(neigh);
948 		goto out;
949 	}
950 
951 	if (neigh->nud_state & NUD_IN_TIMER) {
952 		if (time_before(next, jiffies + HZ/2))
953 			next = jiffies + HZ/2;
954 		if (!mod_timer(&neigh->timer, next))
955 			neigh_hold(neigh);
956 	}
957 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
958 		neigh_probe(neigh);
959 	} else {
960 out:
961 		write_unlock(&neigh->lock);
962 	}
963 
964 	if (notify)
965 		neigh_update_notify(neigh);
966 
967 	neigh_release(neigh);
968 }
969 
970 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
971 {
972 	int rc;
973 	bool immediate_probe = false;
974 
975 	write_lock_bh(&neigh->lock);
976 
977 	rc = 0;
978 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
979 		goto out_unlock_bh;
980 
981 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
982 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
983 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
984 			unsigned long next, now = jiffies;
985 
986 			atomic_set(&neigh->probes,
987 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
988 			neigh->nud_state     = NUD_INCOMPLETE;
989 			neigh->updated = now;
990 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
991 					 HZ/2);
992 			neigh_add_timer(neigh, next);
993 			immediate_probe = true;
994 		} else {
995 			neigh->nud_state = NUD_FAILED;
996 			neigh->updated = jiffies;
997 			write_unlock_bh(&neigh->lock);
998 
999 			kfree_skb(skb);
1000 			return 1;
1001 		}
1002 	} else if (neigh->nud_state & NUD_STALE) {
1003 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1004 		neigh->nud_state = NUD_DELAY;
1005 		neigh->updated = jiffies;
1006 		neigh_add_timer(neigh, jiffies +
1007 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1008 	}
1009 
1010 	if (neigh->nud_state == NUD_INCOMPLETE) {
1011 		if (skb) {
1012 			while (neigh->arp_queue_len_bytes + skb->truesize >
1013 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1014 				struct sk_buff *buff;
1015 
1016 				buff = __skb_dequeue(&neigh->arp_queue);
1017 				if (!buff)
1018 					break;
1019 				neigh->arp_queue_len_bytes -= buff->truesize;
1020 				kfree_skb(buff);
1021 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1022 			}
1023 			skb_dst_force(skb);
1024 			__skb_queue_tail(&neigh->arp_queue, skb);
1025 			neigh->arp_queue_len_bytes += skb->truesize;
1026 		}
1027 		rc = 1;
1028 	}
1029 out_unlock_bh:
1030 	if (immediate_probe)
1031 		neigh_probe(neigh);
1032 	else
1033 		write_unlock(&neigh->lock);
1034 	local_bh_enable();
1035 	return rc;
1036 }
1037 EXPORT_SYMBOL(__neigh_event_send);
1038 
1039 static void neigh_update_hhs(struct neighbour *neigh)
1040 {
1041 	struct hh_cache *hh;
1042 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1043 		= NULL;
1044 
1045 	if (neigh->dev->header_ops)
1046 		update = neigh->dev->header_ops->cache_update;
1047 
1048 	if (update) {
1049 		hh = &neigh->hh;
1050 		if (hh->hh_len) {
1051 			write_seqlock_bh(&hh->hh_lock);
1052 			update(hh, neigh->dev, neigh->ha);
1053 			write_sequnlock_bh(&hh->hh_lock);
1054 		}
1055 	}
1056 }
1057 
1058 
1059 
1060 /* Generic update routine.
1061    -- lladdr is new lladdr or NULL, if it is not supplied.
1062    -- new    is new state.
1063    -- flags
1064 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1065 				if it is different.
1066 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1067 				lladdr instead of overriding it
1068 				if it is different.
1069 				It also allows to retain current state
1070 				if lladdr is unchanged.
1071 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1072 
1073 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1074 				NTF_ROUTER flag.
1075 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1076 				a router.
1077 
1078    Caller MUST hold reference count on the entry.
1079  */
1080 
1081 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1082 		 u32 flags)
1083 {
1084 	u8 old;
1085 	int err;
1086 	int notify = 0;
1087 	struct net_device *dev;
1088 	int update_isrouter = 0;
1089 
1090 	write_lock_bh(&neigh->lock);
1091 
1092 	dev    = neigh->dev;
1093 	old    = neigh->nud_state;
1094 	err    = -EPERM;
1095 
1096 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1097 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1098 		goto out;
1099 
1100 	if (!(new & NUD_VALID)) {
1101 		neigh_del_timer(neigh);
1102 		if (old & NUD_CONNECTED)
1103 			neigh_suspect(neigh);
1104 		neigh->nud_state = new;
1105 		err = 0;
1106 		notify = old & NUD_VALID;
1107 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1108 		    (new & NUD_FAILED)) {
1109 			neigh_invalidate(neigh);
1110 			notify = 1;
1111 		}
1112 		goto out;
1113 	}
1114 
1115 	/* Compare new lladdr with cached one */
1116 	if (!dev->addr_len) {
1117 		/* First case: device needs no address. */
1118 		lladdr = neigh->ha;
1119 	} else if (lladdr) {
1120 		/* The second case: if something is already cached
1121 		   and a new address is proposed:
1122 		   - compare new & old
1123 		   - if they are different, check override flag
1124 		 */
1125 		if ((old & NUD_VALID) &&
1126 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1127 			lladdr = neigh->ha;
1128 	} else {
1129 		/* No address is supplied; if we know something,
1130 		   use it, otherwise discard the request.
1131 		 */
1132 		err = -EINVAL;
1133 		if (!(old & NUD_VALID))
1134 			goto out;
1135 		lladdr = neigh->ha;
1136 	}
1137 
1138 	if (new & NUD_CONNECTED)
1139 		neigh->confirmed = jiffies;
1140 	neigh->updated = jiffies;
1141 
1142 	/* If entry was valid and address is not changed,
1143 	   do not change entry state, if new one is STALE.
1144 	 */
1145 	err = 0;
1146 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1147 	if (old & NUD_VALID) {
1148 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1149 			update_isrouter = 0;
1150 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1151 			    (old & NUD_CONNECTED)) {
1152 				lladdr = neigh->ha;
1153 				new = NUD_STALE;
1154 			} else
1155 				goto out;
1156 		} else {
1157 			if (lladdr == neigh->ha && new == NUD_STALE &&
1158 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1159 			     (old & NUD_CONNECTED))
1160 			    )
1161 				new = old;
1162 		}
1163 	}
1164 
1165 	if (new != old) {
1166 		neigh_del_timer(neigh);
1167 		if (new & NUD_IN_TIMER)
1168 			neigh_add_timer(neigh, (jiffies +
1169 						((new & NUD_REACHABLE) ?
1170 						 neigh->parms->reachable_time :
1171 						 0)));
1172 		neigh->nud_state = new;
1173 		notify = 1;
1174 	}
1175 
1176 	if (lladdr != neigh->ha) {
1177 		write_seqlock(&neigh->ha_lock);
1178 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1179 		write_sequnlock(&neigh->ha_lock);
1180 		neigh_update_hhs(neigh);
1181 		if (!(new & NUD_CONNECTED))
1182 			neigh->confirmed = jiffies -
1183 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1184 		notify = 1;
1185 	}
1186 	if (new == old)
1187 		goto out;
1188 	if (new & NUD_CONNECTED)
1189 		neigh_connect(neigh);
1190 	else
1191 		neigh_suspect(neigh);
1192 	if (!(old & NUD_VALID)) {
1193 		struct sk_buff *skb;
1194 
1195 		/* Again: avoid dead loop if something went wrong */
1196 
1197 		while (neigh->nud_state & NUD_VALID &&
1198 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1199 			struct dst_entry *dst = skb_dst(skb);
1200 			struct neighbour *n2, *n1 = neigh;
1201 			write_unlock_bh(&neigh->lock);
1202 
1203 			rcu_read_lock();
1204 
1205 			/* Why not just use 'neigh' as-is?  The problem is that
1206 			 * things such as shaper, eql, and sch_teql can end up
1207 			 * using alternative, different, neigh objects to output
1208 			 * the packet in the output path.  So what we need to do
1209 			 * here is re-lookup the top-level neigh in the path so
1210 			 * we can reinject the packet there.
1211 			 */
1212 			n2 = NULL;
1213 			if (dst) {
1214 				n2 = dst_neigh_lookup_skb(dst, skb);
1215 				if (n2)
1216 					n1 = n2;
1217 			}
1218 			n1->output(n1, skb);
1219 			if (n2)
1220 				neigh_release(n2);
1221 			rcu_read_unlock();
1222 
1223 			write_lock_bh(&neigh->lock);
1224 		}
1225 		__skb_queue_purge(&neigh->arp_queue);
1226 		neigh->arp_queue_len_bytes = 0;
1227 	}
1228 out:
1229 	if (update_isrouter) {
1230 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1231 			(neigh->flags | NTF_ROUTER) :
1232 			(neigh->flags & ~NTF_ROUTER);
1233 	}
1234 	write_unlock_bh(&neigh->lock);
1235 
1236 	if (notify)
1237 		neigh_update_notify(neigh);
1238 
1239 	return err;
1240 }
1241 EXPORT_SYMBOL(neigh_update);
1242 
1243 /* Update the neigh to listen temporarily for probe responses, even if it is
1244  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1245  */
1246 void __neigh_set_probe_once(struct neighbour *neigh)
1247 {
1248 	neigh->updated = jiffies;
1249 	if (!(neigh->nud_state & NUD_FAILED))
1250 		return;
1251 	neigh->nud_state = NUD_INCOMPLETE;
1252 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253 	neigh_add_timer(neigh,
1254 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1255 }
1256 EXPORT_SYMBOL(__neigh_set_probe_once);
1257 
1258 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1259 				 u8 *lladdr, void *saddr,
1260 				 struct net_device *dev)
1261 {
1262 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1263 						 lladdr || !dev->addr_len);
1264 	if (neigh)
1265 		neigh_update(neigh, lladdr, NUD_STALE,
1266 			     NEIGH_UPDATE_F_OVERRIDE);
1267 	return neigh;
1268 }
1269 EXPORT_SYMBOL(neigh_event_ns);
1270 
1271 /* called with read_lock_bh(&n->lock); */
1272 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1273 {
1274 	struct net_device *dev = dst->dev;
1275 	__be16 prot = dst->ops->protocol;
1276 	struct hh_cache	*hh = &n->hh;
1277 
1278 	write_lock_bh(&n->lock);
1279 
1280 	/* Only one thread can come in here and initialize the
1281 	 * hh_cache entry.
1282 	 */
1283 	if (!hh->hh_len)
1284 		dev->header_ops->cache(n, hh, prot);
1285 
1286 	write_unlock_bh(&n->lock);
1287 }
1288 
1289 /* This function can be used in contexts, where only old dev_queue_xmit
1290  * worked, f.e. if you want to override normal output path (eql, shaper),
1291  * but resolution is not made yet.
1292  */
1293 
1294 int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1295 {
1296 	struct net_device *dev = skb->dev;
1297 
1298 	__skb_pull(skb, skb_network_offset(skb));
1299 
1300 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1301 			    skb->len) < 0 &&
1302 	    dev_rebuild_header(skb))
1303 		return 0;
1304 
1305 	return dev_queue_xmit(skb);
1306 }
1307 EXPORT_SYMBOL(neigh_compat_output);
1308 
1309 /* Slow and careful. */
1310 
1311 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1312 {
1313 	struct dst_entry *dst = skb_dst(skb);
1314 	int rc = 0;
1315 
1316 	if (!dst)
1317 		goto discard;
1318 
1319 	if (!neigh_event_send(neigh, skb)) {
1320 		int err;
1321 		struct net_device *dev = neigh->dev;
1322 		unsigned int seq;
1323 
1324 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1325 			neigh_hh_init(neigh, dst);
1326 
1327 		do {
1328 			__skb_pull(skb, skb_network_offset(skb));
1329 			seq = read_seqbegin(&neigh->ha_lock);
1330 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1331 					      neigh->ha, NULL, skb->len);
1332 		} while (read_seqretry(&neigh->ha_lock, seq));
1333 
1334 		if (err >= 0)
1335 			rc = dev_queue_xmit(skb);
1336 		else
1337 			goto out_kfree_skb;
1338 	}
1339 out:
1340 	return rc;
1341 discard:
1342 	neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);
1343 out_kfree_skb:
1344 	rc = -EINVAL;
1345 	kfree_skb(skb);
1346 	goto out;
1347 }
1348 EXPORT_SYMBOL(neigh_resolve_output);
1349 
1350 /* As fast as possible without hh cache */
1351 
1352 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1353 {
1354 	struct net_device *dev = neigh->dev;
1355 	unsigned int seq;
1356 	int err;
1357 
1358 	do {
1359 		__skb_pull(skb, skb_network_offset(skb));
1360 		seq = read_seqbegin(&neigh->ha_lock);
1361 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1362 				      neigh->ha, NULL, skb->len);
1363 	} while (read_seqretry(&neigh->ha_lock, seq));
1364 
1365 	if (err >= 0)
1366 		err = dev_queue_xmit(skb);
1367 	else {
1368 		err = -EINVAL;
1369 		kfree_skb(skb);
1370 	}
1371 	return err;
1372 }
1373 EXPORT_SYMBOL(neigh_connected_output);
1374 
1375 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1376 {
1377 	return dev_queue_xmit(skb);
1378 }
1379 EXPORT_SYMBOL(neigh_direct_output);
1380 
1381 static void neigh_proxy_process(unsigned long arg)
1382 {
1383 	struct neigh_table *tbl = (struct neigh_table *)arg;
1384 	long sched_next = 0;
1385 	unsigned long now = jiffies;
1386 	struct sk_buff *skb, *n;
1387 
1388 	spin_lock(&tbl->proxy_queue.lock);
1389 
1390 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1391 		long tdif = NEIGH_CB(skb)->sched_next - now;
1392 
1393 		if (tdif <= 0) {
1394 			struct net_device *dev = skb->dev;
1395 
1396 			__skb_unlink(skb, &tbl->proxy_queue);
1397 			if (tbl->proxy_redo && netif_running(dev)) {
1398 				rcu_read_lock();
1399 				tbl->proxy_redo(skb);
1400 				rcu_read_unlock();
1401 			} else {
1402 				kfree_skb(skb);
1403 			}
1404 
1405 			dev_put(dev);
1406 		} else if (!sched_next || tdif < sched_next)
1407 			sched_next = tdif;
1408 	}
1409 	del_timer(&tbl->proxy_timer);
1410 	if (sched_next)
1411 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1412 	spin_unlock(&tbl->proxy_queue.lock);
1413 }
1414 
1415 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1416 		    struct sk_buff *skb)
1417 {
1418 	unsigned long now = jiffies;
1419 
1420 	unsigned long sched_next = now + (prandom_u32() %
1421 					  NEIGH_VAR(p, PROXY_DELAY));
1422 
1423 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1424 		kfree_skb(skb);
1425 		return;
1426 	}
1427 
1428 	NEIGH_CB(skb)->sched_next = sched_next;
1429 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1430 
1431 	spin_lock(&tbl->proxy_queue.lock);
1432 	if (del_timer(&tbl->proxy_timer)) {
1433 		if (time_before(tbl->proxy_timer.expires, sched_next))
1434 			sched_next = tbl->proxy_timer.expires;
1435 	}
1436 	skb_dst_drop(skb);
1437 	dev_hold(skb->dev);
1438 	__skb_queue_tail(&tbl->proxy_queue, skb);
1439 	mod_timer(&tbl->proxy_timer, sched_next);
1440 	spin_unlock(&tbl->proxy_queue.lock);
1441 }
1442 EXPORT_SYMBOL(pneigh_enqueue);
1443 
1444 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1445 						      struct net *net, int ifindex)
1446 {
1447 	struct neigh_parms *p;
1448 
1449 	for (p = &tbl->parms; p; p = p->next) {
1450 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1451 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1452 			return p;
1453 	}
1454 
1455 	return NULL;
1456 }
1457 
1458 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1459 				      struct neigh_table *tbl)
1460 {
1461 	struct neigh_parms *p;
1462 	struct net *net = dev_net(dev);
1463 	const struct net_device_ops *ops = dev->netdev_ops;
1464 
1465 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1466 	if (p) {
1467 		p->tbl		  = tbl;
1468 		atomic_set(&p->refcnt, 1);
1469 		p->reachable_time =
1470 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1471 		dev_hold(dev);
1472 		p->dev = dev;
1473 		write_pnet(&p->net, hold_net(net));
1474 		p->sysctl_table = NULL;
1475 
1476 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1477 			release_net(net);
1478 			dev_put(dev);
1479 			kfree(p);
1480 			return NULL;
1481 		}
1482 
1483 		write_lock_bh(&tbl->lock);
1484 		p->next		= tbl->parms.next;
1485 		tbl->parms.next = p;
1486 		write_unlock_bh(&tbl->lock);
1487 
1488 		neigh_parms_data_state_cleanall(p);
1489 	}
1490 	return p;
1491 }
1492 EXPORT_SYMBOL(neigh_parms_alloc);
1493 
1494 static void neigh_rcu_free_parms(struct rcu_head *head)
1495 {
1496 	struct neigh_parms *parms =
1497 		container_of(head, struct neigh_parms, rcu_head);
1498 
1499 	neigh_parms_put(parms);
1500 }
1501 
1502 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1503 {
1504 	struct neigh_parms **p;
1505 
1506 	if (!parms || parms == &tbl->parms)
1507 		return;
1508 	write_lock_bh(&tbl->lock);
1509 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1510 		if (*p == parms) {
1511 			*p = parms->next;
1512 			parms->dead = 1;
1513 			write_unlock_bh(&tbl->lock);
1514 			if (parms->dev)
1515 				dev_put(parms->dev);
1516 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1517 			return;
1518 		}
1519 	}
1520 	write_unlock_bh(&tbl->lock);
1521 	neigh_dbg(1, "%s: not found\n", __func__);
1522 }
1523 EXPORT_SYMBOL(neigh_parms_release);
1524 
1525 static void neigh_parms_destroy(struct neigh_parms *parms)
1526 {
1527 	release_net(neigh_parms_net(parms));
1528 	kfree(parms);
1529 }
1530 
1531 static struct lock_class_key neigh_table_proxy_queue_class;
1532 
1533 static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1534 {
1535 	unsigned long now = jiffies;
1536 	unsigned long phsize;
1537 
1538 	write_pnet(&tbl->parms.net, &init_net);
1539 	atomic_set(&tbl->parms.refcnt, 1);
1540 	tbl->parms.reachable_time =
1541 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1542 
1543 	tbl->stats = alloc_percpu(struct neigh_statistics);
1544 	if (!tbl->stats)
1545 		panic("cannot create neighbour cache statistics");
1546 
1547 #ifdef CONFIG_PROC_FS
1548 	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1549 			      &neigh_stat_seq_fops, tbl))
1550 		panic("cannot create neighbour proc dir entry");
1551 #endif
1552 
1553 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1554 
1555 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1556 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1557 
1558 	if (!tbl->nht || !tbl->phash_buckets)
1559 		panic("cannot allocate neighbour cache hashes");
1560 
1561 	if (!tbl->entry_size)
1562 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1563 					tbl->key_len, NEIGH_PRIV_ALIGN);
1564 	else
1565 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1566 
1567 	rwlock_init(&tbl->lock);
1568 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1569 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1570 			tbl->parms.reachable_time);
1571 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1572 	skb_queue_head_init_class(&tbl->proxy_queue,
1573 			&neigh_table_proxy_queue_class);
1574 
1575 	tbl->last_flush = now;
1576 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1577 }
1578 
1579 void neigh_table_init(struct neigh_table *tbl)
1580 {
1581 	struct neigh_table *tmp;
1582 
1583 	neigh_table_init_no_netlink(tbl);
1584 	write_lock(&neigh_tbl_lock);
1585 	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1586 		if (tmp->family == tbl->family)
1587 			break;
1588 	}
1589 	tbl->next	= neigh_tables;
1590 	neigh_tables	= tbl;
1591 	write_unlock(&neigh_tbl_lock);
1592 
1593 	if (unlikely(tmp)) {
1594 		pr_err("Registering multiple tables for family %d\n",
1595 		       tbl->family);
1596 		dump_stack();
1597 	}
1598 }
1599 EXPORT_SYMBOL(neigh_table_init);
1600 
1601 int neigh_table_clear(struct neigh_table *tbl)
1602 {
1603 	struct neigh_table **tp;
1604 
1605 	/* It is not clean... Fix it to unload IPv6 module safely */
1606 	cancel_delayed_work_sync(&tbl->gc_work);
1607 	del_timer_sync(&tbl->proxy_timer);
1608 	pneigh_queue_purge(&tbl->proxy_queue);
1609 	neigh_ifdown(tbl, NULL);
1610 	if (atomic_read(&tbl->entries))
1611 		pr_crit("neighbour leakage\n");
1612 	write_lock(&neigh_tbl_lock);
1613 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1614 		if (*tp == tbl) {
1615 			*tp = tbl->next;
1616 			break;
1617 		}
1618 	}
1619 	write_unlock(&neigh_tbl_lock);
1620 
1621 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1622 		 neigh_hash_free_rcu);
1623 	tbl->nht = NULL;
1624 
1625 	kfree(tbl->phash_buckets);
1626 	tbl->phash_buckets = NULL;
1627 
1628 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1629 
1630 	free_percpu(tbl->stats);
1631 	tbl->stats = NULL;
1632 
1633 	return 0;
1634 }
1635 EXPORT_SYMBOL(neigh_table_clear);
1636 
1637 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1638 {
1639 	struct net *net = sock_net(skb->sk);
1640 	struct ndmsg *ndm;
1641 	struct nlattr *dst_attr;
1642 	struct neigh_table *tbl;
1643 	struct net_device *dev = NULL;
1644 	int err = -EINVAL;
1645 
1646 	ASSERT_RTNL();
1647 	if (nlmsg_len(nlh) < sizeof(*ndm))
1648 		goto out;
1649 
1650 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1651 	if (dst_attr == NULL)
1652 		goto out;
1653 
1654 	ndm = nlmsg_data(nlh);
1655 	if (ndm->ndm_ifindex) {
1656 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1657 		if (dev == NULL) {
1658 			err = -ENODEV;
1659 			goto out;
1660 		}
1661 	}
1662 
1663 	read_lock(&neigh_tbl_lock);
1664 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1665 		struct neighbour *neigh;
1666 
1667 		if (tbl->family != ndm->ndm_family)
1668 			continue;
1669 		read_unlock(&neigh_tbl_lock);
1670 
1671 		if (nla_len(dst_attr) < tbl->key_len)
1672 			goto out;
1673 
1674 		if (ndm->ndm_flags & NTF_PROXY) {
1675 			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1676 			goto out;
1677 		}
1678 
1679 		if (dev == NULL)
1680 			goto out;
1681 
1682 		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1683 		if (neigh == NULL) {
1684 			err = -ENOENT;
1685 			goto out;
1686 		}
1687 
1688 		err = neigh_update(neigh, NULL, NUD_FAILED,
1689 				   NEIGH_UPDATE_F_OVERRIDE |
1690 				   NEIGH_UPDATE_F_ADMIN);
1691 		neigh_release(neigh);
1692 		goto out;
1693 	}
1694 	read_unlock(&neigh_tbl_lock);
1695 	err = -EAFNOSUPPORT;
1696 
1697 out:
1698 	return err;
1699 }
1700 
1701 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1702 {
1703 	struct net *net = sock_net(skb->sk);
1704 	struct ndmsg *ndm;
1705 	struct nlattr *tb[NDA_MAX+1];
1706 	struct neigh_table *tbl;
1707 	struct net_device *dev = NULL;
1708 	int err;
1709 
1710 	ASSERT_RTNL();
1711 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1712 	if (err < 0)
1713 		goto out;
1714 
1715 	err = -EINVAL;
1716 	if (tb[NDA_DST] == NULL)
1717 		goto out;
1718 
1719 	ndm = nlmsg_data(nlh);
1720 	if (ndm->ndm_ifindex) {
1721 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1722 		if (dev == NULL) {
1723 			err = -ENODEV;
1724 			goto out;
1725 		}
1726 
1727 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1728 			goto out;
1729 	}
1730 
1731 	read_lock(&neigh_tbl_lock);
1732 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1733 		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1734 		struct neighbour *neigh;
1735 		void *dst, *lladdr;
1736 
1737 		if (tbl->family != ndm->ndm_family)
1738 			continue;
1739 		read_unlock(&neigh_tbl_lock);
1740 
1741 		if (nla_len(tb[NDA_DST]) < tbl->key_len)
1742 			goto out;
1743 		dst = nla_data(tb[NDA_DST]);
1744 		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1745 
1746 		if (ndm->ndm_flags & NTF_PROXY) {
1747 			struct pneigh_entry *pn;
1748 
1749 			err = -ENOBUFS;
1750 			pn = pneigh_lookup(tbl, net, dst, dev, 1);
1751 			if (pn) {
1752 				pn->flags = ndm->ndm_flags;
1753 				err = 0;
1754 			}
1755 			goto out;
1756 		}
1757 
1758 		if (dev == NULL)
1759 			goto out;
1760 
1761 		neigh = neigh_lookup(tbl, dst, dev);
1762 		if (neigh == NULL) {
1763 			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1764 				err = -ENOENT;
1765 				goto out;
1766 			}
1767 
1768 			neigh = __neigh_lookup_errno(tbl, dst, dev);
1769 			if (IS_ERR(neigh)) {
1770 				err = PTR_ERR(neigh);
1771 				goto out;
1772 			}
1773 		} else {
1774 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1775 				err = -EEXIST;
1776 				neigh_release(neigh);
1777 				goto out;
1778 			}
1779 
1780 			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1781 				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1782 		}
1783 
1784 		if (ndm->ndm_flags & NTF_USE) {
1785 			neigh_event_send(neigh, NULL);
1786 			err = 0;
1787 		} else
1788 			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1789 		neigh_release(neigh);
1790 		goto out;
1791 	}
1792 
1793 	read_unlock(&neigh_tbl_lock);
1794 	err = -EAFNOSUPPORT;
1795 out:
1796 	return err;
1797 }
1798 
1799 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1800 {
1801 	struct nlattr *nest;
1802 
1803 	nest = nla_nest_start(skb, NDTA_PARMS);
1804 	if (nest == NULL)
1805 		return -ENOBUFS;
1806 
1807 	if ((parms->dev &&
1808 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1809 	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1810 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1811 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1812 	    /* approximative value for deprecated QUEUE_LEN (in packets) */
1813 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1814 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1815 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1816 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1817 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1818 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1819 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1820 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1821 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1822 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1823 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
1824 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1825 			  NEIGH_VAR(parms, GC_STALETIME)) ||
1826 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1827 			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
1828 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1829 			  NEIGH_VAR(parms, RETRANS_TIME)) ||
1830 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1831 			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
1832 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1833 			  NEIGH_VAR(parms, PROXY_DELAY)) ||
1834 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1835 			  NEIGH_VAR(parms, LOCKTIME)))
1836 		goto nla_put_failure;
1837 	return nla_nest_end(skb, nest);
1838 
1839 nla_put_failure:
1840 	nla_nest_cancel(skb, nest);
1841 	return -EMSGSIZE;
1842 }
1843 
1844 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1845 			      u32 pid, u32 seq, int type, int flags)
1846 {
1847 	struct nlmsghdr *nlh;
1848 	struct ndtmsg *ndtmsg;
1849 
1850 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1851 	if (nlh == NULL)
1852 		return -EMSGSIZE;
1853 
1854 	ndtmsg = nlmsg_data(nlh);
1855 
1856 	read_lock_bh(&tbl->lock);
1857 	ndtmsg->ndtm_family = tbl->family;
1858 	ndtmsg->ndtm_pad1   = 0;
1859 	ndtmsg->ndtm_pad2   = 0;
1860 
1861 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1862 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1863 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1864 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1865 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1866 		goto nla_put_failure;
1867 	{
1868 		unsigned long now = jiffies;
1869 		unsigned int flush_delta = now - tbl->last_flush;
1870 		unsigned int rand_delta = now - tbl->last_rand;
1871 		struct neigh_hash_table *nht;
1872 		struct ndt_config ndc = {
1873 			.ndtc_key_len		= tbl->key_len,
1874 			.ndtc_entry_size	= tbl->entry_size,
1875 			.ndtc_entries		= atomic_read(&tbl->entries),
1876 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1877 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1878 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1879 		};
1880 
1881 		rcu_read_lock_bh();
1882 		nht = rcu_dereference_bh(tbl->nht);
1883 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1884 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1885 		rcu_read_unlock_bh();
1886 
1887 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1888 			goto nla_put_failure;
1889 	}
1890 
1891 	{
1892 		int cpu;
1893 		struct ndt_stats ndst;
1894 
1895 		memset(&ndst, 0, sizeof(ndst));
1896 
1897 		for_each_possible_cpu(cpu) {
1898 			struct neigh_statistics	*st;
1899 
1900 			st = per_cpu_ptr(tbl->stats, cpu);
1901 			ndst.ndts_allocs		+= st->allocs;
1902 			ndst.ndts_destroys		+= st->destroys;
1903 			ndst.ndts_hash_grows		+= st->hash_grows;
1904 			ndst.ndts_res_failed		+= st->res_failed;
1905 			ndst.ndts_lookups		+= st->lookups;
1906 			ndst.ndts_hits			+= st->hits;
1907 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1908 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1909 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1910 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1911 		}
1912 
1913 		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1914 			goto nla_put_failure;
1915 	}
1916 
1917 	BUG_ON(tbl->parms.dev);
1918 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1919 		goto nla_put_failure;
1920 
1921 	read_unlock_bh(&tbl->lock);
1922 	return nlmsg_end(skb, nlh);
1923 
1924 nla_put_failure:
1925 	read_unlock_bh(&tbl->lock);
1926 	nlmsg_cancel(skb, nlh);
1927 	return -EMSGSIZE;
1928 }
1929 
1930 static int neightbl_fill_param_info(struct sk_buff *skb,
1931 				    struct neigh_table *tbl,
1932 				    struct neigh_parms *parms,
1933 				    u32 pid, u32 seq, int type,
1934 				    unsigned int flags)
1935 {
1936 	struct ndtmsg *ndtmsg;
1937 	struct nlmsghdr *nlh;
1938 
1939 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1940 	if (nlh == NULL)
1941 		return -EMSGSIZE;
1942 
1943 	ndtmsg = nlmsg_data(nlh);
1944 
1945 	read_lock_bh(&tbl->lock);
1946 	ndtmsg->ndtm_family = tbl->family;
1947 	ndtmsg->ndtm_pad1   = 0;
1948 	ndtmsg->ndtm_pad2   = 0;
1949 
1950 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1951 	    neightbl_fill_parms(skb, parms) < 0)
1952 		goto errout;
1953 
1954 	read_unlock_bh(&tbl->lock);
1955 	return nlmsg_end(skb, nlh);
1956 errout:
1957 	read_unlock_bh(&tbl->lock);
1958 	nlmsg_cancel(skb, nlh);
1959 	return -EMSGSIZE;
1960 }
1961 
1962 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1963 	[NDTA_NAME]		= { .type = NLA_STRING },
1964 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1965 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1966 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1967 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1968 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1969 };
1970 
1971 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1972 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1973 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1974 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1975 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1976 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1977 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1978 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1979 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1980 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1981 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1982 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1983 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1984 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1985 };
1986 
1987 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1988 {
1989 	struct net *net = sock_net(skb->sk);
1990 	struct neigh_table *tbl;
1991 	struct ndtmsg *ndtmsg;
1992 	struct nlattr *tb[NDTA_MAX+1];
1993 	int err;
1994 
1995 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1996 			  nl_neightbl_policy);
1997 	if (err < 0)
1998 		goto errout;
1999 
2000 	if (tb[NDTA_NAME] == NULL) {
2001 		err = -EINVAL;
2002 		goto errout;
2003 	}
2004 
2005 	ndtmsg = nlmsg_data(nlh);
2006 	read_lock(&neigh_tbl_lock);
2007 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
2008 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2009 			continue;
2010 
2011 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
2012 			break;
2013 	}
2014 
2015 	if (tbl == NULL) {
2016 		err = -ENOENT;
2017 		goto errout_locked;
2018 	}
2019 
2020 	/*
2021 	 * We acquire tbl->lock to be nice to the periodic timers and
2022 	 * make sure they always see a consistent set of values.
2023 	 */
2024 	write_lock_bh(&tbl->lock);
2025 
2026 	if (tb[NDTA_PARMS]) {
2027 		struct nlattr *tbp[NDTPA_MAX+1];
2028 		struct neigh_parms *p;
2029 		int i, ifindex = 0;
2030 
2031 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2032 				       nl_ntbl_parm_policy);
2033 		if (err < 0)
2034 			goto errout_tbl_lock;
2035 
2036 		if (tbp[NDTPA_IFINDEX])
2037 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2038 
2039 		p = lookup_neigh_parms(tbl, net, ifindex);
2040 		if (p == NULL) {
2041 			err = -ENOENT;
2042 			goto errout_tbl_lock;
2043 		}
2044 
2045 		for (i = 1; i <= NDTPA_MAX; i++) {
2046 			if (tbp[i] == NULL)
2047 				continue;
2048 
2049 			switch (i) {
2050 			case NDTPA_QUEUE_LEN:
2051 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2052 					      nla_get_u32(tbp[i]) *
2053 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2054 				break;
2055 			case NDTPA_QUEUE_LENBYTES:
2056 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2057 					      nla_get_u32(tbp[i]));
2058 				break;
2059 			case NDTPA_PROXY_QLEN:
2060 				NEIGH_VAR_SET(p, PROXY_QLEN,
2061 					      nla_get_u32(tbp[i]));
2062 				break;
2063 			case NDTPA_APP_PROBES:
2064 				NEIGH_VAR_SET(p, APP_PROBES,
2065 					      nla_get_u32(tbp[i]));
2066 				break;
2067 			case NDTPA_UCAST_PROBES:
2068 				NEIGH_VAR_SET(p, UCAST_PROBES,
2069 					      nla_get_u32(tbp[i]));
2070 				break;
2071 			case NDTPA_MCAST_PROBES:
2072 				NEIGH_VAR_SET(p, MCAST_PROBES,
2073 					      nla_get_u32(tbp[i]));
2074 				break;
2075 			case NDTPA_BASE_REACHABLE_TIME:
2076 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2077 					      nla_get_msecs(tbp[i]));
2078 				break;
2079 			case NDTPA_GC_STALETIME:
2080 				NEIGH_VAR_SET(p, GC_STALETIME,
2081 					      nla_get_msecs(tbp[i]));
2082 				break;
2083 			case NDTPA_DELAY_PROBE_TIME:
2084 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2085 					      nla_get_msecs(tbp[i]));
2086 				break;
2087 			case NDTPA_RETRANS_TIME:
2088 				NEIGH_VAR_SET(p, RETRANS_TIME,
2089 					      nla_get_msecs(tbp[i]));
2090 				break;
2091 			case NDTPA_ANYCAST_DELAY:
2092 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2093 					      nla_get_msecs(tbp[i]));
2094 				break;
2095 			case NDTPA_PROXY_DELAY:
2096 				NEIGH_VAR_SET(p, PROXY_DELAY,
2097 					      nla_get_msecs(tbp[i]));
2098 				break;
2099 			case NDTPA_LOCKTIME:
2100 				NEIGH_VAR_SET(p, LOCKTIME,
2101 					      nla_get_msecs(tbp[i]));
2102 				break;
2103 			}
2104 		}
2105 	}
2106 
2107 	err = -ENOENT;
2108 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2109 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2110 	    !net_eq(net, &init_net))
2111 		goto errout_tbl_lock;
2112 
2113 	if (tb[NDTA_THRESH1])
2114 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2115 
2116 	if (tb[NDTA_THRESH2])
2117 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2118 
2119 	if (tb[NDTA_THRESH3])
2120 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2121 
2122 	if (tb[NDTA_GC_INTERVAL])
2123 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2124 
2125 	err = 0;
2126 
2127 errout_tbl_lock:
2128 	write_unlock_bh(&tbl->lock);
2129 errout_locked:
2130 	read_unlock(&neigh_tbl_lock);
2131 errout:
2132 	return err;
2133 }
2134 
2135 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2136 {
2137 	struct net *net = sock_net(skb->sk);
2138 	int family, tidx, nidx = 0;
2139 	int tbl_skip = cb->args[0];
2140 	int neigh_skip = cb->args[1];
2141 	struct neigh_table *tbl;
2142 
2143 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2144 
2145 	read_lock(&neigh_tbl_lock);
2146 	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2147 		struct neigh_parms *p;
2148 
2149 		if (tidx < tbl_skip || (family && tbl->family != family))
2150 			continue;
2151 
2152 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2153 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2154 				       NLM_F_MULTI) <= 0)
2155 			break;
2156 
2157 		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2158 			if (!net_eq(neigh_parms_net(p), net))
2159 				continue;
2160 
2161 			if (nidx < neigh_skip)
2162 				goto next;
2163 
2164 			if (neightbl_fill_param_info(skb, tbl, p,
2165 						     NETLINK_CB(cb->skb).portid,
2166 						     cb->nlh->nlmsg_seq,
2167 						     RTM_NEWNEIGHTBL,
2168 						     NLM_F_MULTI) <= 0)
2169 				goto out;
2170 		next:
2171 			nidx++;
2172 		}
2173 
2174 		neigh_skip = 0;
2175 	}
2176 out:
2177 	read_unlock(&neigh_tbl_lock);
2178 	cb->args[0] = tidx;
2179 	cb->args[1] = nidx;
2180 
2181 	return skb->len;
2182 }
2183 
2184 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2185 			   u32 pid, u32 seq, int type, unsigned int flags)
2186 {
2187 	unsigned long now = jiffies;
2188 	struct nda_cacheinfo ci;
2189 	struct nlmsghdr *nlh;
2190 	struct ndmsg *ndm;
2191 
2192 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2193 	if (nlh == NULL)
2194 		return -EMSGSIZE;
2195 
2196 	ndm = nlmsg_data(nlh);
2197 	ndm->ndm_family	 = neigh->ops->family;
2198 	ndm->ndm_pad1    = 0;
2199 	ndm->ndm_pad2    = 0;
2200 	ndm->ndm_flags	 = neigh->flags;
2201 	ndm->ndm_type	 = neigh->type;
2202 	ndm->ndm_ifindex = neigh->dev->ifindex;
2203 
2204 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2205 		goto nla_put_failure;
2206 
2207 	read_lock_bh(&neigh->lock);
2208 	ndm->ndm_state	 = neigh->nud_state;
2209 	if (neigh->nud_state & NUD_VALID) {
2210 		char haddr[MAX_ADDR_LEN];
2211 
2212 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2213 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2214 			read_unlock_bh(&neigh->lock);
2215 			goto nla_put_failure;
2216 		}
2217 	}
2218 
2219 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2220 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2221 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2222 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2223 	read_unlock_bh(&neigh->lock);
2224 
2225 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2226 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2227 		goto nla_put_failure;
2228 
2229 	return nlmsg_end(skb, nlh);
2230 
2231 nla_put_failure:
2232 	nlmsg_cancel(skb, nlh);
2233 	return -EMSGSIZE;
2234 }
2235 
2236 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2237 			    u32 pid, u32 seq, int type, unsigned int flags,
2238 			    struct neigh_table *tbl)
2239 {
2240 	struct nlmsghdr *nlh;
2241 	struct ndmsg *ndm;
2242 
2243 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2244 	if (nlh == NULL)
2245 		return -EMSGSIZE;
2246 
2247 	ndm = nlmsg_data(nlh);
2248 	ndm->ndm_family	 = tbl->family;
2249 	ndm->ndm_pad1    = 0;
2250 	ndm->ndm_pad2    = 0;
2251 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2252 	ndm->ndm_type	 = RTN_UNICAST;
2253 	ndm->ndm_ifindex = pn->dev->ifindex;
2254 	ndm->ndm_state	 = NUD_NONE;
2255 
2256 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2257 		goto nla_put_failure;
2258 
2259 	return nlmsg_end(skb, nlh);
2260 
2261 nla_put_failure:
2262 	nlmsg_cancel(skb, nlh);
2263 	return -EMSGSIZE;
2264 }
2265 
2266 static void neigh_update_notify(struct neighbour *neigh)
2267 {
2268 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2269 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2270 }
2271 
2272 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2273 			    struct netlink_callback *cb)
2274 {
2275 	struct net *net = sock_net(skb->sk);
2276 	struct neighbour *n;
2277 	int rc, h, s_h = cb->args[1];
2278 	int idx, s_idx = idx = cb->args[2];
2279 	struct neigh_hash_table *nht;
2280 
2281 	rcu_read_lock_bh();
2282 	nht = rcu_dereference_bh(tbl->nht);
2283 
2284 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2285 		if (h > s_h)
2286 			s_idx = 0;
2287 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2288 		     n != NULL;
2289 		     n = rcu_dereference_bh(n->next)) {
2290 			if (!net_eq(dev_net(n->dev), net))
2291 				continue;
2292 			if (idx < s_idx)
2293 				goto next;
2294 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2295 					    cb->nlh->nlmsg_seq,
2296 					    RTM_NEWNEIGH,
2297 					    NLM_F_MULTI) <= 0) {
2298 				rc = -1;
2299 				goto out;
2300 			}
2301 next:
2302 			idx++;
2303 		}
2304 	}
2305 	rc = skb->len;
2306 out:
2307 	rcu_read_unlock_bh();
2308 	cb->args[1] = h;
2309 	cb->args[2] = idx;
2310 	return rc;
2311 }
2312 
2313 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2314 			     struct netlink_callback *cb)
2315 {
2316 	struct pneigh_entry *n;
2317 	struct net *net = sock_net(skb->sk);
2318 	int rc, h, s_h = cb->args[3];
2319 	int idx, s_idx = idx = cb->args[4];
2320 
2321 	read_lock_bh(&tbl->lock);
2322 
2323 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2324 		if (h > s_h)
2325 			s_idx = 0;
2326 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2327 			if (dev_net(n->dev) != net)
2328 				continue;
2329 			if (idx < s_idx)
2330 				goto next;
2331 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2332 					    cb->nlh->nlmsg_seq,
2333 					    RTM_NEWNEIGH,
2334 					    NLM_F_MULTI, tbl) <= 0) {
2335 				read_unlock_bh(&tbl->lock);
2336 				rc = -1;
2337 				goto out;
2338 			}
2339 		next:
2340 			idx++;
2341 		}
2342 	}
2343 
2344 	read_unlock_bh(&tbl->lock);
2345 	rc = skb->len;
2346 out:
2347 	cb->args[3] = h;
2348 	cb->args[4] = idx;
2349 	return rc;
2350 
2351 }
2352 
2353 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2354 {
2355 	struct neigh_table *tbl;
2356 	int t, family, s_t;
2357 	int proxy = 0;
2358 	int err;
2359 
2360 	read_lock(&neigh_tbl_lock);
2361 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2362 
2363 	/* check for full ndmsg structure presence, family member is
2364 	 * the same for both structures
2365 	 */
2366 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2367 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2368 		proxy = 1;
2369 
2370 	s_t = cb->args[0];
2371 
2372 	for (tbl = neigh_tables, t = 0; tbl;
2373 	     tbl = tbl->next, t++) {
2374 		if (t < s_t || (family && tbl->family != family))
2375 			continue;
2376 		if (t > s_t)
2377 			memset(&cb->args[1], 0, sizeof(cb->args) -
2378 						sizeof(cb->args[0]));
2379 		if (proxy)
2380 			err = pneigh_dump_table(tbl, skb, cb);
2381 		else
2382 			err = neigh_dump_table(tbl, skb, cb);
2383 		if (err < 0)
2384 			break;
2385 	}
2386 	read_unlock(&neigh_tbl_lock);
2387 
2388 	cb->args[0] = t;
2389 	return skb->len;
2390 }
2391 
2392 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2393 {
2394 	int chain;
2395 	struct neigh_hash_table *nht;
2396 
2397 	rcu_read_lock_bh();
2398 	nht = rcu_dereference_bh(tbl->nht);
2399 
2400 	read_lock(&tbl->lock); /* avoid resizes */
2401 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2402 		struct neighbour *n;
2403 
2404 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2405 		     n != NULL;
2406 		     n = rcu_dereference_bh(n->next))
2407 			cb(n, cookie);
2408 	}
2409 	read_unlock(&tbl->lock);
2410 	rcu_read_unlock_bh();
2411 }
2412 EXPORT_SYMBOL(neigh_for_each);
2413 
2414 /* The tbl->lock must be held as a writer and BH disabled. */
2415 void __neigh_for_each_release(struct neigh_table *tbl,
2416 			      int (*cb)(struct neighbour *))
2417 {
2418 	int chain;
2419 	struct neigh_hash_table *nht;
2420 
2421 	nht = rcu_dereference_protected(tbl->nht,
2422 					lockdep_is_held(&tbl->lock));
2423 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2424 		struct neighbour *n;
2425 		struct neighbour __rcu **np;
2426 
2427 		np = &nht->hash_buckets[chain];
2428 		while ((n = rcu_dereference_protected(*np,
2429 					lockdep_is_held(&tbl->lock))) != NULL) {
2430 			int release;
2431 
2432 			write_lock(&n->lock);
2433 			release = cb(n);
2434 			if (release) {
2435 				rcu_assign_pointer(*np,
2436 					rcu_dereference_protected(n->next,
2437 						lockdep_is_held(&tbl->lock)));
2438 				n->dead = 1;
2439 			} else
2440 				np = &n->next;
2441 			write_unlock(&n->lock);
2442 			if (release)
2443 				neigh_cleanup_and_release(n);
2444 		}
2445 	}
2446 }
2447 EXPORT_SYMBOL(__neigh_for_each_release);
2448 
2449 #ifdef CONFIG_PROC_FS
2450 
2451 static struct neighbour *neigh_get_first(struct seq_file *seq)
2452 {
2453 	struct neigh_seq_state *state = seq->private;
2454 	struct net *net = seq_file_net(seq);
2455 	struct neigh_hash_table *nht = state->nht;
2456 	struct neighbour *n = NULL;
2457 	int bucket = state->bucket;
2458 
2459 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2460 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2461 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2462 
2463 		while (n) {
2464 			if (!net_eq(dev_net(n->dev), net))
2465 				goto next;
2466 			if (state->neigh_sub_iter) {
2467 				loff_t fakep = 0;
2468 				void *v;
2469 
2470 				v = state->neigh_sub_iter(state, n, &fakep);
2471 				if (!v)
2472 					goto next;
2473 			}
2474 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2475 				break;
2476 			if (n->nud_state & ~NUD_NOARP)
2477 				break;
2478 next:
2479 			n = rcu_dereference_bh(n->next);
2480 		}
2481 
2482 		if (n)
2483 			break;
2484 	}
2485 	state->bucket = bucket;
2486 
2487 	return n;
2488 }
2489 
2490 static struct neighbour *neigh_get_next(struct seq_file *seq,
2491 					struct neighbour *n,
2492 					loff_t *pos)
2493 {
2494 	struct neigh_seq_state *state = seq->private;
2495 	struct net *net = seq_file_net(seq);
2496 	struct neigh_hash_table *nht = state->nht;
2497 
2498 	if (state->neigh_sub_iter) {
2499 		void *v = state->neigh_sub_iter(state, n, pos);
2500 		if (v)
2501 			return n;
2502 	}
2503 	n = rcu_dereference_bh(n->next);
2504 
2505 	while (1) {
2506 		while (n) {
2507 			if (!net_eq(dev_net(n->dev), net))
2508 				goto next;
2509 			if (state->neigh_sub_iter) {
2510 				void *v = state->neigh_sub_iter(state, n, pos);
2511 				if (v)
2512 					return n;
2513 				goto next;
2514 			}
2515 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2516 				break;
2517 
2518 			if (n->nud_state & ~NUD_NOARP)
2519 				break;
2520 next:
2521 			n = rcu_dereference_bh(n->next);
2522 		}
2523 
2524 		if (n)
2525 			break;
2526 
2527 		if (++state->bucket >= (1 << nht->hash_shift))
2528 			break;
2529 
2530 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2531 	}
2532 
2533 	if (n && pos)
2534 		--(*pos);
2535 	return n;
2536 }
2537 
2538 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2539 {
2540 	struct neighbour *n = neigh_get_first(seq);
2541 
2542 	if (n) {
2543 		--(*pos);
2544 		while (*pos) {
2545 			n = neigh_get_next(seq, n, pos);
2546 			if (!n)
2547 				break;
2548 		}
2549 	}
2550 	return *pos ? NULL : n;
2551 }
2552 
2553 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2554 {
2555 	struct neigh_seq_state *state = seq->private;
2556 	struct net *net = seq_file_net(seq);
2557 	struct neigh_table *tbl = state->tbl;
2558 	struct pneigh_entry *pn = NULL;
2559 	int bucket = state->bucket;
2560 
2561 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2562 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2563 		pn = tbl->phash_buckets[bucket];
2564 		while (pn && !net_eq(pneigh_net(pn), net))
2565 			pn = pn->next;
2566 		if (pn)
2567 			break;
2568 	}
2569 	state->bucket = bucket;
2570 
2571 	return pn;
2572 }
2573 
2574 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2575 					    struct pneigh_entry *pn,
2576 					    loff_t *pos)
2577 {
2578 	struct neigh_seq_state *state = seq->private;
2579 	struct net *net = seq_file_net(seq);
2580 	struct neigh_table *tbl = state->tbl;
2581 
2582 	do {
2583 		pn = pn->next;
2584 	} while (pn && !net_eq(pneigh_net(pn), net));
2585 
2586 	while (!pn) {
2587 		if (++state->bucket > PNEIGH_HASHMASK)
2588 			break;
2589 		pn = tbl->phash_buckets[state->bucket];
2590 		while (pn && !net_eq(pneigh_net(pn), net))
2591 			pn = pn->next;
2592 		if (pn)
2593 			break;
2594 	}
2595 
2596 	if (pn && pos)
2597 		--(*pos);
2598 
2599 	return pn;
2600 }
2601 
2602 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2603 {
2604 	struct pneigh_entry *pn = pneigh_get_first(seq);
2605 
2606 	if (pn) {
2607 		--(*pos);
2608 		while (*pos) {
2609 			pn = pneigh_get_next(seq, pn, pos);
2610 			if (!pn)
2611 				break;
2612 		}
2613 	}
2614 	return *pos ? NULL : pn;
2615 }
2616 
2617 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2618 {
2619 	struct neigh_seq_state *state = seq->private;
2620 	void *rc;
2621 	loff_t idxpos = *pos;
2622 
2623 	rc = neigh_get_idx(seq, &idxpos);
2624 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2625 		rc = pneigh_get_idx(seq, &idxpos);
2626 
2627 	return rc;
2628 }
2629 
2630 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2631 	__acquires(rcu_bh)
2632 {
2633 	struct neigh_seq_state *state = seq->private;
2634 
2635 	state->tbl = tbl;
2636 	state->bucket = 0;
2637 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2638 
2639 	rcu_read_lock_bh();
2640 	state->nht = rcu_dereference_bh(tbl->nht);
2641 
2642 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2643 }
2644 EXPORT_SYMBOL(neigh_seq_start);
2645 
2646 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2647 {
2648 	struct neigh_seq_state *state;
2649 	void *rc;
2650 
2651 	if (v == SEQ_START_TOKEN) {
2652 		rc = neigh_get_first(seq);
2653 		goto out;
2654 	}
2655 
2656 	state = seq->private;
2657 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2658 		rc = neigh_get_next(seq, v, NULL);
2659 		if (rc)
2660 			goto out;
2661 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2662 			rc = pneigh_get_first(seq);
2663 	} else {
2664 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2665 		rc = pneigh_get_next(seq, v, NULL);
2666 	}
2667 out:
2668 	++(*pos);
2669 	return rc;
2670 }
2671 EXPORT_SYMBOL(neigh_seq_next);
2672 
2673 void neigh_seq_stop(struct seq_file *seq, void *v)
2674 	__releases(rcu_bh)
2675 {
2676 	rcu_read_unlock_bh();
2677 }
2678 EXPORT_SYMBOL(neigh_seq_stop);
2679 
2680 /* statistics via seq_file */
2681 
2682 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2683 {
2684 	struct neigh_table *tbl = seq->private;
2685 	int cpu;
2686 
2687 	if (*pos == 0)
2688 		return SEQ_START_TOKEN;
2689 
2690 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2691 		if (!cpu_possible(cpu))
2692 			continue;
2693 		*pos = cpu+1;
2694 		return per_cpu_ptr(tbl->stats, cpu);
2695 	}
2696 	return NULL;
2697 }
2698 
2699 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2700 {
2701 	struct neigh_table *tbl = seq->private;
2702 	int cpu;
2703 
2704 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2705 		if (!cpu_possible(cpu))
2706 			continue;
2707 		*pos = cpu+1;
2708 		return per_cpu_ptr(tbl->stats, cpu);
2709 	}
2710 	return NULL;
2711 }
2712 
2713 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2714 {
2715 
2716 }
2717 
2718 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2719 {
2720 	struct neigh_table *tbl = seq->private;
2721 	struct neigh_statistics *st = v;
2722 
2723 	if (v == SEQ_START_TOKEN) {
2724 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
2725 		return 0;
2726 	}
2727 
2728 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2729 			"%08lx %08lx  %08lx %08lx %08lx\n",
2730 		   atomic_read(&tbl->entries),
2731 
2732 		   st->allocs,
2733 		   st->destroys,
2734 		   st->hash_grows,
2735 
2736 		   st->lookups,
2737 		   st->hits,
2738 
2739 		   st->res_failed,
2740 
2741 		   st->rcv_probes_mcast,
2742 		   st->rcv_probes_ucast,
2743 
2744 		   st->periodic_gc_runs,
2745 		   st->forced_gc_runs,
2746 		   st->unres_discards
2747 		   );
2748 
2749 	return 0;
2750 }
2751 
2752 static const struct seq_operations neigh_stat_seq_ops = {
2753 	.start	= neigh_stat_seq_start,
2754 	.next	= neigh_stat_seq_next,
2755 	.stop	= neigh_stat_seq_stop,
2756 	.show	= neigh_stat_seq_show,
2757 };
2758 
2759 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2760 {
2761 	int ret = seq_open(file, &neigh_stat_seq_ops);
2762 
2763 	if (!ret) {
2764 		struct seq_file *sf = file->private_data;
2765 		sf->private = PDE_DATA(inode);
2766 	}
2767 	return ret;
2768 };
2769 
2770 static const struct file_operations neigh_stat_seq_fops = {
2771 	.owner	 = THIS_MODULE,
2772 	.open 	 = neigh_stat_seq_open,
2773 	.read	 = seq_read,
2774 	.llseek	 = seq_lseek,
2775 	.release = seq_release,
2776 };
2777 
2778 #endif /* CONFIG_PROC_FS */
2779 
2780 static inline size_t neigh_nlmsg_size(void)
2781 {
2782 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2783 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2784 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2785 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2786 	       + nla_total_size(4); /* NDA_PROBES */
2787 }
2788 
2789 static void __neigh_notify(struct neighbour *n, int type, int flags)
2790 {
2791 	struct net *net = dev_net(n->dev);
2792 	struct sk_buff *skb;
2793 	int err = -ENOBUFS;
2794 
2795 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2796 	if (skb == NULL)
2797 		goto errout;
2798 
2799 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2800 	if (err < 0) {
2801 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2802 		WARN_ON(err == -EMSGSIZE);
2803 		kfree_skb(skb);
2804 		goto errout;
2805 	}
2806 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2807 	return;
2808 errout:
2809 	if (err < 0)
2810 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2811 }
2812 
2813 void neigh_app_ns(struct neighbour *n)
2814 {
2815 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2816 }
2817 EXPORT_SYMBOL(neigh_app_ns);
2818 
2819 #ifdef CONFIG_SYSCTL
2820 static int zero;
2821 static int int_max = INT_MAX;
2822 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2823 
2824 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2825 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2826 {
2827 	int size, ret;
2828 	struct ctl_table tmp = *ctl;
2829 
2830 	tmp.extra1 = &zero;
2831 	tmp.extra2 = &unres_qlen_max;
2832 	tmp.data = &size;
2833 
2834 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2835 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2836 
2837 	if (write && !ret)
2838 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2839 	return ret;
2840 }
2841 
2842 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2843 						   int family)
2844 {
2845 	switch (family) {
2846 	case AF_INET:
2847 		return __in_dev_arp_parms_get_rcu(dev);
2848 	case AF_INET6:
2849 		return __in6_dev_nd_parms_get_rcu(dev);
2850 	}
2851 	return NULL;
2852 }
2853 
2854 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2855 				  int index)
2856 {
2857 	struct net_device *dev;
2858 	int family = neigh_parms_family(p);
2859 
2860 	rcu_read_lock();
2861 	for_each_netdev_rcu(net, dev) {
2862 		struct neigh_parms *dst_p =
2863 				neigh_get_dev_parms_rcu(dev, family);
2864 
2865 		if (dst_p && !test_bit(index, dst_p->data_state))
2866 			dst_p->data[index] = p->data[index];
2867 	}
2868 	rcu_read_unlock();
2869 }
2870 
2871 static void neigh_proc_update(struct ctl_table *ctl, int write)
2872 {
2873 	struct net_device *dev = ctl->extra1;
2874 	struct neigh_parms *p = ctl->extra2;
2875 	struct net *net = neigh_parms_net(p);
2876 	int index = (int *) ctl->data - p->data;
2877 
2878 	if (!write)
2879 		return;
2880 
2881 	set_bit(index, p->data_state);
2882 	if (!dev) /* NULL dev means this is default value */
2883 		neigh_copy_dflt_parms(net, p, index);
2884 }
2885 
2886 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2887 					   void __user *buffer,
2888 					   size_t *lenp, loff_t *ppos)
2889 {
2890 	struct ctl_table tmp = *ctl;
2891 	int ret;
2892 
2893 	tmp.extra1 = &zero;
2894 	tmp.extra2 = &int_max;
2895 
2896 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2897 	neigh_proc_update(ctl, write);
2898 	return ret;
2899 }
2900 
2901 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2902 			void __user *buffer, size_t *lenp, loff_t *ppos)
2903 {
2904 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2905 
2906 	neigh_proc_update(ctl, write);
2907 	return ret;
2908 }
2909 EXPORT_SYMBOL(neigh_proc_dointvec);
2910 
2911 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2912 				void __user *buffer,
2913 				size_t *lenp, loff_t *ppos)
2914 {
2915 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2916 
2917 	neigh_proc_update(ctl, write);
2918 	return ret;
2919 }
2920 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2921 
2922 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2923 					      void __user *buffer,
2924 					      size_t *lenp, loff_t *ppos)
2925 {
2926 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2927 
2928 	neigh_proc_update(ctl, write);
2929 	return ret;
2930 }
2931 
2932 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2933 				   void __user *buffer,
2934 				   size_t *lenp, loff_t *ppos)
2935 {
2936 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2937 
2938 	neigh_proc_update(ctl, write);
2939 	return ret;
2940 }
2941 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2942 
2943 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2944 					  void __user *buffer,
2945 					  size_t *lenp, loff_t *ppos)
2946 {
2947 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2948 
2949 	neigh_proc_update(ctl, write);
2950 	return ret;
2951 }
2952 
2953 #define NEIGH_PARMS_DATA_OFFSET(index)	\
2954 	(&((struct neigh_parms *) 0)->data[index])
2955 
2956 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
2957 	[NEIGH_VAR_ ## attr] = { \
2958 		.procname	= name, \
2959 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
2960 		.maxlen		= sizeof(int), \
2961 		.mode		= mval, \
2962 		.proc_handler	= proc, \
2963 	}
2964 
2965 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
2966 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
2967 
2968 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
2969 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
2970 
2971 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
2972 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
2973 
2974 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
2975 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
2976 
2977 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
2978 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
2979 
2980 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
2981 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
2982 
2983 static struct neigh_sysctl_table {
2984 	struct ctl_table_header *sysctl_header;
2985 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2986 } neigh_sysctl_template __read_mostly = {
2987 	.neigh_vars = {
2988 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
2989 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
2990 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
2991 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
2992 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
2993 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
2994 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
2995 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
2996 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
2997 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
2998 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
2999 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3000 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3001 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3002 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3003 		[NEIGH_VAR_GC_INTERVAL] = {
3004 			.procname	= "gc_interval",
3005 			.maxlen		= sizeof(int),
3006 			.mode		= 0644,
3007 			.proc_handler	= proc_dointvec_jiffies,
3008 		},
3009 		[NEIGH_VAR_GC_THRESH1] = {
3010 			.procname	= "gc_thresh1",
3011 			.maxlen		= sizeof(int),
3012 			.mode		= 0644,
3013 			.extra1 	= &zero,
3014 			.extra2		= &int_max,
3015 			.proc_handler	= proc_dointvec_minmax,
3016 		},
3017 		[NEIGH_VAR_GC_THRESH2] = {
3018 			.procname	= "gc_thresh2",
3019 			.maxlen		= sizeof(int),
3020 			.mode		= 0644,
3021 			.extra1 	= &zero,
3022 			.extra2		= &int_max,
3023 			.proc_handler	= proc_dointvec_minmax,
3024 		},
3025 		[NEIGH_VAR_GC_THRESH3] = {
3026 			.procname	= "gc_thresh3",
3027 			.maxlen		= sizeof(int),
3028 			.mode		= 0644,
3029 			.extra1 	= &zero,
3030 			.extra2		= &int_max,
3031 			.proc_handler	= proc_dointvec_minmax,
3032 		},
3033 		{},
3034 	},
3035 };
3036 
3037 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3038 			  proc_handler *handler)
3039 {
3040 	int i;
3041 	struct neigh_sysctl_table *t;
3042 	const char *dev_name_source;
3043 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3044 	char *p_name;
3045 
3046 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3047 	if (!t)
3048 		goto err;
3049 
3050 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3051 		t->neigh_vars[i].data += (long) p;
3052 		t->neigh_vars[i].extra1 = dev;
3053 		t->neigh_vars[i].extra2 = p;
3054 	}
3055 
3056 	if (dev) {
3057 		dev_name_source = dev->name;
3058 		/* Terminate the table early */
3059 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3060 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3061 	} else {
3062 		struct neigh_table *tbl = p->tbl;
3063 		dev_name_source = "default";
3064 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3065 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3066 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3067 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3068 	}
3069 
3070 	if (handler) {
3071 		/* RetransTime */
3072 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3073 		/* ReachableTime */
3074 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3075 		/* RetransTime (in milliseconds)*/
3076 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3077 		/* ReachableTime (in milliseconds) */
3078 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3079 	}
3080 
3081 	/* Don't export sysctls to unprivileged users */
3082 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3083 		t->neigh_vars[0].procname = NULL;
3084 
3085 	switch (neigh_parms_family(p)) {
3086 	case AF_INET:
3087 	      p_name = "ipv4";
3088 	      break;
3089 	case AF_INET6:
3090 	      p_name = "ipv6";
3091 	      break;
3092 	default:
3093 	      BUG();
3094 	}
3095 
3096 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3097 		p_name, dev_name_source);
3098 	t->sysctl_header =
3099 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3100 	if (!t->sysctl_header)
3101 		goto free;
3102 
3103 	p->sysctl_table = t;
3104 	return 0;
3105 
3106 free:
3107 	kfree(t);
3108 err:
3109 	return -ENOBUFS;
3110 }
3111 EXPORT_SYMBOL(neigh_sysctl_register);
3112 
3113 void neigh_sysctl_unregister(struct neigh_parms *p)
3114 {
3115 	if (p->sysctl_table) {
3116 		struct neigh_sysctl_table *t = p->sysctl_table;
3117 		p->sysctl_table = NULL;
3118 		unregister_net_sysctl_table(t->sysctl_header);
3119 		kfree(t);
3120 	}
3121 }
3122 EXPORT_SYMBOL(neigh_sysctl_unregister);
3123 
3124 #endif	/* CONFIG_SYSCTL */
3125 
3126 static int __init neigh_init(void)
3127 {
3128 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3129 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3130 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3131 
3132 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3133 		      NULL);
3134 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3135 
3136 	return 0;
3137 }
3138 
3139 subsys_initcall(neigh_init);
3140 
3141