xref: /linux/net/core/neighbour.c (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(unsigned long arg);
55 static void __neigh_notify(struct neighbour *n, int type, int flags);
56 static void neigh_update_notify(struct neighbour *neigh);
57 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
58 
59 #ifdef CONFIG_PROC_FS
60 static const struct file_operations neigh_stat_seq_fops;
61 #endif
62 
63 /*
64    Neighbour hash table buckets are protected with rwlock tbl->lock.
65 
66    - All the scans/updates to hash buckets MUST be made under this lock.
67    - NOTHING clever should be made under this lock: no callbacks
68      to protocol backends, no attempts to send something to network.
69      It will result in deadlocks, if backend/driver wants to use neighbour
70      cache.
71    - If the entry requires some non-trivial actions, increase
72      its reference count and release table lock.
73 
74    Neighbour entries are protected:
75    - with reference count.
76    - with rwlock neigh->lock
77 
78    Reference count prevents destruction.
79 
80    neigh->lock mainly serializes ll address data and its validity state.
81    However, the same lock is used to protect another entry fields:
82     - timer
83     - resolution queue
84 
85    Again, nothing clever shall be made under neigh->lock,
86    the most complicated procedure, which we allow is dev->hard_header.
87    It is supposed, that dev->hard_header is simplistic and does
88    not make callbacks to neighbour tables.
89  */
90 
91 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
92 {
93 	kfree_skb(skb);
94 	return -ENETDOWN;
95 }
96 
97 static void neigh_cleanup_and_release(struct neighbour *neigh)
98 {
99 	if (neigh->parms->neigh_cleanup)
100 		neigh->parms->neigh_cleanup(neigh);
101 
102 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
103 	neigh_release(neigh);
104 }
105 
106 /*
107  * It is random distribution in the interval (1/2)*base...(3/2)*base.
108  * It corresponds to default IPv6 settings and is not overridable,
109  * because it is really reasonable choice.
110  */
111 
112 unsigned long neigh_rand_reach_time(unsigned long base)
113 {
114 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
115 }
116 EXPORT_SYMBOL(neigh_rand_reach_time);
117 
118 
119 static int neigh_forced_gc(struct neigh_table *tbl)
120 {
121 	int shrunk = 0;
122 	int i;
123 	struct neigh_hash_table *nht;
124 
125 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
126 
127 	write_lock_bh(&tbl->lock);
128 	nht = rcu_dereference_protected(tbl->nht,
129 					lockdep_is_held(&tbl->lock));
130 	for (i = 0; i < (1 << nht->hash_shift); i++) {
131 		struct neighbour *n;
132 		struct neighbour __rcu **np;
133 
134 		np = &nht->hash_buckets[i];
135 		while ((n = rcu_dereference_protected(*np,
136 					lockdep_is_held(&tbl->lock))) != NULL) {
137 			/* Neighbour record may be discarded if:
138 			 * - nobody refers to it.
139 			 * - it is not permanent
140 			 */
141 			write_lock(&n->lock);
142 			if (atomic_read(&n->refcnt) == 1 &&
143 			    !(n->nud_state & NUD_PERMANENT)) {
144 				rcu_assign_pointer(*np,
145 					rcu_dereference_protected(n->next,
146 						  lockdep_is_held(&tbl->lock)));
147 				n->dead = 1;
148 				shrunk	= 1;
149 				write_unlock(&n->lock);
150 				neigh_cleanup_and_release(n);
151 				continue;
152 			}
153 			write_unlock(&n->lock);
154 			np = &n->next;
155 		}
156 	}
157 
158 	tbl->last_flush = jiffies;
159 
160 	write_unlock_bh(&tbl->lock);
161 
162 	return shrunk;
163 }
164 
165 static void neigh_add_timer(struct neighbour *n, unsigned long when)
166 {
167 	neigh_hold(n);
168 	if (unlikely(mod_timer(&n->timer, when))) {
169 		printk("NEIGH: BUG, double timer add, state is %x\n",
170 		       n->nud_state);
171 		dump_stack();
172 	}
173 }
174 
175 static int neigh_del_timer(struct neighbour *n)
176 {
177 	if ((n->nud_state & NUD_IN_TIMER) &&
178 	    del_timer(&n->timer)) {
179 		neigh_release(n);
180 		return 1;
181 	}
182 	return 0;
183 }
184 
185 static void pneigh_queue_purge(struct sk_buff_head *list)
186 {
187 	struct sk_buff *skb;
188 
189 	while ((skb = skb_dequeue(list)) != NULL) {
190 		dev_put(skb->dev);
191 		kfree_skb(skb);
192 	}
193 }
194 
195 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
196 {
197 	int i;
198 	struct neigh_hash_table *nht;
199 
200 	nht = rcu_dereference_protected(tbl->nht,
201 					lockdep_is_held(&tbl->lock));
202 
203 	for (i = 0; i < (1 << nht->hash_shift); i++) {
204 		struct neighbour *n;
205 		struct neighbour __rcu **np = &nht->hash_buckets[i];
206 
207 		while ((n = rcu_dereference_protected(*np,
208 					lockdep_is_held(&tbl->lock))) != NULL) {
209 			if (dev && n->dev != dev) {
210 				np = &n->next;
211 				continue;
212 			}
213 			rcu_assign_pointer(*np,
214 				   rcu_dereference_protected(n->next,
215 						lockdep_is_held(&tbl->lock)));
216 			write_lock(&n->lock);
217 			neigh_del_timer(n);
218 			n->dead = 1;
219 
220 			if (atomic_read(&n->refcnt) != 1) {
221 				/* The most unpleasant situation.
222 				   We must destroy neighbour entry,
223 				   but someone still uses it.
224 
225 				   The destroy will be delayed until
226 				   the last user releases us, but
227 				   we must kill timers etc. and move
228 				   it to safe state.
229 				 */
230 				__skb_queue_purge(&n->arp_queue);
231 				n->arp_queue_len_bytes = 0;
232 				n->output = neigh_blackhole;
233 				if (n->nud_state & NUD_VALID)
234 					n->nud_state = NUD_NOARP;
235 				else
236 					n->nud_state = NUD_NONE;
237 				neigh_dbg(2, "neigh %p is stray\n", n);
238 			}
239 			write_unlock(&n->lock);
240 			neigh_cleanup_and_release(n);
241 		}
242 	}
243 }
244 
245 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
246 {
247 	write_lock_bh(&tbl->lock);
248 	neigh_flush_dev(tbl, dev);
249 	write_unlock_bh(&tbl->lock);
250 }
251 EXPORT_SYMBOL(neigh_changeaddr);
252 
253 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
254 {
255 	write_lock_bh(&tbl->lock);
256 	neigh_flush_dev(tbl, dev);
257 	pneigh_ifdown(tbl, dev);
258 	write_unlock_bh(&tbl->lock);
259 
260 	del_timer_sync(&tbl->proxy_timer);
261 	pneigh_queue_purge(&tbl->proxy_queue);
262 	return 0;
263 }
264 EXPORT_SYMBOL(neigh_ifdown);
265 
266 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
267 {
268 	struct neighbour *n = NULL;
269 	unsigned long now = jiffies;
270 	int entries;
271 
272 	entries = atomic_inc_return(&tbl->entries) - 1;
273 	if (entries >= tbl->gc_thresh3 ||
274 	    (entries >= tbl->gc_thresh2 &&
275 	     time_after(now, tbl->last_flush + 5 * HZ))) {
276 		if (!neigh_forced_gc(tbl) &&
277 		    entries >= tbl->gc_thresh3)
278 			goto out_entries;
279 	}
280 
281 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
282 	if (!n)
283 		goto out_entries;
284 
285 	__skb_queue_head_init(&n->arp_queue);
286 	rwlock_init(&n->lock);
287 	seqlock_init(&n->ha_lock);
288 	n->updated	  = n->used = now;
289 	n->nud_state	  = NUD_NONE;
290 	n->output	  = neigh_blackhole;
291 	seqlock_init(&n->hh.hh_lock);
292 	n->parms	  = neigh_parms_clone(&tbl->parms);
293 	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
294 
295 	NEIGH_CACHE_STAT_INC(tbl, allocs);
296 	n->tbl		  = tbl;
297 	atomic_set(&n->refcnt, 1);
298 	n->dead		  = 1;
299 out:
300 	return n;
301 
302 out_entries:
303 	atomic_dec(&tbl->entries);
304 	goto out;
305 }
306 
307 static void neigh_get_hash_rnd(u32 *x)
308 {
309 	get_random_bytes(x, sizeof(*x));
310 	*x |= 1;
311 }
312 
313 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
314 {
315 	size_t size = (1 << shift) * sizeof(struct neighbour *);
316 	struct neigh_hash_table *ret;
317 	struct neighbour __rcu **buckets;
318 	int i;
319 
320 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
321 	if (!ret)
322 		return NULL;
323 	if (size <= PAGE_SIZE)
324 		buckets = kzalloc(size, GFP_ATOMIC);
325 	else
326 		buckets = (struct neighbour __rcu **)
327 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
328 					   get_order(size));
329 	if (!buckets) {
330 		kfree(ret);
331 		return NULL;
332 	}
333 	ret->hash_buckets = buckets;
334 	ret->hash_shift = shift;
335 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
336 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
337 	return ret;
338 }
339 
340 static void neigh_hash_free_rcu(struct rcu_head *head)
341 {
342 	struct neigh_hash_table *nht = container_of(head,
343 						    struct neigh_hash_table,
344 						    rcu);
345 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
346 	struct neighbour __rcu **buckets = nht->hash_buckets;
347 
348 	if (size <= PAGE_SIZE)
349 		kfree(buckets);
350 	else
351 		free_pages((unsigned long)buckets, get_order(size));
352 	kfree(nht);
353 }
354 
355 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
356 						unsigned long new_shift)
357 {
358 	unsigned int i, hash;
359 	struct neigh_hash_table *new_nht, *old_nht;
360 
361 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
362 
363 	old_nht = rcu_dereference_protected(tbl->nht,
364 					    lockdep_is_held(&tbl->lock));
365 	new_nht = neigh_hash_alloc(new_shift);
366 	if (!new_nht)
367 		return old_nht;
368 
369 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
370 		struct neighbour *n, *next;
371 
372 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
373 						   lockdep_is_held(&tbl->lock));
374 		     n != NULL;
375 		     n = next) {
376 			hash = tbl->hash(n->primary_key, n->dev,
377 					 new_nht->hash_rnd);
378 
379 			hash >>= (32 - new_nht->hash_shift);
380 			next = rcu_dereference_protected(n->next,
381 						lockdep_is_held(&tbl->lock));
382 
383 			rcu_assign_pointer(n->next,
384 					   rcu_dereference_protected(
385 						new_nht->hash_buckets[hash],
386 						lockdep_is_held(&tbl->lock)));
387 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
388 		}
389 	}
390 
391 	rcu_assign_pointer(tbl->nht, new_nht);
392 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
393 	return new_nht;
394 }
395 
396 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
397 			       struct net_device *dev)
398 {
399 	struct neighbour *n;
400 	int key_len = tbl->key_len;
401 	u32 hash_val;
402 	struct neigh_hash_table *nht;
403 
404 	NEIGH_CACHE_STAT_INC(tbl, lookups);
405 
406 	rcu_read_lock_bh();
407 	nht = rcu_dereference_bh(tbl->nht);
408 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
409 
410 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
411 	     n != NULL;
412 	     n = rcu_dereference_bh(n->next)) {
413 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
414 			if (!atomic_inc_not_zero(&n->refcnt))
415 				n = NULL;
416 			NEIGH_CACHE_STAT_INC(tbl, hits);
417 			break;
418 		}
419 	}
420 
421 	rcu_read_unlock_bh();
422 	return n;
423 }
424 EXPORT_SYMBOL(neigh_lookup);
425 
426 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
427 				     const void *pkey)
428 {
429 	struct neighbour *n;
430 	int key_len = tbl->key_len;
431 	u32 hash_val;
432 	struct neigh_hash_table *nht;
433 
434 	NEIGH_CACHE_STAT_INC(tbl, lookups);
435 
436 	rcu_read_lock_bh();
437 	nht = rcu_dereference_bh(tbl->nht);
438 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
439 
440 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
441 	     n != NULL;
442 	     n = rcu_dereference_bh(n->next)) {
443 		if (!memcmp(n->primary_key, pkey, key_len) &&
444 		    net_eq(dev_net(n->dev), net)) {
445 			if (!atomic_inc_not_zero(&n->refcnt))
446 				n = NULL;
447 			NEIGH_CACHE_STAT_INC(tbl, hits);
448 			break;
449 		}
450 	}
451 
452 	rcu_read_unlock_bh();
453 	return n;
454 }
455 EXPORT_SYMBOL(neigh_lookup_nodev);
456 
457 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
458 				 struct net_device *dev, bool want_ref)
459 {
460 	u32 hash_val;
461 	int key_len = tbl->key_len;
462 	int error;
463 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
464 	struct neigh_hash_table *nht;
465 
466 	if (!n) {
467 		rc = ERR_PTR(-ENOBUFS);
468 		goto out;
469 	}
470 
471 	memcpy(n->primary_key, pkey, key_len);
472 	n->dev = dev;
473 	dev_hold(dev);
474 
475 	/* Protocol specific setup. */
476 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
477 		rc = ERR_PTR(error);
478 		goto out_neigh_release;
479 	}
480 
481 	if (dev->netdev_ops->ndo_neigh_construct) {
482 		error = dev->netdev_ops->ndo_neigh_construct(n);
483 		if (error < 0) {
484 			rc = ERR_PTR(error);
485 			goto out_neigh_release;
486 		}
487 	}
488 
489 	/* Device specific setup. */
490 	if (n->parms->neigh_setup &&
491 	    (error = n->parms->neigh_setup(n)) < 0) {
492 		rc = ERR_PTR(error);
493 		goto out_neigh_release;
494 	}
495 
496 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
497 
498 	write_lock_bh(&tbl->lock);
499 	nht = rcu_dereference_protected(tbl->nht,
500 					lockdep_is_held(&tbl->lock));
501 
502 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
503 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
504 
505 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
506 
507 	if (n->parms->dead) {
508 		rc = ERR_PTR(-EINVAL);
509 		goto out_tbl_unlock;
510 	}
511 
512 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
513 					    lockdep_is_held(&tbl->lock));
514 	     n1 != NULL;
515 	     n1 = rcu_dereference_protected(n1->next,
516 			lockdep_is_held(&tbl->lock))) {
517 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
518 			if (want_ref)
519 				neigh_hold(n1);
520 			rc = n1;
521 			goto out_tbl_unlock;
522 		}
523 	}
524 
525 	n->dead = 0;
526 	if (want_ref)
527 		neigh_hold(n);
528 	rcu_assign_pointer(n->next,
529 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
530 						     lockdep_is_held(&tbl->lock)));
531 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
532 	write_unlock_bh(&tbl->lock);
533 	neigh_dbg(2, "neigh %p is created\n", n);
534 	rc = n;
535 out:
536 	return rc;
537 out_tbl_unlock:
538 	write_unlock_bh(&tbl->lock);
539 out_neigh_release:
540 	neigh_release(n);
541 	goto out;
542 }
543 EXPORT_SYMBOL(__neigh_create);
544 
545 static u32 pneigh_hash(const void *pkey, int key_len)
546 {
547 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
548 	hash_val ^= (hash_val >> 16);
549 	hash_val ^= hash_val >> 8;
550 	hash_val ^= hash_val >> 4;
551 	hash_val &= PNEIGH_HASHMASK;
552 	return hash_val;
553 }
554 
555 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
556 					      struct net *net,
557 					      const void *pkey,
558 					      int key_len,
559 					      struct net_device *dev)
560 {
561 	while (n) {
562 		if (!memcmp(n->key, pkey, key_len) &&
563 		    net_eq(pneigh_net(n), net) &&
564 		    (n->dev == dev || !n->dev))
565 			return n;
566 		n = n->next;
567 	}
568 	return NULL;
569 }
570 
571 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
572 		struct net *net, const void *pkey, struct net_device *dev)
573 {
574 	int key_len = tbl->key_len;
575 	u32 hash_val = pneigh_hash(pkey, key_len);
576 
577 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
578 				 net, pkey, key_len, dev);
579 }
580 EXPORT_SYMBOL_GPL(__pneigh_lookup);
581 
582 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
583 				    struct net *net, const void *pkey,
584 				    struct net_device *dev, int creat)
585 {
586 	struct pneigh_entry *n;
587 	int key_len = tbl->key_len;
588 	u32 hash_val = pneigh_hash(pkey, key_len);
589 
590 	read_lock_bh(&tbl->lock);
591 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
592 			      net, pkey, key_len, dev);
593 	read_unlock_bh(&tbl->lock);
594 
595 	if (n || !creat)
596 		goto out;
597 
598 	ASSERT_RTNL();
599 
600 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
601 	if (!n)
602 		goto out;
603 
604 	write_pnet(&n->net, hold_net(net));
605 	memcpy(n->key, pkey, key_len);
606 	n->dev = dev;
607 	if (dev)
608 		dev_hold(dev);
609 
610 	if (tbl->pconstructor && tbl->pconstructor(n)) {
611 		if (dev)
612 			dev_put(dev);
613 		release_net(net);
614 		kfree(n);
615 		n = NULL;
616 		goto out;
617 	}
618 
619 	write_lock_bh(&tbl->lock);
620 	n->next = tbl->phash_buckets[hash_val];
621 	tbl->phash_buckets[hash_val] = n;
622 	write_unlock_bh(&tbl->lock);
623 out:
624 	return n;
625 }
626 EXPORT_SYMBOL(pneigh_lookup);
627 
628 
629 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
630 		  struct net_device *dev)
631 {
632 	struct pneigh_entry *n, **np;
633 	int key_len = tbl->key_len;
634 	u32 hash_val = pneigh_hash(pkey, key_len);
635 
636 	write_lock_bh(&tbl->lock);
637 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
638 	     np = &n->next) {
639 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
640 		    net_eq(pneigh_net(n), net)) {
641 			*np = n->next;
642 			write_unlock_bh(&tbl->lock);
643 			if (tbl->pdestructor)
644 				tbl->pdestructor(n);
645 			if (n->dev)
646 				dev_put(n->dev);
647 			release_net(pneigh_net(n));
648 			kfree(n);
649 			return 0;
650 		}
651 	}
652 	write_unlock_bh(&tbl->lock);
653 	return -ENOENT;
654 }
655 
656 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
657 {
658 	struct pneigh_entry *n, **np;
659 	u32 h;
660 
661 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
662 		np = &tbl->phash_buckets[h];
663 		while ((n = *np) != NULL) {
664 			if (!dev || n->dev == dev) {
665 				*np = n->next;
666 				if (tbl->pdestructor)
667 					tbl->pdestructor(n);
668 				if (n->dev)
669 					dev_put(n->dev);
670 				release_net(pneigh_net(n));
671 				kfree(n);
672 				continue;
673 			}
674 			np = &n->next;
675 		}
676 	}
677 	return -ENOENT;
678 }
679 
680 static void neigh_parms_destroy(struct neigh_parms *parms);
681 
682 static inline void neigh_parms_put(struct neigh_parms *parms)
683 {
684 	if (atomic_dec_and_test(&parms->refcnt))
685 		neigh_parms_destroy(parms);
686 }
687 
688 /*
689  *	neighbour must already be out of the table;
690  *
691  */
692 void neigh_destroy(struct neighbour *neigh)
693 {
694 	struct net_device *dev = neigh->dev;
695 
696 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
697 
698 	if (!neigh->dead) {
699 		pr_warn("Destroying alive neighbour %p\n", neigh);
700 		dump_stack();
701 		return;
702 	}
703 
704 	if (neigh_del_timer(neigh))
705 		pr_warn("Impossible event\n");
706 
707 	write_lock_bh(&neigh->lock);
708 	__skb_queue_purge(&neigh->arp_queue);
709 	write_unlock_bh(&neigh->lock);
710 	neigh->arp_queue_len_bytes = 0;
711 
712 	if (dev->netdev_ops->ndo_neigh_destroy)
713 		dev->netdev_ops->ndo_neigh_destroy(neigh);
714 
715 	dev_put(dev);
716 	neigh_parms_put(neigh->parms);
717 
718 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
719 
720 	atomic_dec(&neigh->tbl->entries);
721 	kfree_rcu(neigh, rcu);
722 }
723 EXPORT_SYMBOL(neigh_destroy);
724 
725 /* Neighbour state is suspicious;
726    disable fast path.
727 
728    Called with write_locked neigh.
729  */
730 static void neigh_suspect(struct neighbour *neigh)
731 {
732 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
733 
734 	neigh->output = neigh->ops->output;
735 }
736 
737 /* Neighbour state is OK;
738    enable fast path.
739 
740    Called with write_locked neigh.
741  */
742 static void neigh_connect(struct neighbour *neigh)
743 {
744 	neigh_dbg(2, "neigh %p is connected\n", neigh);
745 
746 	neigh->output = neigh->ops->connected_output;
747 }
748 
749 static void neigh_periodic_work(struct work_struct *work)
750 {
751 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
752 	struct neighbour *n;
753 	struct neighbour __rcu **np;
754 	unsigned int i;
755 	struct neigh_hash_table *nht;
756 
757 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
758 
759 	write_lock_bh(&tbl->lock);
760 	nht = rcu_dereference_protected(tbl->nht,
761 					lockdep_is_held(&tbl->lock));
762 
763 	/*
764 	 *	periodically recompute ReachableTime from random function
765 	 */
766 
767 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
768 		struct neigh_parms *p;
769 		tbl->last_rand = jiffies;
770 		list_for_each_entry(p, &tbl->parms_list, list)
771 			p->reachable_time =
772 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
773 	}
774 
775 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
776 		goto out;
777 
778 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
779 		np = &nht->hash_buckets[i];
780 
781 		while ((n = rcu_dereference_protected(*np,
782 				lockdep_is_held(&tbl->lock))) != NULL) {
783 			unsigned int state;
784 
785 			write_lock(&n->lock);
786 
787 			state = n->nud_state;
788 			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
789 				write_unlock(&n->lock);
790 				goto next_elt;
791 			}
792 
793 			if (time_before(n->used, n->confirmed))
794 				n->used = n->confirmed;
795 
796 			if (atomic_read(&n->refcnt) == 1 &&
797 			    (state == NUD_FAILED ||
798 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
799 				*np = n->next;
800 				n->dead = 1;
801 				write_unlock(&n->lock);
802 				neigh_cleanup_and_release(n);
803 				continue;
804 			}
805 			write_unlock(&n->lock);
806 
807 next_elt:
808 			np = &n->next;
809 		}
810 		/*
811 		 * It's fine to release lock here, even if hash table
812 		 * grows while we are preempted.
813 		 */
814 		write_unlock_bh(&tbl->lock);
815 		cond_resched();
816 		write_lock_bh(&tbl->lock);
817 		nht = rcu_dereference_protected(tbl->nht,
818 						lockdep_is_held(&tbl->lock));
819 	}
820 out:
821 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
822 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
823 	 * BASE_REACHABLE_TIME.
824 	 */
825 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
826 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
827 	write_unlock_bh(&tbl->lock);
828 }
829 
830 static __inline__ int neigh_max_probes(struct neighbour *n)
831 {
832 	struct neigh_parms *p = n->parms;
833 	int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES);
834 	if (!(n->nud_state & NUD_PROBE))
835 		max_probes += NEIGH_VAR(p, MCAST_PROBES);
836 	return max_probes;
837 }
838 
839 static void neigh_invalidate(struct neighbour *neigh)
840 	__releases(neigh->lock)
841 	__acquires(neigh->lock)
842 {
843 	struct sk_buff *skb;
844 
845 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
846 	neigh_dbg(2, "neigh %p is failed\n", neigh);
847 	neigh->updated = jiffies;
848 
849 	/* It is very thin place. report_unreachable is very complicated
850 	   routine. Particularly, it can hit the same neighbour entry!
851 
852 	   So that, we try to be accurate and avoid dead loop. --ANK
853 	 */
854 	while (neigh->nud_state == NUD_FAILED &&
855 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
856 		write_unlock(&neigh->lock);
857 		neigh->ops->error_report(neigh, skb);
858 		write_lock(&neigh->lock);
859 	}
860 	__skb_queue_purge(&neigh->arp_queue);
861 	neigh->arp_queue_len_bytes = 0;
862 }
863 
864 static void neigh_probe(struct neighbour *neigh)
865 	__releases(neigh->lock)
866 {
867 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
868 	/* keep skb alive even if arp_queue overflows */
869 	if (skb)
870 		skb = skb_copy(skb, GFP_ATOMIC);
871 	write_unlock(&neigh->lock);
872 	neigh->ops->solicit(neigh, skb);
873 	atomic_inc(&neigh->probes);
874 	kfree_skb(skb);
875 }
876 
877 /* Called when a timer expires for a neighbour entry. */
878 
879 static void neigh_timer_handler(unsigned long arg)
880 {
881 	unsigned long now, next;
882 	struct neighbour *neigh = (struct neighbour *)arg;
883 	unsigned int state;
884 	int notify = 0;
885 
886 	write_lock(&neigh->lock);
887 
888 	state = neigh->nud_state;
889 	now = jiffies;
890 	next = now + HZ;
891 
892 	if (!(state & NUD_IN_TIMER))
893 		goto out;
894 
895 	if (state & NUD_REACHABLE) {
896 		if (time_before_eq(now,
897 				   neigh->confirmed + neigh->parms->reachable_time)) {
898 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
899 			next = neigh->confirmed + neigh->parms->reachable_time;
900 		} else if (time_before_eq(now,
901 					  neigh->used +
902 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
903 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
904 			neigh->nud_state = NUD_DELAY;
905 			neigh->updated = jiffies;
906 			neigh_suspect(neigh);
907 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
908 		} else {
909 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
910 			neigh->nud_state = NUD_STALE;
911 			neigh->updated = jiffies;
912 			neigh_suspect(neigh);
913 			notify = 1;
914 		}
915 	} else if (state & NUD_DELAY) {
916 		if (time_before_eq(now,
917 				   neigh->confirmed +
918 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
919 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
920 			neigh->nud_state = NUD_REACHABLE;
921 			neigh->updated = jiffies;
922 			neigh_connect(neigh);
923 			notify = 1;
924 			next = neigh->confirmed + neigh->parms->reachable_time;
925 		} else {
926 			neigh_dbg(2, "neigh %p is probed\n", neigh);
927 			neigh->nud_state = NUD_PROBE;
928 			neigh->updated = jiffies;
929 			atomic_set(&neigh->probes, 0);
930 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
931 		}
932 	} else {
933 		/* NUD_PROBE|NUD_INCOMPLETE */
934 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
935 	}
936 
937 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
938 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
939 		neigh->nud_state = NUD_FAILED;
940 		notify = 1;
941 		neigh_invalidate(neigh);
942 		goto out;
943 	}
944 
945 	if (neigh->nud_state & NUD_IN_TIMER) {
946 		if (time_before(next, jiffies + HZ/2))
947 			next = jiffies + HZ/2;
948 		if (!mod_timer(&neigh->timer, next))
949 			neigh_hold(neigh);
950 	}
951 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
952 		neigh_probe(neigh);
953 	} else {
954 out:
955 		write_unlock(&neigh->lock);
956 	}
957 
958 	if (notify)
959 		neigh_update_notify(neigh);
960 
961 	neigh_release(neigh);
962 }
963 
964 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
965 {
966 	int rc;
967 	bool immediate_probe = false;
968 
969 	write_lock_bh(&neigh->lock);
970 
971 	rc = 0;
972 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
973 		goto out_unlock_bh;
974 
975 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
976 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
977 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
978 			unsigned long next, now = jiffies;
979 
980 			atomic_set(&neigh->probes,
981 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
982 			neigh->nud_state     = NUD_INCOMPLETE;
983 			neigh->updated = now;
984 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
985 					 HZ/2);
986 			neigh_add_timer(neigh, next);
987 			immediate_probe = true;
988 		} else {
989 			neigh->nud_state = NUD_FAILED;
990 			neigh->updated = jiffies;
991 			write_unlock_bh(&neigh->lock);
992 
993 			kfree_skb(skb);
994 			return 1;
995 		}
996 	} else if (neigh->nud_state & NUD_STALE) {
997 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
998 		neigh->nud_state = NUD_DELAY;
999 		neigh->updated = jiffies;
1000 		neigh_add_timer(neigh, jiffies +
1001 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1002 	}
1003 
1004 	if (neigh->nud_state == NUD_INCOMPLETE) {
1005 		if (skb) {
1006 			while (neigh->arp_queue_len_bytes + skb->truesize >
1007 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1008 				struct sk_buff *buff;
1009 
1010 				buff = __skb_dequeue(&neigh->arp_queue);
1011 				if (!buff)
1012 					break;
1013 				neigh->arp_queue_len_bytes -= buff->truesize;
1014 				kfree_skb(buff);
1015 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1016 			}
1017 			skb_dst_force(skb);
1018 			__skb_queue_tail(&neigh->arp_queue, skb);
1019 			neigh->arp_queue_len_bytes += skb->truesize;
1020 		}
1021 		rc = 1;
1022 	}
1023 out_unlock_bh:
1024 	if (immediate_probe)
1025 		neigh_probe(neigh);
1026 	else
1027 		write_unlock(&neigh->lock);
1028 	local_bh_enable();
1029 	return rc;
1030 }
1031 EXPORT_SYMBOL(__neigh_event_send);
1032 
1033 static void neigh_update_hhs(struct neighbour *neigh)
1034 {
1035 	struct hh_cache *hh;
1036 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1037 		= NULL;
1038 
1039 	if (neigh->dev->header_ops)
1040 		update = neigh->dev->header_ops->cache_update;
1041 
1042 	if (update) {
1043 		hh = &neigh->hh;
1044 		if (hh->hh_len) {
1045 			write_seqlock_bh(&hh->hh_lock);
1046 			update(hh, neigh->dev, neigh->ha);
1047 			write_sequnlock_bh(&hh->hh_lock);
1048 		}
1049 	}
1050 }
1051 
1052 
1053 
1054 /* Generic update routine.
1055    -- lladdr is new lladdr or NULL, if it is not supplied.
1056    -- new    is new state.
1057    -- flags
1058 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1059 				if it is different.
1060 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1061 				lladdr instead of overriding it
1062 				if it is different.
1063 				It also allows to retain current state
1064 				if lladdr is unchanged.
1065 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1066 
1067 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1068 				NTF_ROUTER flag.
1069 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1070 				a router.
1071 
1072    Caller MUST hold reference count on the entry.
1073  */
1074 
1075 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1076 		 u32 flags)
1077 {
1078 	u8 old;
1079 	int err;
1080 	int notify = 0;
1081 	struct net_device *dev;
1082 	int update_isrouter = 0;
1083 
1084 	write_lock_bh(&neigh->lock);
1085 
1086 	dev    = neigh->dev;
1087 	old    = neigh->nud_state;
1088 	err    = -EPERM;
1089 
1090 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1091 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1092 		goto out;
1093 
1094 	if (!(new & NUD_VALID)) {
1095 		neigh_del_timer(neigh);
1096 		if (old & NUD_CONNECTED)
1097 			neigh_suspect(neigh);
1098 		neigh->nud_state = new;
1099 		err = 0;
1100 		notify = old & NUD_VALID;
1101 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1102 		    (new & NUD_FAILED)) {
1103 			neigh_invalidate(neigh);
1104 			notify = 1;
1105 		}
1106 		goto out;
1107 	}
1108 
1109 	/* Compare new lladdr with cached one */
1110 	if (!dev->addr_len) {
1111 		/* First case: device needs no address. */
1112 		lladdr = neigh->ha;
1113 	} else if (lladdr) {
1114 		/* The second case: if something is already cached
1115 		   and a new address is proposed:
1116 		   - compare new & old
1117 		   - if they are different, check override flag
1118 		 */
1119 		if ((old & NUD_VALID) &&
1120 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1121 			lladdr = neigh->ha;
1122 	} else {
1123 		/* No address is supplied; if we know something,
1124 		   use it, otherwise discard the request.
1125 		 */
1126 		err = -EINVAL;
1127 		if (!(old & NUD_VALID))
1128 			goto out;
1129 		lladdr = neigh->ha;
1130 	}
1131 
1132 	if (new & NUD_CONNECTED)
1133 		neigh->confirmed = jiffies;
1134 	neigh->updated = jiffies;
1135 
1136 	/* If entry was valid and address is not changed,
1137 	   do not change entry state, if new one is STALE.
1138 	 */
1139 	err = 0;
1140 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1141 	if (old & NUD_VALID) {
1142 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1143 			update_isrouter = 0;
1144 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1145 			    (old & NUD_CONNECTED)) {
1146 				lladdr = neigh->ha;
1147 				new = NUD_STALE;
1148 			} else
1149 				goto out;
1150 		} else {
1151 			if (lladdr == neigh->ha && new == NUD_STALE &&
1152 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1153 			     (old & NUD_CONNECTED))
1154 			    )
1155 				new = old;
1156 		}
1157 	}
1158 
1159 	if (new != old) {
1160 		neigh_del_timer(neigh);
1161 		if (new & NUD_IN_TIMER)
1162 			neigh_add_timer(neigh, (jiffies +
1163 						((new & NUD_REACHABLE) ?
1164 						 neigh->parms->reachable_time :
1165 						 0)));
1166 		neigh->nud_state = new;
1167 		notify = 1;
1168 	}
1169 
1170 	if (lladdr != neigh->ha) {
1171 		write_seqlock(&neigh->ha_lock);
1172 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1173 		write_sequnlock(&neigh->ha_lock);
1174 		neigh_update_hhs(neigh);
1175 		if (!(new & NUD_CONNECTED))
1176 			neigh->confirmed = jiffies -
1177 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1178 		notify = 1;
1179 	}
1180 	if (new == old)
1181 		goto out;
1182 	if (new & NUD_CONNECTED)
1183 		neigh_connect(neigh);
1184 	else
1185 		neigh_suspect(neigh);
1186 	if (!(old & NUD_VALID)) {
1187 		struct sk_buff *skb;
1188 
1189 		/* Again: avoid dead loop if something went wrong */
1190 
1191 		while (neigh->nud_state & NUD_VALID &&
1192 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1193 			struct dst_entry *dst = skb_dst(skb);
1194 			struct neighbour *n2, *n1 = neigh;
1195 			write_unlock_bh(&neigh->lock);
1196 
1197 			rcu_read_lock();
1198 
1199 			/* Why not just use 'neigh' as-is?  The problem is that
1200 			 * things such as shaper, eql, and sch_teql can end up
1201 			 * using alternative, different, neigh objects to output
1202 			 * the packet in the output path.  So what we need to do
1203 			 * here is re-lookup the top-level neigh in the path so
1204 			 * we can reinject the packet there.
1205 			 */
1206 			n2 = NULL;
1207 			if (dst) {
1208 				n2 = dst_neigh_lookup_skb(dst, skb);
1209 				if (n2)
1210 					n1 = n2;
1211 			}
1212 			n1->output(n1, skb);
1213 			if (n2)
1214 				neigh_release(n2);
1215 			rcu_read_unlock();
1216 
1217 			write_lock_bh(&neigh->lock);
1218 		}
1219 		__skb_queue_purge(&neigh->arp_queue);
1220 		neigh->arp_queue_len_bytes = 0;
1221 	}
1222 out:
1223 	if (update_isrouter) {
1224 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1225 			(neigh->flags | NTF_ROUTER) :
1226 			(neigh->flags & ~NTF_ROUTER);
1227 	}
1228 	write_unlock_bh(&neigh->lock);
1229 
1230 	if (notify)
1231 		neigh_update_notify(neigh);
1232 
1233 	return err;
1234 }
1235 EXPORT_SYMBOL(neigh_update);
1236 
1237 /* Update the neigh to listen temporarily for probe responses, even if it is
1238  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1239  */
1240 void __neigh_set_probe_once(struct neighbour *neigh)
1241 {
1242 	neigh->updated = jiffies;
1243 	if (!(neigh->nud_state & NUD_FAILED))
1244 		return;
1245 	neigh->nud_state = NUD_INCOMPLETE;
1246 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1247 	neigh_add_timer(neigh,
1248 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1249 }
1250 EXPORT_SYMBOL(__neigh_set_probe_once);
1251 
1252 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1253 				 u8 *lladdr, void *saddr,
1254 				 struct net_device *dev)
1255 {
1256 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1257 						 lladdr || !dev->addr_len);
1258 	if (neigh)
1259 		neigh_update(neigh, lladdr, NUD_STALE,
1260 			     NEIGH_UPDATE_F_OVERRIDE);
1261 	return neigh;
1262 }
1263 EXPORT_SYMBOL(neigh_event_ns);
1264 
1265 /* called with read_lock_bh(&n->lock); */
1266 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1267 {
1268 	struct net_device *dev = dst->dev;
1269 	__be16 prot = dst->ops->protocol;
1270 	struct hh_cache	*hh = &n->hh;
1271 
1272 	write_lock_bh(&n->lock);
1273 
1274 	/* Only one thread can come in here and initialize the
1275 	 * hh_cache entry.
1276 	 */
1277 	if (!hh->hh_len)
1278 		dev->header_ops->cache(n, hh, prot);
1279 
1280 	write_unlock_bh(&n->lock);
1281 }
1282 
1283 /* This function can be used in contexts, where only old dev_queue_xmit
1284  * worked, f.e. if you want to override normal output path (eql, shaper),
1285  * but resolution is not made yet.
1286  */
1287 
1288 int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1289 {
1290 	struct net_device *dev = skb->dev;
1291 
1292 	__skb_pull(skb, skb_network_offset(skb));
1293 
1294 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1295 			    skb->len) < 0 &&
1296 	    dev_rebuild_header(skb))
1297 		return 0;
1298 
1299 	return dev_queue_xmit(skb);
1300 }
1301 EXPORT_SYMBOL(neigh_compat_output);
1302 
1303 /* Slow and careful. */
1304 
1305 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1306 {
1307 	struct dst_entry *dst = skb_dst(skb);
1308 	int rc = 0;
1309 
1310 	if (!dst)
1311 		goto discard;
1312 
1313 	if (!neigh_event_send(neigh, skb)) {
1314 		int err;
1315 		struct net_device *dev = neigh->dev;
1316 		unsigned int seq;
1317 
1318 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1319 			neigh_hh_init(neigh, dst);
1320 
1321 		do {
1322 			__skb_pull(skb, skb_network_offset(skb));
1323 			seq = read_seqbegin(&neigh->ha_lock);
1324 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1325 					      neigh->ha, NULL, skb->len);
1326 		} while (read_seqretry(&neigh->ha_lock, seq));
1327 
1328 		if (err >= 0)
1329 			rc = dev_queue_xmit(skb);
1330 		else
1331 			goto out_kfree_skb;
1332 	}
1333 out:
1334 	return rc;
1335 discard:
1336 	neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);
1337 out_kfree_skb:
1338 	rc = -EINVAL;
1339 	kfree_skb(skb);
1340 	goto out;
1341 }
1342 EXPORT_SYMBOL(neigh_resolve_output);
1343 
1344 /* As fast as possible without hh cache */
1345 
1346 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1347 {
1348 	struct net_device *dev = neigh->dev;
1349 	unsigned int seq;
1350 	int err;
1351 
1352 	do {
1353 		__skb_pull(skb, skb_network_offset(skb));
1354 		seq = read_seqbegin(&neigh->ha_lock);
1355 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1356 				      neigh->ha, NULL, skb->len);
1357 	} while (read_seqretry(&neigh->ha_lock, seq));
1358 
1359 	if (err >= 0)
1360 		err = dev_queue_xmit(skb);
1361 	else {
1362 		err = -EINVAL;
1363 		kfree_skb(skb);
1364 	}
1365 	return err;
1366 }
1367 EXPORT_SYMBOL(neigh_connected_output);
1368 
1369 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1370 {
1371 	return dev_queue_xmit(skb);
1372 }
1373 EXPORT_SYMBOL(neigh_direct_output);
1374 
1375 static void neigh_proxy_process(unsigned long arg)
1376 {
1377 	struct neigh_table *tbl = (struct neigh_table *)arg;
1378 	long sched_next = 0;
1379 	unsigned long now = jiffies;
1380 	struct sk_buff *skb, *n;
1381 
1382 	spin_lock(&tbl->proxy_queue.lock);
1383 
1384 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1385 		long tdif = NEIGH_CB(skb)->sched_next - now;
1386 
1387 		if (tdif <= 0) {
1388 			struct net_device *dev = skb->dev;
1389 
1390 			__skb_unlink(skb, &tbl->proxy_queue);
1391 			if (tbl->proxy_redo && netif_running(dev)) {
1392 				rcu_read_lock();
1393 				tbl->proxy_redo(skb);
1394 				rcu_read_unlock();
1395 			} else {
1396 				kfree_skb(skb);
1397 			}
1398 
1399 			dev_put(dev);
1400 		} else if (!sched_next || tdif < sched_next)
1401 			sched_next = tdif;
1402 	}
1403 	del_timer(&tbl->proxy_timer);
1404 	if (sched_next)
1405 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1406 	spin_unlock(&tbl->proxy_queue.lock);
1407 }
1408 
1409 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1410 		    struct sk_buff *skb)
1411 {
1412 	unsigned long now = jiffies;
1413 
1414 	unsigned long sched_next = now + (prandom_u32() %
1415 					  NEIGH_VAR(p, PROXY_DELAY));
1416 
1417 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1418 		kfree_skb(skb);
1419 		return;
1420 	}
1421 
1422 	NEIGH_CB(skb)->sched_next = sched_next;
1423 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1424 
1425 	spin_lock(&tbl->proxy_queue.lock);
1426 	if (del_timer(&tbl->proxy_timer)) {
1427 		if (time_before(tbl->proxy_timer.expires, sched_next))
1428 			sched_next = tbl->proxy_timer.expires;
1429 	}
1430 	skb_dst_drop(skb);
1431 	dev_hold(skb->dev);
1432 	__skb_queue_tail(&tbl->proxy_queue, skb);
1433 	mod_timer(&tbl->proxy_timer, sched_next);
1434 	spin_unlock(&tbl->proxy_queue.lock);
1435 }
1436 EXPORT_SYMBOL(pneigh_enqueue);
1437 
1438 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1439 						      struct net *net, int ifindex)
1440 {
1441 	struct neigh_parms *p;
1442 
1443 	list_for_each_entry(p, &tbl->parms_list, list) {
1444 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1445 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1446 			return p;
1447 	}
1448 
1449 	return NULL;
1450 }
1451 
1452 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1453 				      struct neigh_table *tbl)
1454 {
1455 	struct neigh_parms *p;
1456 	struct net *net = dev_net(dev);
1457 	const struct net_device_ops *ops = dev->netdev_ops;
1458 
1459 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1460 	if (p) {
1461 		p->tbl		  = tbl;
1462 		atomic_set(&p->refcnt, 1);
1463 		p->reachable_time =
1464 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1465 		dev_hold(dev);
1466 		p->dev = dev;
1467 		write_pnet(&p->net, hold_net(net));
1468 		p->sysctl_table = NULL;
1469 
1470 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1471 			release_net(net);
1472 			dev_put(dev);
1473 			kfree(p);
1474 			return NULL;
1475 		}
1476 
1477 		write_lock_bh(&tbl->lock);
1478 		list_add(&p->list, &tbl->parms.list);
1479 		write_unlock_bh(&tbl->lock);
1480 
1481 		neigh_parms_data_state_cleanall(p);
1482 	}
1483 	return p;
1484 }
1485 EXPORT_SYMBOL(neigh_parms_alloc);
1486 
1487 static void neigh_rcu_free_parms(struct rcu_head *head)
1488 {
1489 	struct neigh_parms *parms =
1490 		container_of(head, struct neigh_parms, rcu_head);
1491 
1492 	neigh_parms_put(parms);
1493 }
1494 
1495 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1496 {
1497 	if (!parms || parms == &tbl->parms)
1498 		return;
1499 	write_lock_bh(&tbl->lock);
1500 	list_del(&parms->list);
1501 	parms->dead = 1;
1502 	write_unlock_bh(&tbl->lock);
1503 	if (parms->dev)
1504 		dev_put(parms->dev);
1505 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1506 }
1507 EXPORT_SYMBOL(neigh_parms_release);
1508 
1509 static void neigh_parms_destroy(struct neigh_parms *parms)
1510 {
1511 	release_net(neigh_parms_net(parms));
1512 	kfree(parms);
1513 }
1514 
1515 static struct lock_class_key neigh_table_proxy_queue_class;
1516 
1517 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1518 
1519 void neigh_table_init(int index, struct neigh_table *tbl)
1520 {
1521 	unsigned long now = jiffies;
1522 	unsigned long phsize;
1523 
1524 	INIT_LIST_HEAD(&tbl->parms_list);
1525 	list_add(&tbl->parms.list, &tbl->parms_list);
1526 	write_pnet(&tbl->parms.net, &init_net);
1527 	atomic_set(&tbl->parms.refcnt, 1);
1528 	tbl->parms.reachable_time =
1529 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1530 
1531 	tbl->stats = alloc_percpu(struct neigh_statistics);
1532 	if (!tbl->stats)
1533 		panic("cannot create neighbour cache statistics");
1534 
1535 #ifdef CONFIG_PROC_FS
1536 	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1537 			      &neigh_stat_seq_fops, tbl))
1538 		panic("cannot create neighbour proc dir entry");
1539 #endif
1540 
1541 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1542 
1543 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1544 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1545 
1546 	if (!tbl->nht || !tbl->phash_buckets)
1547 		panic("cannot allocate neighbour cache hashes");
1548 
1549 	if (!tbl->entry_size)
1550 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1551 					tbl->key_len, NEIGH_PRIV_ALIGN);
1552 	else
1553 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1554 
1555 	rwlock_init(&tbl->lock);
1556 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1557 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1558 			tbl->parms.reachable_time);
1559 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1560 	skb_queue_head_init_class(&tbl->proxy_queue,
1561 			&neigh_table_proxy_queue_class);
1562 
1563 	tbl->last_flush = now;
1564 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1565 
1566 	neigh_tables[index] = tbl;
1567 }
1568 EXPORT_SYMBOL(neigh_table_init);
1569 
1570 int neigh_table_clear(int index, struct neigh_table *tbl)
1571 {
1572 	neigh_tables[index] = NULL;
1573 	/* It is not clean... Fix it to unload IPv6 module safely */
1574 	cancel_delayed_work_sync(&tbl->gc_work);
1575 	del_timer_sync(&tbl->proxy_timer);
1576 	pneigh_queue_purge(&tbl->proxy_queue);
1577 	neigh_ifdown(tbl, NULL);
1578 	if (atomic_read(&tbl->entries))
1579 		pr_crit("neighbour leakage\n");
1580 
1581 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1582 		 neigh_hash_free_rcu);
1583 	tbl->nht = NULL;
1584 
1585 	kfree(tbl->phash_buckets);
1586 	tbl->phash_buckets = NULL;
1587 
1588 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1589 
1590 	free_percpu(tbl->stats);
1591 	tbl->stats = NULL;
1592 
1593 	return 0;
1594 }
1595 EXPORT_SYMBOL(neigh_table_clear);
1596 
1597 static struct neigh_table *neigh_find_table(int family)
1598 {
1599 	struct neigh_table *tbl = NULL;
1600 
1601 	switch (family) {
1602 	case AF_INET:
1603 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1604 		break;
1605 	case AF_INET6:
1606 		tbl = neigh_tables[NEIGH_ND_TABLE];
1607 		break;
1608 	case AF_DECnet:
1609 		tbl = neigh_tables[NEIGH_DN_TABLE];
1610 		break;
1611 	}
1612 
1613 	return tbl;
1614 }
1615 
1616 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1617 {
1618 	struct net *net = sock_net(skb->sk);
1619 	struct ndmsg *ndm;
1620 	struct nlattr *dst_attr;
1621 	struct neigh_table *tbl;
1622 	struct neighbour *neigh;
1623 	struct net_device *dev = NULL;
1624 	int err = -EINVAL;
1625 
1626 	ASSERT_RTNL();
1627 	if (nlmsg_len(nlh) < sizeof(*ndm))
1628 		goto out;
1629 
1630 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1631 	if (dst_attr == NULL)
1632 		goto out;
1633 
1634 	ndm = nlmsg_data(nlh);
1635 	if (ndm->ndm_ifindex) {
1636 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1637 		if (dev == NULL) {
1638 			err = -ENODEV;
1639 			goto out;
1640 		}
1641 	}
1642 
1643 	tbl = neigh_find_table(ndm->ndm_family);
1644 	if (tbl == NULL)
1645 		return -EAFNOSUPPORT;
1646 
1647 	if (nla_len(dst_attr) < tbl->key_len)
1648 		goto out;
1649 
1650 	if (ndm->ndm_flags & NTF_PROXY) {
1651 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1652 		goto out;
1653 	}
1654 
1655 	if (dev == NULL)
1656 		goto out;
1657 
1658 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1659 	if (neigh == NULL) {
1660 		err = -ENOENT;
1661 		goto out;
1662 	}
1663 
1664 	err = neigh_update(neigh, NULL, NUD_FAILED,
1665 			   NEIGH_UPDATE_F_OVERRIDE |
1666 			   NEIGH_UPDATE_F_ADMIN);
1667 	neigh_release(neigh);
1668 
1669 out:
1670 	return err;
1671 }
1672 
1673 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1674 {
1675 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1676 	struct net *net = sock_net(skb->sk);
1677 	struct ndmsg *ndm;
1678 	struct nlattr *tb[NDA_MAX+1];
1679 	struct neigh_table *tbl;
1680 	struct net_device *dev = NULL;
1681 	struct neighbour *neigh;
1682 	void *dst, *lladdr;
1683 	int err;
1684 
1685 	ASSERT_RTNL();
1686 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1687 	if (err < 0)
1688 		goto out;
1689 
1690 	err = -EINVAL;
1691 	if (tb[NDA_DST] == NULL)
1692 		goto out;
1693 
1694 	ndm = nlmsg_data(nlh);
1695 	if (ndm->ndm_ifindex) {
1696 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1697 		if (dev == NULL) {
1698 			err = -ENODEV;
1699 			goto out;
1700 		}
1701 
1702 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1703 			goto out;
1704 	}
1705 
1706 	tbl = neigh_find_table(ndm->ndm_family);
1707 	if (tbl == NULL)
1708 		return -EAFNOSUPPORT;
1709 
1710 	if (nla_len(tb[NDA_DST]) < tbl->key_len)
1711 		goto out;
1712 	dst = nla_data(tb[NDA_DST]);
1713 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1714 
1715 	if (ndm->ndm_flags & NTF_PROXY) {
1716 		struct pneigh_entry *pn;
1717 
1718 		err = -ENOBUFS;
1719 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1720 		if (pn) {
1721 			pn->flags = ndm->ndm_flags;
1722 			err = 0;
1723 		}
1724 		goto out;
1725 	}
1726 
1727 	if (dev == NULL)
1728 		goto out;
1729 
1730 	neigh = neigh_lookup(tbl, dst, dev);
1731 	if (neigh == NULL) {
1732 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1733 			err = -ENOENT;
1734 			goto out;
1735 		}
1736 
1737 		neigh = __neigh_lookup_errno(tbl, dst, dev);
1738 		if (IS_ERR(neigh)) {
1739 			err = PTR_ERR(neigh);
1740 			goto out;
1741 		}
1742 	} else {
1743 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1744 			err = -EEXIST;
1745 			neigh_release(neigh);
1746 			goto out;
1747 		}
1748 
1749 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1750 			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1751 	}
1752 
1753 	if (ndm->ndm_flags & NTF_USE) {
1754 		neigh_event_send(neigh, NULL);
1755 		err = 0;
1756 	} else
1757 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1758 	neigh_release(neigh);
1759 
1760 out:
1761 	return err;
1762 }
1763 
1764 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1765 {
1766 	struct nlattr *nest;
1767 
1768 	nest = nla_nest_start(skb, NDTA_PARMS);
1769 	if (nest == NULL)
1770 		return -ENOBUFS;
1771 
1772 	if ((parms->dev &&
1773 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1774 	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1775 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1776 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1777 	    /* approximative value for deprecated QUEUE_LEN (in packets) */
1778 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1779 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1780 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1781 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1782 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1783 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1784 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1785 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1786 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1787 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1788 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
1789 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1790 			  NEIGH_VAR(parms, GC_STALETIME)) ||
1791 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1792 			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
1793 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1794 			  NEIGH_VAR(parms, RETRANS_TIME)) ||
1795 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1796 			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
1797 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1798 			  NEIGH_VAR(parms, PROXY_DELAY)) ||
1799 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1800 			  NEIGH_VAR(parms, LOCKTIME)))
1801 		goto nla_put_failure;
1802 	return nla_nest_end(skb, nest);
1803 
1804 nla_put_failure:
1805 	nla_nest_cancel(skb, nest);
1806 	return -EMSGSIZE;
1807 }
1808 
1809 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1810 			      u32 pid, u32 seq, int type, int flags)
1811 {
1812 	struct nlmsghdr *nlh;
1813 	struct ndtmsg *ndtmsg;
1814 
1815 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1816 	if (nlh == NULL)
1817 		return -EMSGSIZE;
1818 
1819 	ndtmsg = nlmsg_data(nlh);
1820 
1821 	read_lock_bh(&tbl->lock);
1822 	ndtmsg->ndtm_family = tbl->family;
1823 	ndtmsg->ndtm_pad1   = 0;
1824 	ndtmsg->ndtm_pad2   = 0;
1825 
1826 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1827 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1828 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1829 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1830 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1831 		goto nla_put_failure;
1832 	{
1833 		unsigned long now = jiffies;
1834 		unsigned int flush_delta = now - tbl->last_flush;
1835 		unsigned int rand_delta = now - tbl->last_rand;
1836 		struct neigh_hash_table *nht;
1837 		struct ndt_config ndc = {
1838 			.ndtc_key_len		= tbl->key_len,
1839 			.ndtc_entry_size	= tbl->entry_size,
1840 			.ndtc_entries		= atomic_read(&tbl->entries),
1841 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1842 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1843 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1844 		};
1845 
1846 		rcu_read_lock_bh();
1847 		nht = rcu_dereference_bh(tbl->nht);
1848 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1849 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1850 		rcu_read_unlock_bh();
1851 
1852 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1853 			goto nla_put_failure;
1854 	}
1855 
1856 	{
1857 		int cpu;
1858 		struct ndt_stats ndst;
1859 
1860 		memset(&ndst, 0, sizeof(ndst));
1861 
1862 		for_each_possible_cpu(cpu) {
1863 			struct neigh_statistics	*st;
1864 
1865 			st = per_cpu_ptr(tbl->stats, cpu);
1866 			ndst.ndts_allocs		+= st->allocs;
1867 			ndst.ndts_destroys		+= st->destroys;
1868 			ndst.ndts_hash_grows		+= st->hash_grows;
1869 			ndst.ndts_res_failed		+= st->res_failed;
1870 			ndst.ndts_lookups		+= st->lookups;
1871 			ndst.ndts_hits			+= st->hits;
1872 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1873 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1874 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1875 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1876 		}
1877 
1878 		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1879 			goto nla_put_failure;
1880 	}
1881 
1882 	BUG_ON(tbl->parms.dev);
1883 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1884 		goto nla_put_failure;
1885 
1886 	read_unlock_bh(&tbl->lock);
1887 	nlmsg_end(skb, nlh);
1888 	return 0;
1889 
1890 nla_put_failure:
1891 	read_unlock_bh(&tbl->lock);
1892 	nlmsg_cancel(skb, nlh);
1893 	return -EMSGSIZE;
1894 }
1895 
1896 static int neightbl_fill_param_info(struct sk_buff *skb,
1897 				    struct neigh_table *tbl,
1898 				    struct neigh_parms *parms,
1899 				    u32 pid, u32 seq, int type,
1900 				    unsigned int flags)
1901 {
1902 	struct ndtmsg *ndtmsg;
1903 	struct nlmsghdr *nlh;
1904 
1905 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1906 	if (nlh == NULL)
1907 		return -EMSGSIZE;
1908 
1909 	ndtmsg = nlmsg_data(nlh);
1910 
1911 	read_lock_bh(&tbl->lock);
1912 	ndtmsg->ndtm_family = tbl->family;
1913 	ndtmsg->ndtm_pad1   = 0;
1914 	ndtmsg->ndtm_pad2   = 0;
1915 
1916 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1917 	    neightbl_fill_parms(skb, parms) < 0)
1918 		goto errout;
1919 
1920 	read_unlock_bh(&tbl->lock);
1921 	nlmsg_end(skb, nlh);
1922 	return 0;
1923 errout:
1924 	read_unlock_bh(&tbl->lock);
1925 	nlmsg_cancel(skb, nlh);
1926 	return -EMSGSIZE;
1927 }
1928 
1929 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1930 	[NDTA_NAME]		= { .type = NLA_STRING },
1931 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1932 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1933 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1934 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1935 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1936 };
1937 
1938 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1939 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1940 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1941 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1942 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1943 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1944 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1945 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1946 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1947 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1948 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1949 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1950 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1951 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1952 };
1953 
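/* RTM_SETNEIGHTBL handler.  A minimal usage sketch from userspace, via
 * iproute2 (the option name is illustrative; it maps onto the NDTA_ and
 * NDTPA_ attributes handled below):
 *
 *	ip ntable change name arp_cache thresh1 512
 *
 * Note that gc_thresh1..3 and gc_interval are only honoured in the
 * initial network namespace (see the net_eq() check below).
 */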
1954 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1955 {
1956 	struct net *net = sock_net(skb->sk);
1957 	struct neigh_table *tbl;
1958 	struct ndtmsg *ndtmsg;
1959 	struct nlattr *tb[NDTA_MAX+1];
1960 	bool found = false;
1961 	int err, tidx;
1962 
1963 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1964 			  nl_neightbl_policy);
1965 	if (err < 0)
1966 		goto errout;
1967 
1968 	if (tb[NDTA_NAME] == NULL) {
1969 		err = -EINVAL;
1970 		goto errout;
1971 	}
1972 
1973 	ndtmsg = nlmsg_data(nlh);
1974 
1975 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1976 		tbl = neigh_tables[tidx];
1977 		if (!tbl)
1978 			continue;
1979 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1980 			continue;
1981 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1982 			found = true;
1983 			break;
1984 		}
1985 	}
1986 
1987 	if (!found)
1988 		return -ENOENT;
1989 
1990 	/*
1991 	 * We acquire tbl->lock to be nice to the periodic timers and
1992 	 * make sure they always see a consistent set of values.
1993 	 */
1994 	write_lock_bh(&tbl->lock);
1995 
1996 	if (tb[NDTA_PARMS]) {
1997 		struct nlattr *tbp[NDTPA_MAX+1];
1998 		struct neigh_parms *p;
1999 		int i, ifindex = 0;
2000 
2001 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2002 				       nl_ntbl_parm_policy);
2003 		if (err < 0)
2004 			goto errout_tbl_lock;
2005 
2006 		if (tbp[NDTPA_IFINDEX])
2007 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2008 
2009 		p = lookup_neigh_parms(tbl, net, ifindex);
2010 		if (p == NULL) {
2011 			err = -ENOENT;
2012 			goto errout_tbl_lock;
2013 		}
2014 
2015 		for (i = 1; i <= NDTPA_MAX; i++) {
2016 			if (tbp[i] == NULL)
2017 				continue;
2018 
2019 			switch (i) {
2020 			case NDTPA_QUEUE_LEN:
2021 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2022 					      nla_get_u32(tbp[i]) *
2023 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2024 				break;
2025 			case NDTPA_QUEUE_LENBYTES:
2026 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2027 					      nla_get_u32(tbp[i]));
2028 				break;
2029 			case NDTPA_PROXY_QLEN:
2030 				NEIGH_VAR_SET(p, PROXY_QLEN,
2031 					      nla_get_u32(tbp[i]));
2032 				break;
2033 			case NDTPA_APP_PROBES:
2034 				NEIGH_VAR_SET(p, APP_PROBES,
2035 					      nla_get_u32(tbp[i]));
2036 				break;
2037 			case NDTPA_UCAST_PROBES:
2038 				NEIGH_VAR_SET(p, UCAST_PROBES,
2039 					      nla_get_u32(tbp[i]));
2040 				break;
2041 			case NDTPA_MCAST_PROBES:
2042 				NEIGH_VAR_SET(p, MCAST_PROBES,
2043 					      nla_get_u32(tbp[i]));
2044 				break;
2045 			case NDTPA_BASE_REACHABLE_TIME:
2046 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2047 					      nla_get_msecs(tbp[i]));
2048 				/* Update reachable_time as well; otherwise the change
2049 				 * only takes effect the next time neigh_periodic_work
2050 				 * decides to recompute it (which can be several minutes).
2051 				 */
2052 				p->reachable_time =
2053 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2054 				break;
2055 			case NDTPA_GC_STALETIME:
2056 				NEIGH_VAR_SET(p, GC_STALETIME,
2057 					      nla_get_msecs(tbp[i]));
2058 				break;
2059 			case NDTPA_DELAY_PROBE_TIME:
2060 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2061 					      nla_get_msecs(tbp[i]));
2062 				break;
2063 			case NDTPA_RETRANS_TIME:
2064 				NEIGH_VAR_SET(p, RETRANS_TIME,
2065 					      nla_get_msecs(tbp[i]));
2066 				break;
2067 			case NDTPA_ANYCAST_DELAY:
2068 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2069 					      nla_get_msecs(tbp[i]));
2070 				break;
2071 			case NDTPA_PROXY_DELAY:
2072 				NEIGH_VAR_SET(p, PROXY_DELAY,
2073 					      nla_get_msecs(tbp[i]));
2074 				break;
2075 			case NDTPA_LOCKTIME:
2076 				NEIGH_VAR_SET(p, LOCKTIME,
2077 					      nla_get_msecs(tbp[i]));
2078 				break;
2079 			}
2080 		}
2081 	}
2082 
2083 	err = -ENOENT;
2084 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2085 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2086 	    !net_eq(net, &init_net))
2087 		goto errout_tbl_lock;
2088 
2089 	if (tb[NDTA_THRESH1])
2090 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2091 
2092 	if (tb[NDTA_THRESH2])
2093 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2094 
2095 	if (tb[NDTA_THRESH3])
2096 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2097 
2098 	if (tb[NDTA_GC_INTERVAL])
2099 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2100 
2101 	err = 0;
2102 
2103 errout_tbl_lock:
2104 	write_unlock_bh(&tbl->lock);
2105 errout:
2106 	return err;
2107 }
2108 
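/* Dump callback for RTM_GETNEIGHTBL.  Resume state across partially
 * filled skbs lives in the netlink callback: cb->args[0] is the next
 * table index and cb->args[1] the next parms set within that table.
 */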
2109 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2110 {
2111 	struct net *net = sock_net(skb->sk);
2112 	int family, tidx, nidx = 0;
2113 	int tbl_skip = cb->args[0];
2114 	int neigh_skip = cb->args[1];
2115 	struct neigh_table *tbl;
2116 
2117 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2118 
2119 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2120 		struct neigh_parms *p;
2121 
2122 		tbl = neigh_tables[tidx];
2123 		if (!tbl)
2124 			continue;
2125 
2126 		if (tidx < tbl_skip || (family && tbl->family != family))
2127 			continue;
2128 
2129 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2130 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2131 				       NLM_F_MULTI) < 0)
2132 			break;
2133 
2134 		nidx = 0;
2135 		p = list_next_entry(&tbl->parms, list);
2136 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2137 			if (!net_eq(neigh_parms_net(p), net))
2138 				continue;
2139 
2140 			if (nidx < neigh_skip)
2141 				goto next;
2142 
2143 			if (neightbl_fill_param_info(skb, tbl, p,
2144 						     NETLINK_CB(cb->skb).portid,
2145 						     cb->nlh->nlmsg_seq,
2146 						     RTM_NEWNEIGHTBL,
2147 						     NLM_F_MULTI) < 0)
2148 				goto out;
2149 		next:
2150 			nidx++;
2151 		}
2152 
2153 		neigh_skip = 0;
2154 	}
2155 out:
2156 	cb->args[0] = tidx;
2157 	cb->args[1] = nidx;
2158 
2159 	return skb->len;
2160 }
2161 
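/* Translate one neighbour entry into an RTM_NEWNEIGH message: an ndmsg
 * header followed by NDA_DST (the protocol address), NDA_LLADDR (only
 * while the entry is NUD_VALID), NDA_CACHEINFO with ages in clock_t
 * ticks, and NDA_PROBES.  The refcnt is reported minus one, hiding the
 * reference held by the cache itself.
 */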
2162 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2163 			   u32 pid, u32 seq, int type, unsigned int flags)
2164 {
2165 	unsigned long now = jiffies;
2166 	struct nda_cacheinfo ci;
2167 	struct nlmsghdr *nlh;
2168 	struct ndmsg *ndm;
2169 
2170 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2171 	if (nlh == NULL)
2172 		return -EMSGSIZE;
2173 
2174 	ndm = nlmsg_data(nlh);
2175 	ndm->ndm_family	 = neigh->ops->family;
2176 	ndm->ndm_pad1    = 0;
2177 	ndm->ndm_pad2    = 0;
2178 	ndm->ndm_flags	 = neigh->flags;
2179 	ndm->ndm_type	 = neigh->type;
2180 	ndm->ndm_ifindex = neigh->dev->ifindex;
2181 
2182 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2183 		goto nla_put_failure;
2184 
2185 	read_lock_bh(&neigh->lock);
2186 	ndm->ndm_state	 = neigh->nud_state;
2187 	if (neigh->nud_state & NUD_VALID) {
2188 		char haddr[MAX_ADDR_LEN];
2189 
2190 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2191 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2192 			read_unlock_bh(&neigh->lock);
2193 			goto nla_put_failure;
2194 		}
2195 	}
2196 
2197 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2198 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2199 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2200 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2201 	read_unlock_bh(&neigh->lock);
2202 
2203 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2204 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2205 		goto nla_put_failure;
2206 
2207 	nlmsg_end(skb, nlh);
2208 	return 0;
2209 
2210 nla_put_failure:
2211 	nlmsg_cancel(skb, nlh);
2212 	return -EMSGSIZE;
2213 }
2214 
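/* Proxy entries have no state machine of their own, so they are dumped
 * as NUD_NONE with the NTF_PROXY flag set and carry neither a link
 * layer address nor cache statistics.
 */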
2215 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2216 			    u32 pid, u32 seq, int type, unsigned int flags,
2217 			    struct neigh_table *tbl)
2218 {
2219 	struct nlmsghdr *nlh;
2220 	struct ndmsg *ndm;
2221 
2222 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2223 	if (nlh == NULL)
2224 		return -EMSGSIZE;
2225 
2226 	ndm = nlmsg_data(nlh);
2227 	ndm->ndm_family	 = tbl->family;
2228 	ndm->ndm_pad1    = 0;
2229 	ndm->ndm_pad2    = 0;
2230 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2231 	ndm->ndm_type	 = RTN_UNICAST;
2232 	ndm->ndm_ifindex = pn->dev->ifindex;
2233 	ndm->ndm_state	 = NUD_NONE;
2234 
2235 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2236 		goto nla_put_failure;
2237 
2238 	nlmsg_end(skb, nlh);
2239 	return 0;
2240 
2241 nla_put_failure:
2242 	nlmsg_cancel(skb, nlh);
2243 	return -EMSGSIZE;
2244 }
2245 
2246 static void neigh_update_notify(struct neighbour *neigh)
2247 {
2248 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2249 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2250 }
2251 
2252 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2253 			    struct netlink_callback *cb)
2254 {
2255 	struct net *net = sock_net(skb->sk);
2256 	struct neighbour *n;
2257 	int rc, h, s_h = cb->args[1];
2258 	int idx, s_idx = idx = cb->args[2];
2259 	struct neigh_hash_table *nht;
2260 
2261 	rcu_read_lock_bh();
2262 	nht = rcu_dereference_bh(tbl->nht);
2263 
2264 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2265 		if (h > s_h)
2266 			s_idx = 0;
2267 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2268 		     n != NULL;
2269 		     n = rcu_dereference_bh(n->next)) {
2270 			if (!net_eq(dev_net(n->dev), net))
2271 				continue;
2272 			if (idx < s_idx)
2273 				goto next;
2274 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2275 					    cb->nlh->nlmsg_seq,
2276 					    RTM_NEWNEIGH,
2277 					    NLM_F_MULTI) < 0) {
2278 				rc = -1;
2279 				goto out;
2280 			}
2281 next:
2282 			idx++;
2283 		}
2284 	}
2285 	rc = skb->len;
2286 out:
2287 	rcu_read_unlock_bh();
2288 	cb->args[1] = h;
2289 	cb->args[2] = idx;
2290 	return rc;
2291 }
2292 
2293 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2294 			     struct netlink_callback *cb)
2295 {
2296 	struct pneigh_entry *n;
2297 	struct net *net = sock_net(skb->sk);
2298 	int rc, h, s_h = cb->args[3];
2299 	int idx, s_idx = idx = cb->args[4];
2300 
2301 	read_lock_bh(&tbl->lock);
2302 
2303 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2304 		if (h > s_h)
2305 			s_idx = 0;
2306 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2307 			if (!net_eq(pneigh_net(n), net))
2308 				continue;
2309 			if (idx < s_idx)
2310 				goto next;
2311 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2312 					    cb->nlh->nlmsg_seq,
2313 					    RTM_NEWNEIGH,
2314 					    NLM_F_MULTI, tbl) < 0) {
2315 				read_unlock_bh(&tbl->lock);
2316 				rc = -1;
2317 				goto out;
2318 			}
2319 		next:
2320 			idx++;
2321 		}
2322 	}
2323 
2324 	read_unlock_bh(&tbl->lock);
2325 	rc = skb->len;
2326 out:
2327 	cb->args[3] = h;
2328 	cb->args[4] = idx;
2329 	return rc;
2330 
2331 }
2332 
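/* Top-level RTM_GETNEIGH dump.  Whether the caller wants neighbour or
 * proxy entries is inferred from the request: a full struct ndmsg with
 * ndm_flags == NTF_PROXY selects the proxy tables (this is what e.g.
 * "ip neigh show proxy" is expected to send).
 */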
2333 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2334 {
2335 	struct neigh_table *tbl;
2336 	int t, family, s_t;
2337 	int proxy = 0;
2338 	int err;
2339 
2340 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2341 
2342 	/* Check that a full ndmsg structure is present; the family member
2343 	 * is at the same offset in both ndmsg and rtgenmsg.
2344 	 */
2345 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2346 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2347 		proxy = 1;
2348 
2349 	s_t = cb->args[0];
2350 
2351 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2352 		tbl = neigh_tables[t];
2353 
2354 		if (!tbl)
2355 			continue;
2356 		if (t < s_t || (family && tbl->family != family))
2357 			continue;
2358 		if (t > s_t)
2359 			memset(&cb->args[1], 0, sizeof(cb->args) -
2360 						sizeof(cb->args[0]));
2361 		if (proxy)
2362 			err = pneigh_dump_table(tbl, skb, cb);
2363 		else
2364 			err = neigh_dump_table(tbl, skb, cb);
2365 		if (err < 0)
2366 			break;
2367 	}
2368 
2369 	cb->args[0] = t;
2370 	return skb->len;
2371 }
2372 
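/* Walk every neighbour entry under rcu_read_lock_bh(); tbl->lock is
 * additionally taken for reading so the hash table cannot be resized
 * underneath us.  The callback therefore must not sleep and must not
 * try to take tbl->lock itself.
 */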
2373 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2374 {
2375 	int chain;
2376 	struct neigh_hash_table *nht;
2377 
2378 	rcu_read_lock_bh();
2379 	nht = rcu_dereference_bh(tbl->nht);
2380 
2381 	read_lock(&tbl->lock); /* avoid resizes */
2382 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2383 		struct neighbour *n;
2384 
2385 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2386 		     n != NULL;
2387 		     n = rcu_dereference_bh(n->next))
2388 			cb(n, cookie);
2389 	}
2390 	read_unlock(&tbl->lock);
2391 	rcu_read_unlock_bh();
2392 }
2393 EXPORT_SYMBOL(neigh_for_each);
2394 
2395 /* The tbl->lock must be held as a writer and BH disabled. */
2396 void __neigh_for_each_release(struct neigh_table *tbl,
2397 			      int (*cb)(struct neighbour *))
2398 {
2399 	int chain;
2400 	struct neigh_hash_table *nht;
2401 
2402 	nht = rcu_dereference_protected(tbl->nht,
2403 					lockdep_is_held(&tbl->lock));
2404 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2405 		struct neighbour *n;
2406 		struct neighbour __rcu **np;
2407 
2408 		np = &nht->hash_buckets[chain];
2409 		while ((n = rcu_dereference_protected(*np,
2410 					lockdep_is_held(&tbl->lock))) != NULL) {
2411 			int release;
2412 
2413 			write_lock(&n->lock);
2414 			release = cb(n);
2415 			if (release) {
2416 				rcu_assign_pointer(*np,
2417 					rcu_dereference_protected(n->next,
2418 						lockdep_is_held(&tbl->lock)));
2419 				n->dead = 1;
2420 			} else
2421 				np = &n->next;
2422 			write_unlock(&n->lock);
2423 			if (release)
2424 				neigh_cleanup_and_release(n);
2425 		}
2426 	}
2427 }
2428 EXPORT_SYMBOL(__neigh_for_each_release);
2429 
2430 #ifdef CONFIG_PROC_FS
2431 
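/* /proc iteration works in two phases: the neighbour hash table first,
 * then (unless NEIGH_SEQ_NEIGH_ONLY) the proxy buckets, with
 * NEIGH_SEQ_IS_PNEIGH recording which phase a seq_file cursor is in.
 * The *pos arguments are decremented as entries are consumed, so a
 * caller can seek to an absolute position across both phases.
 */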
2432 static struct neighbour *neigh_get_first(struct seq_file *seq)
2433 {
2434 	struct neigh_seq_state *state = seq->private;
2435 	struct net *net = seq_file_net(seq);
2436 	struct neigh_hash_table *nht = state->nht;
2437 	struct neighbour *n = NULL;
2438 	int bucket = state->bucket;
2439 
2440 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2441 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2442 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2443 
2444 		while (n) {
2445 			if (!net_eq(dev_net(n->dev), net))
2446 				goto next;
2447 			if (state->neigh_sub_iter) {
2448 				loff_t fakep = 0;
2449 				void *v;
2450 
2451 				v = state->neigh_sub_iter(state, n, &fakep);
2452 				if (!v)
2453 					goto next;
2454 			}
2455 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2456 				break;
2457 			if (n->nud_state & ~NUD_NOARP)
2458 				break;
2459 next:
2460 			n = rcu_dereference_bh(n->next);
2461 		}
2462 
2463 		if (n)
2464 			break;
2465 	}
2466 	state->bucket = bucket;
2467 
2468 	return n;
2469 }
2470 
2471 static struct neighbour *neigh_get_next(struct seq_file *seq,
2472 					struct neighbour *n,
2473 					loff_t *pos)
2474 {
2475 	struct neigh_seq_state *state = seq->private;
2476 	struct net *net = seq_file_net(seq);
2477 	struct neigh_hash_table *nht = state->nht;
2478 
2479 	if (state->neigh_sub_iter) {
2480 		void *v = state->neigh_sub_iter(state, n, pos);
2481 		if (v)
2482 			return n;
2483 	}
2484 	n = rcu_dereference_bh(n->next);
2485 
2486 	while (1) {
2487 		while (n) {
2488 			if (!net_eq(dev_net(n->dev), net))
2489 				goto next;
2490 			if (state->neigh_sub_iter) {
2491 				void *v = state->neigh_sub_iter(state, n, pos);
2492 				if (v)
2493 					return n;
2494 				goto next;
2495 			}
2496 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2497 				break;
2498 
2499 			if (n->nud_state & ~NUD_NOARP)
2500 				break;
2501 next:
2502 			n = rcu_dereference_bh(n->next);
2503 		}
2504 
2505 		if (n)
2506 			break;
2507 
2508 		if (++state->bucket >= (1 << nht->hash_shift))
2509 			break;
2510 
2511 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2512 	}
2513 
2514 	if (n && pos)
2515 		--(*pos);
2516 	return n;
2517 }
2518 
2519 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2520 {
2521 	struct neighbour *n = neigh_get_first(seq);
2522 
2523 	if (n) {
2524 		--(*pos);
2525 		while (*pos) {
2526 			n = neigh_get_next(seq, n, pos);
2527 			if (!n)
2528 				break;
2529 		}
2530 	}
2531 	return *pos ? NULL : n;
2532 }
2533 
2534 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2535 {
2536 	struct neigh_seq_state *state = seq->private;
2537 	struct net *net = seq_file_net(seq);
2538 	struct neigh_table *tbl = state->tbl;
2539 	struct pneigh_entry *pn = NULL;
2540 	int bucket = state->bucket;
2541 
2542 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2543 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2544 		pn = tbl->phash_buckets[bucket];
2545 		while (pn && !net_eq(pneigh_net(pn), net))
2546 			pn = pn->next;
2547 		if (pn)
2548 			break;
2549 	}
2550 	state->bucket = bucket;
2551 
2552 	return pn;
2553 }
2554 
2555 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2556 					    struct pneigh_entry *pn,
2557 					    loff_t *pos)
2558 {
2559 	struct neigh_seq_state *state = seq->private;
2560 	struct net *net = seq_file_net(seq);
2561 	struct neigh_table *tbl = state->tbl;
2562 
2563 	do {
2564 		pn = pn->next;
2565 	} while (pn && !net_eq(pneigh_net(pn), net));
2566 
2567 	while (!pn) {
2568 		if (++state->bucket > PNEIGH_HASHMASK)
2569 			break;
2570 		pn = tbl->phash_buckets[state->bucket];
2571 		while (pn && !net_eq(pneigh_net(pn), net))
2572 			pn = pn->next;
2573 		if (pn)
2574 			break;
2575 	}
2576 
2577 	if (pn && pos)
2578 		--(*pos);
2579 
2580 	return pn;
2581 }
2582 
2583 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2584 {
2585 	struct pneigh_entry *pn = pneigh_get_first(seq);
2586 
2587 	if (pn) {
2588 		--(*pos);
2589 		while (*pos) {
2590 			pn = pneigh_get_next(seq, pn, pos);
2591 			if (!pn)
2592 				break;
2593 		}
2594 	}
2595 	return *pos ? NULL : pn;
2596 }
2597 
2598 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2599 {
2600 	struct neigh_seq_state *state = seq->private;
2601 	void *rc;
2602 	loff_t idxpos = *pos;
2603 
2604 	rc = neigh_get_idx(seq, &idxpos);
2605 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2606 		rc = pneigh_get_idx(seq, &idxpos);
2607 
2608 	return rc;
2609 }
2610 
2611 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2612 	__acquires(rcu_bh)
2613 {
2614 	struct neigh_seq_state *state = seq->private;
2615 
2616 	state->tbl = tbl;
2617 	state->bucket = 0;
2618 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2619 
2620 	rcu_read_lock_bh();
2621 	state->nht = rcu_dereference_bh(tbl->nht);
2622 
2623 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2624 }
2625 EXPORT_SYMBOL(neigh_seq_start);
2626 
2627 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2628 {
2629 	struct neigh_seq_state *state;
2630 	void *rc;
2631 
2632 	if (v == SEQ_START_TOKEN) {
2633 		rc = neigh_get_first(seq);
2634 		goto out;
2635 	}
2636 
2637 	state = seq->private;
2638 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2639 		rc = neigh_get_next(seq, v, NULL);
2640 		if (rc)
2641 			goto out;
2642 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2643 			rc = pneigh_get_first(seq);
2644 	} else {
2645 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2646 		rc = pneigh_get_next(seq, v, NULL);
2647 	}
2648 out:
2649 	++(*pos);
2650 	return rc;
2651 }
2652 EXPORT_SYMBOL(neigh_seq_next);
2653 
2654 void neigh_seq_stop(struct seq_file *seq, void *v)
2655 	__releases(rcu_bh)
2656 {
2657 	rcu_read_unlock_bh();
2658 }
2659 EXPORT_SYMBOL(neigh_seq_stop);
2660 
2661 /* statistics via seq_file */
2662 
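/* The per-cpu counters are exported one row per possible CPU; *pos
 * encodes "cpu index + 1" so that position 0 can serve as the
 * SEQ_START_TOKEN slot that prints the header line.  These files appear
 * under /proc/net/stat, e.g. /proc/net/stat/arp_cache.
 */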
2663 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2664 {
2665 	struct neigh_table *tbl = seq->private;
2666 	int cpu;
2667 
2668 	if (*pos == 0)
2669 		return SEQ_START_TOKEN;
2670 
2671 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2672 		if (!cpu_possible(cpu))
2673 			continue;
2674 		*pos = cpu+1;
2675 		return per_cpu_ptr(tbl->stats, cpu);
2676 	}
2677 	return NULL;
2678 }
2679 
2680 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2681 {
2682 	struct neigh_table *tbl = seq->private;
2683 	int cpu;
2684 
2685 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2686 		if (!cpu_possible(cpu))
2687 			continue;
2688 		*pos = cpu+1;
2689 		return per_cpu_ptr(tbl->stats, cpu);
2690 	}
2691 	return NULL;
2692 }
2693 
2694 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2695 {
2696 
2697 }
2698 
2699 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2700 {
2701 	struct neigh_table *tbl = seq->private;
2702 	struct neigh_statistics *st = v;
2703 
2704 	if (v == SEQ_START_TOKEN) {
2705 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
2706 		return 0;
2707 	}
2708 
2709 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2710 			"%08lx %08lx  %08lx %08lx %08lx\n",
2711 		   atomic_read(&tbl->entries),
2712 
2713 		   st->allocs,
2714 		   st->destroys,
2715 		   st->hash_grows,
2716 
2717 		   st->lookups,
2718 		   st->hits,
2719 
2720 		   st->res_failed,
2721 
2722 		   st->rcv_probes_mcast,
2723 		   st->rcv_probes_ucast,
2724 
2725 		   st->periodic_gc_runs,
2726 		   st->forced_gc_runs,
2727 		   st->unres_discards
2728 		   );
2729 
2730 	return 0;
2731 }
2732 
2733 static const struct seq_operations neigh_stat_seq_ops = {
2734 	.start	= neigh_stat_seq_start,
2735 	.next	= neigh_stat_seq_next,
2736 	.stop	= neigh_stat_seq_stop,
2737 	.show	= neigh_stat_seq_show,
2738 };
2739 
2740 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2741 {
2742 	int ret = seq_open(file, &neigh_stat_seq_ops);
2743 
2744 	if (!ret) {
2745 		struct seq_file *sf = file->private_data;
2746 		sf->private = PDE_DATA(inode);
2747 	}
2748 	return ret;
2749 }
2750 
2751 static const struct file_operations neigh_stat_seq_fops = {
2752 	.owner	 = THIS_MODULE,
2753 	.open 	 = neigh_stat_seq_open,
2754 	.read	 = seq_read,
2755 	.llseek	 = seq_lseek,
2756 	.release = seq_release,
2757 };
2758 
2759 #endif /* CONFIG_PROC_FS */
2760 
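/* Worst-case size of a neighbour notification: both addresses are
 * budgeted at MAX_ADDR_LEN, so the nlmsg_new() allocation in
 * __neigh_notify() can never be too small for a well-formed entry;
 * hitting -EMSGSIZE there would indicate a bug, hence the WARN_ON().
 */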
2761 static inline size_t neigh_nlmsg_size(void)
2762 {
2763 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2764 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2765 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2766 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2767 	       + nla_total_size(4); /* NDA_PROBES */
2768 }
2769 
2770 static void __neigh_notify(struct neighbour *n, int type, int flags)
2771 {
2772 	struct net *net = dev_net(n->dev);
2773 	struct sk_buff *skb;
2774 	int err = -ENOBUFS;
2775 
2776 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2777 	if (skb == NULL)
2778 		goto errout;
2779 
2780 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2781 	if (err < 0) {
2782 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2783 		WARN_ON(err == -EMSGSIZE);
2784 		kfree_skb(skb);
2785 		goto errout;
2786 	}
2787 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2788 	return;
2789 errout:
2790 	if (err < 0)
2791 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2792 }
2793 
2794 void neigh_app_ns(struct neighbour *n)
2795 {
2796 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2797 }
2798 EXPORT_SYMBOL(neigh_app_ns);
2799 
2800 #ifdef CONFIG_SYSCTL
2801 static int zero;
2802 static int int_max = INT_MAX;
2803 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2804 
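/* The queue limit is stored internally in bytes (QUEUE_LEN_BYTES), but
 * the legacy "unres_qlen" sysctl is expressed in packets.  Conversion
 * in both directions uses SKB_TRUESIZE(ETH_FRAME_LEN) as the assumed
 * per-packet cost: writing unres_qlen = N stores
 * N * SKB_TRUESIZE(ETH_FRAME_LEN) bytes, and reads divide back down,
 * so a value set via unres_qlen_bytes may not round-trip exactly.
 */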
2805 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2806 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2807 {
2808 	int size, ret;
2809 	struct ctl_table tmp = *ctl;
2810 
2811 	tmp.extra1 = &zero;
2812 	tmp.extra2 = &unres_qlen_max;
2813 	tmp.data = &size;
2814 
2815 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2816 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2817 
2818 	if (write && !ret)
2819 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2820 	return ret;
2821 }
2822 
2823 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2824 						   int family)
2825 {
2826 	switch (family) {
2827 	case AF_INET:
2828 		return __in_dev_arp_parms_get_rcu(dev);
2829 	case AF_INET6:
2830 		return __in6_dev_nd_parms_get_rcu(dev);
2831 	}
2832 	return NULL;
2833 }
2834 
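/* Every per-device parms set tracks, in p->data_state, which entries
 * have been written explicitly.  When a default value changes (the
 * dev == NULL case in neigh_proc_update() below), it is copied into
 * every device's parms that still has the corresponding bit clear, so
 * explicit per-device overrides survive default updates.
 */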
2835 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2836 				  int index)
2837 {
2838 	struct net_device *dev;
2839 	int family = neigh_parms_family(p);
2840 
2841 	rcu_read_lock();
2842 	for_each_netdev_rcu(net, dev) {
2843 		struct neigh_parms *dst_p =
2844 				neigh_get_dev_parms_rcu(dev, family);
2845 
2846 		if (dst_p && !test_bit(index, dst_p->data_state))
2847 			dst_p->data[index] = p->data[index];
2848 	}
2849 	rcu_read_unlock();
2850 }
2851 
2852 static void neigh_proc_update(struct ctl_table *ctl, int write)
2853 {
2854 	struct net_device *dev = ctl->extra1;
2855 	struct neigh_parms *p = ctl->extra2;
2856 	struct net *net = neigh_parms_net(p);
2857 	int index = (int *) ctl->data - p->data;
2858 
2859 	if (!write)
2860 		return;
2861 
2862 	set_bit(index, p->data_state);
2863 	if (!dev) /* a NULL dev means this is a default value */
2864 		neigh_copy_dflt_parms(net, p, index);
2865 }
2866 
2867 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2868 					   void __user *buffer,
2869 					   size_t *lenp, loff_t *ppos)
2870 {
2871 	struct ctl_table tmp = *ctl;
2872 	int ret;
2873 
2874 	tmp.extra1 = &zero;
2875 	tmp.extra2 = &int_max;
2876 
2877 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2878 	neigh_proc_update(ctl, write);
2879 	return ret;
2880 }
2881 
2882 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2883 			void __user *buffer, size_t *lenp, loff_t *ppos)
2884 {
2885 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2886 
2887 	neigh_proc_update(ctl, write);
2888 	return ret;
2889 }
2890 EXPORT_SYMBOL(neigh_proc_dointvec);
2891 
2892 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2893 				void __user *buffer,
2894 				size_t *lenp, loff_t *ppos)
2895 {
2896 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2897 
2898 	neigh_proc_update(ctl, write);
2899 	return ret;
2900 }
2901 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2902 
2903 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2904 					      void __user *buffer,
2905 					      size_t *lenp, loff_t *ppos)
2906 {
2907 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2908 
2909 	neigh_proc_update(ctl, write);
2910 	return ret;
2911 }
2912 
2913 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2914 				   void __user *buffer,
2915 				   size_t *lenp, loff_t *ppos)
2916 {
2917 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2918 
2919 	neigh_proc_update(ctl, write);
2920 	return ret;
2921 }
2922 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2923 
2924 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2925 					  void __user *buffer,
2926 					  size_t *lenp, loff_t *ppos)
2927 {
2928 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2929 
2930 	neigh_proc_update(ctl, write);
2931 	return ret;
2932 }
2933 
2934 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
2935 					  void __user *buffer,
2936 					  size_t *lenp, loff_t *ppos)
2937 {
2938 	struct neigh_parms *p = ctl->extra2;
2939 	int ret;
2940 
2941 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
2942 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2943 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
2944 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2945 	else
2946 		ret = -1;
2947 
2948 	if (write && ret == 0) {
2949 		/* Update reachable_time as well; otherwise the change
2950 		 * only takes effect the next time neigh_periodic_work
2951 		 * decides to recompute it.
2952 		 */
2953 		p->reachable_time =
2954 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2955 	}
2956 	return ret;
2957 }
2958 
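/* NEIGH_PARMS_DATA_OFFSET() computes, in effect,
 * offsetof(struct neigh_parms, data[index]) disguised as a pointer;
 * neigh_sysctl_register() later rebases each .data field with
 * "+ (long) p" to point at the real per-parms storage.
 */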
2959 #define NEIGH_PARMS_DATA_OFFSET(index)	\
2960 	(&((struct neigh_parms *) 0)->data[index])
2961 
2962 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
2963 	[NEIGH_VAR_ ## attr] = { \
2964 		.procname	= name, \
2965 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
2966 		.maxlen		= sizeof(int), \
2967 		.mode		= mval, \
2968 		.proc_handler	= proc, \
2969 	}
2970 
2971 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
2972 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
2973 
2974 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
2975 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
2976 
2977 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
2978 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
2979 
2980 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
2981 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
2982 
2983 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
2984 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
2985 
2986 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
2987 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
2988 
2989 static struct neigh_sysctl_table {
2990 	struct ctl_table_header *sysctl_header;
2991 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2992 } neigh_sysctl_template __read_mostly = {
2993 	.neigh_vars = {
2994 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
2995 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
2996 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
2997 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
2998 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
2999 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3000 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3001 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3002 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3003 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3004 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3005 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3006 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3007 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3008 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3009 		[NEIGH_VAR_GC_INTERVAL] = {
3010 			.procname	= "gc_interval",
3011 			.maxlen		= sizeof(int),
3012 			.mode		= 0644,
3013 			.proc_handler	= proc_dointvec_jiffies,
3014 		},
3015 		[NEIGH_VAR_GC_THRESH1] = {
3016 			.procname	= "gc_thresh1",
3017 			.maxlen		= sizeof(int),
3018 			.mode		= 0644,
3019 			.extra1 	= &zero,
3020 			.extra2		= &int_max,
3021 			.proc_handler	= proc_dointvec_minmax,
3022 		},
3023 		[NEIGH_VAR_GC_THRESH2] = {
3024 			.procname	= "gc_thresh2",
3025 			.maxlen		= sizeof(int),
3026 			.mode		= 0644,
3027 			.extra1 	= &zero,
3028 			.extra2		= &int_max,
3029 			.proc_handler	= proc_dointvec_minmax,
3030 		},
3031 		[NEIGH_VAR_GC_THRESH3] = {
3032 			.procname	= "gc_thresh3",
3033 			.maxlen		= sizeof(int),
3034 			.mode		= 0644,
3035 			.extra1 	= &zero,
3036 			.extra2		= &int_max,
3037 			.proc_handler	= proc_dointvec_minmax,
3038 		},
3039 		{},
3040 	},
3041 };
3042 
3043 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3044 			  proc_handler *handler)
3045 {
3046 	int i;
3047 	struct neigh_sysctl_table *t;
3048 	const char *dev_name_source;
3049 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3050 	char *p_name;
3051 
3052 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3053 	if (!t)
3054 		goto err;
3055 
3056 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3057 		t->neigh_vars[i].data += (long) p;
3058 		t->neigh_vars[i].extra1 = dev;
3059 		t->neigh_vars[i].extra2 = p;
3060 	}
3061 
3062 	if (dev) {
3063 		dev_name_source = dev->name;
3064 		/* Terminate the table early */
3065 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3066 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3067 	} else {
3068 		struct neigh_table *tbl = p->tbl;
3069 		dev_name_source = "default";
3070 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3071 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3072 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3073 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3074 	}
3075 
3076 	if (handler) {
3077 		/* RetransTime */
3078 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3079 		/* ReachableTime */
3080 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3081 		/* RetransTime (in milliseconds) */
3082 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3083 		/* ReachableTime (in milliseconds) */
3084 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3085 	} else {
3086 		/* These handlers update p->reachable_time after
3087 		 * base_reachable_time(_ms) is set, so the new interval takes
3088 		 * effect on the next neighbour update instead of waiting for
3089 		 * neigh_periodic_work to recompute it (which can take several
3090 		 * minutes). Any handler that replaces them should do the same.
3091 		 */
3092 		/* ReachableTime */
3093 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3094 			neigh_proc_base_reachable_time;
3095 		/* ReachableTime (in milliseconds) */
3096 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3097 			neigh_proc_base_reachable_time;
3098 	}
3099 
3100 	/* Don't export sysctls to unprivileged users */
3101 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3102 		t->neigh_vars[0].procname = NULL;
3103 
3104 	switch (neigh_parms_family(p)) {
3105 	case AF_INET:
3106 	      p_name = "ipv4";
3107 	      break;
3108 	case AF_INET6:
3109 	      p_name = "ipv6";
3110 	      break;
3111 	default:
3112 	      BUG();
3113 	}
3114 
3115 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3116 		p_name, dev_name_source);
3117 	t->sysctl_header =
3118 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3119 	if (!t->sysctl_header)
3120 		goto free;
3121 
3122 	p->sysctl_table = t;
3123 	return 0;
3124 
3125 free:
3126 	kfree(t);
3127 err:
3128 	return -ENOBUFS;
3129 }
3130 EXPORT_SYMBOL(neigh_sysctl_register);
3131 
3132 void neigh_sysctl_unregister(struct neigh_parms *p)
3133 {
3134 	if (p->sysctl_table) {
3135 		struct neigh_sysctl_table *t = p->sysctl_table;
3136 		p->sysctl_table = NULL;
3137 		unregister_net_sysctl_table(t->sysctl_header);
3138 		kfree(t);
3139 	}
3140 }
3141 EXPORT_SYMBOL(neigh_sysctl_unregister);
3142 
3143 #endif	/* CONFIG_SYSCTL */
3144 
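/* Wire the rtnetlink message types up to their handlers: doit
 * callbacks for NEWNEIGH/DELNEIGH/SETNEIGHTBL and dumpit callbacks for
 * the GET variants.  neigh_add() and neigh_delete() are defined
 * earlier in this file.
 */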
3145 static int __init neigh_init(void)
3146 {
3147 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3148 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3149 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3150 
3151 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3152 		      NULL);
3153 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3154 
3155 	return 0;
3156 }
3157 
3158 subsys_initcall(neigh_init);
3159 
3160