xref: /linux/net/core/neighbour.c (revision 9494a6c2e4f6ce21a1e6885145171f90c4492131)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(struct timer_list *t);
55 static void __neigh_notify(struct neighbour *n, int type, int flags,
56 			   u32 pid);
57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 				    struct net_device *dev);
60 
61 #ifdef CONFIG_PROC_FS
62 static const struct seq_operations neigh_stat_seq_ops;
63 #endif
64 
65 /*
66    Neighbour hash table buckets are protected with rwlock tbl->lock.
67 
68    - All scans/updates of hash buckets MUST be made under this lock.
69    - NOTHING clever should be done under this lock: no callbacks
70      into protocol backends, no attempts to send anything to the network.
71      It will deadlock if the backend/driver wants to use the neighbour
72      cache.
73    - If an entry requires some non-trivial action, increase
74      its reference count and release the table lock.
75 
76    Neighbour entries are protected:
77    - by their reference count.
78    - by the rwlock neigh->lock.
79 
80    The reference count prevents destruction.
81 
82    neigh->lock mainly serializes the ll address data and its validity state.
83    However, the same lock is used to protect other entry fields:
84     - timer
85     - resolution queue
86 
87    Again, nothing clever shall be done under neigh->lock;
88    the most complicated operation we allow there is dev->hard_header.
89    dev->hard_header is assumed to be simple and to make no
90    callbacks into the neighbour tables.
91  */
92 
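/* A minimal sketch (illustrative only, not part of this file's API) of the
 * pattern the rules above require when an entry found during a bucket scan
 * needs non-trivial work:
 *
 *	write_lock_bh(&tbl->lock);
 *	... find n in a hash bucket under the lock ...
 *	neigh_hold(n);			(pin the entry)
 *	write_unlock_bh(&tbl->lock);
 *	... slow work: driver callbacks, transmits, anything non-trivial ...
 *	neigh_release(n);		(drop the reference when done)
 */
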
93 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
94 {
95 	kfree_skb(skb);
96 	return -ENETDOWN;
97 }
98 
99 static void neigh_cleanup_and_release(struct neighbour *neigh)
100 {
101 	if (neigh->parms->neigh_cleanup)
102 		neigh->parms->neigh_cleanup(neigh);
103 
104 	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
105 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
106 	neigh_release(neigh);
107 }
108 
109 /*
110  * Returns a value uniformly distributed over (1/2)*base ... (3/2)*base;
111  * e.g. base = 30*HZ yields a value in [15*HZ, 45*HZ). This matches the
112  * default IPv6 behaviour and is not overridable, as it is a reasonable choice.
113  */
114 
115 unsigned long neigh_rand_reach_time(unsigned long base)
116 {
117 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
118 }
119 EXPORT_SYMBOL(neigh_rand_reach_time);
120 
121 
122 static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
123 		      struct neighbour __rcu **np, struct neigh_table *tbl)
124 {
125 	bool retval = false;
126 
127 	write_lock(&n->lock);
128 	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
129 	    !(n->flags & flags)) {
130 		struct neighbour *neigh;
131 
132 		neigh = rcu_dereference_protected(n->next,
133 						  lockdep_is_held(&tbl->lock));
134 		rcu_assign_pointer(*np, neigh);
135 		n->dead = 1;
136 		retval = true;
137 	}
138 	write_unlock(&n->lock);
139 	if (retval)
140 		neigh_cleanup_and_release(n);
141 	return retval;
142 }
143 
144 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
145 {
146 	struct neigh_hash_table *nht;
147 	void *pkey = ndel->primary_key;
148 	u32 hash_val;
149 	struct neighbour *n;
150 	struct neighbour __rcu **np;
151 
152 	nht = rcu_dereference_protected(tbl->nht,
153 					lockdep_is_held(&tbl->lock));
154 	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
155 	hash_val = hash_val >> (32 - nht->hash_shift);
156 
157 	np = &nht->hash_buckets[hash_val];
158 	while ((n = rcu_dereference_protected(*np,
159 					      lockdep_is_held(&tbl->lock)))) {
160 		if (n == ndel)
161 			return neigh_del(n, 0, 0, np, tbl);
162 		np = &n->next;
163 	}
164 	return false;
165 }
166 
167 static int neigh_forced_gc(struct neigh_table *tbl)
168 {
169 	int shrunk = 0;
170 	int i;
171 	struct neigh_hash_table *nht;
172 
173 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
174 
175 	write_lock_bh(&tbl->lock);
176 	nht = rcu_dereference_protected(tbl->nht,
177 					lockdep_is_held(&tbl->lock));
178 	for (i = 0; i < (1 << nht->hash_shift); i++) {
179 		struct neighbour *n;
180 		struct neighbour __rcu **np;
181 
182 		np = &nht->hash_buckets[i];
183 		while ((n = rcu_dereference_protected(*np,
184 					lockdep_is_held(&tbl->lock))) != NULL) {
185 			/* A neighbour record may be discarded if nobody
186 			 * refers to it, it is not permanent, and it was
187 			 * not externally learned (NTF_EXT_LEARNED).
188 			 */
189 			if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
190 				      tbl)) {
191 				shrunk = 1;
192 				continue;
193 			}
194 			np = &n->next;
195 		}
196 	}
197 
198 	tbl->last_flush = jiffies;
199 
200 	write_unlock_bh(&tbl->lock);
201 
202 	return shrunk;
203 }
204 
205 static void neigh_add_timer(struct neighbour *n, unsigned long when)
206 {
207 	neigh_hold(n);
208 	if (unlikely(mod_timer(&n->timer, when))) {
209 		printk("NEIGH: BUG, double timer add, state is %x\n",
210 		       n->nud_state);
211 		dump_stack();
212 	}
213 }
214 
215 static int neigh_del_timer(struct neighbour *n)
216 {
217 	if ((n->nud_state & NUD_IN_TIMER) &&
218 	    del_timer(&n->timer)) {
219 		neigh_release(n);
220 		return 1;
221 	}
222 	return 0;
223 }
224 
225 static void pneigh_queue_purge(struct sk_buff_head *list)
226 {
227 	struct sk_buff *skb;
228 
229 	while ((skb = skb_dequeue(list)) != NULL) {
230 		dev_put(skb->dev);
231 		kfree_skb(skb);
232 	}
233 }
234 
235 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
236 {
237 	int i;
238 	struct neigh_hash_table *nht;
239 
240 	nht = rcu_dereference_protected(tbl->nht,
241 					lockdep_is_held(&tbl->lock));
242 
243 	for (i = 0; i < (1 << nht->hash_shift); i++) {
244 		struct neighbour *n;
245 		struct neighbour __rcu **np = &nht->hash_buckets[i];
246 
247 		while ((n = rcu_dereference_protected(*np,
248 					lockdep_is_held(&tbl->lock))) != NULL) {
249 			if (dev && n->dev != dev) {
250 				np = &n->next;
251 				continue;
252 			}
253 			rcu_assign_pointer(*np,
254 				   rcu_dereference_protected(n->next,
255 						lockdep_is_held(&tbl->lock)));
256 			write_lock(&n->lock);
257 			neigh_del_timer(n);
258 			n->dead = 1;
259 
260 			if (refcount_read(&n->refcnt) != 1) {
261 				/* The most unpleasant situation:
262 				   we must destroy the neighbour entry,
263 				   but someone is still using it.
264 
265 				   Destruction will be delayed until
266 				   the last user releases it, but
267 				   we must stop its timers etc. and move
268 				   it to a safe state.
269 				 */
270 				__skb_queue_purge(&n->arp_queue);
271 				n->arp_queue_len_bytes = 0;
272 				n->output = neigh_blackhole;
273 				if (n->nud_state & NUD_VALID)
274 					n->nud_state = NUD_NOARP;
275 				else
276 					n->nud_state = NUD_NONE;
277 				neigh_dbg(2, "neigh %p is stray\n", n);
278 			}
279 			write_unlock(&n->lock);
280 			neigh_cleanup_and_release(n);
281 		}
282 	}
283 }
284 
285 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
286 {
287 	write_lock_bh(&tbl->lock);
288 	neigh_flush_dev(tbl, dev);
289 	write_unlock_bh(&tbl->lock);
290 }
291 EXPORT_SYMBOL(neigh_changeaddr);
292 
293 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
294 {
295 	write_lock_bh(&tbl->lock);
296 	neigh_flush_dev(tbl, dev);
297 	pneigh_ifdown_and_unlock(tbl, dev);
298 
299 	del_timer_sync(&tbl->proxy_timer);
300 	pneigh_queue_purge(&tbl->proxy_queue);
301 	return 0;
302 }
303 EXPORT_SYMBOL(neigh_ifdown);
304 
305 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
306 {
307 	struct neighbour *n = NULL;
308 	unsigned long now = jiffies;
309 	int entries;
310 
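	/* Run a synchronous GC pass when the table is over gc_thresh3, or
	 * over gc_thresh2 and more than 5 seconds have passed since the
	 * last flush.
	 */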
311 	entries = atomic_inc_return(&tbl->entries) - 1;
312 	if (entries >= tbl->gc_thresh3 ||
313 	    (entries >= tbl->gc_thresh2 &&
314 	     time_after(now, tbl->last_flush + 5 * HZ))) {
315 		if (!neigh_forced_gc(tbl) &&
316 		    entries >= tbl->gc_thresh3) {
317 			net_info_ratelimited("%s: neighbor table overflow!\n",
318 					     tbl->id);
319 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
320 			goto out_entries;
321 		}
322 	}
323 
324 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
325 	if (!n)
326 		goto out_entries;
327 
328 	__skb_queue_head_init(&n->arp_queue);
329 	rwlock_init(&n->lock);
330 	seqlock_init(&n->ha_lock);
331 	n->updated	  = n->used = now;
332 	n->nud_state	  = NUD_NONE;
333 	n->output	  = neigh_blackhole;
334 	seqlock_init(&n->hh.hh_lock);
335 	n->parms	  = neigh_parms_clone(&tbl->parms);
336 	timer_setup(&n->timer, neigh_timer_handler, 0);
337 
338 	NEIGH_CACHE_STAT_INC(tbl, allocs);
339 	n->tbl		  = tbl;
340 	refcount_set(&n->refcnt, 1);
341 	n->dead		  = 1;
342 out:
343 	return n;
344 
345 out_entries:
346 	atomic_dec(&tbl->entries);
347 	goto out;
348 }
349 
350 static void neigh_get_hash_rnd(u32 *x)
351 {
352 	*x = get_random_u32() | 1;
353 }
354 
355 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
356 {
357 	size_t size = (1 << shift) * sizeof(struct neighbour *);
358 	struct neigh_hash_table *ret;
359 	struct neighbour __rcu **buckets;
360 	int i;
361 
362 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
363 	if (!ret)
364 		return NULL;
365 	if (size <= PAGE_SIZE)
366 		buckets = kzalloc(size, GFP_ATOMIC);
367 	else
368 		buckets = (struct neighbour __rcu **)
369 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
370 					   get_order(size));
371 	if (!buckets) {
372 		kfree(ret);
373 		return NULL;
374 	}
375 	ret->hash_buckets = buckets;
376 	ret->hash_shift = shift;
377 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
378 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
379 	return ret;
380 }
381 
382 static void neigh_hash_free_rcu(struct rcu_head *head)
383 {
384 	struct neigh_hash_table *nht = container_of(head,
385 						    struct neigh_hash_table,
386 						    rcu);
387 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
388 	struct neighbour __rcu **buckets = nht->hash_buckets;
389 
390 	if (size <= PAGE_SIZE)
391 		kfree(buckets);
392 	else
393 		free_pages((unsigned long)buckets, get_order(size));
394 	kfree(nht);
395 }
396 
397 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
398 						unsigned long new_shift)
399 {
400 	unsigned int i, hash;
401 	struct neigh_hash_table *new_nht, *old_nht;
402 
403 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
404 
405 	old_nht = rcu_dereference_protected(tbl->nht,
406 					    lockdep_is_held(&tbl->lock));
407 	new_nht = neigh_hash_alloc(new_shift);
408 	if (!new_nht)
409 		return old_nht;
410 
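	/* Relink every entry into the new table in place; tbl->nht is then
	 * switched over with rcu_assign_pointer(), so RCU readers always
	 * see a valid table.
	 */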
411 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
412 		struct neighbour *n, *next;
413 
414 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
415 						   lockdep_is_held(&tbl->lock));
416 		     n != NULL;
417 		     n = next) {
418 			hash = tbl->hash(n->primary_key, n->dev,
419 					 new_nht->hash_rnd);
420 
421 			hash >>= (32 - new_nht->hash_shift);
422 			next = rcu_dereference_protected(n->next,
423 						lockdep_is_held(&tbl->lock));
424 
425 			rcu_assign_pointer(n->next,
426 					   rcu_dereference_protected(
427 						new_nht->hash_buckets[hash],
428 						lockdep_is_held(&tbl->lock)));
429 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
430 		}
431 	}
432 
433 	rcu_assign_pointer(tbl->nht, new_nht);
434 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
435 	return new_nht;
436 }
437 
438 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
439 			       struct net_device *dev)
440 {
441 	struct neighbour *n;
442 
443 	NEIGH_CACHE_STAT_INC(tbl, lookups);
444 
445 	rcu_read_lock_bh();
446 	n = __neigh_lookup_noref(tbl, pkey, dev);
447 	if (n) {
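		/* The entry may be concurrently unlinked with its refcount
		 * already at zero; refcount_inc_not_zero() fails in that
		 * case instead of resurrecting a dying entry.
		 */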
448 		if (!refcount_inc_not_zero(&n->refcnt))
449 			n = NULL;
450 		NEIGH_CACHE_STAT_INC(tbl, hits);
451 	}
452 
453 	rcu_read_unlock_bh();
454 	return n;
455 }
456 EXPORT_SYMBOL(neigh_lookup);
457 
458 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
459 				     const void *pkey)
460 {
461 	struct neighbour *n;
462 	unsigned int key_len = tbl->key_len;
463 	u32 hash_val;
464 	struct neigh_hash_table *nht;
465 
466 	NEIGH_CACHE_STAT_INC(tbl, lookups);
467 
468 	rcu_read_lock_bh();
469 	nht = rcu_dereference_bh(tbl->nht);
470 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
471 
472 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
473 	     n != NULL;
474 	     n = rcu_dereference_bh(n->next)) {
475 		if (!memcmp(n->primary_key, pkey, key_len) &&
476 		    net_eq(dev_net(n->dev), net)) {
477 			if (!refcount_inc_not_zero(&n->refcnt))
478 				n = NULL;
479 			NEIGH_CACHE_STAT_INC(tbl, hits);
480 			break;
481 		}
482 	}
483 
484 	rcu_read_unlock_bh();
485 	return n;
486 }
487 EXPORT_SYMBOL(neigh_lookup_nodev);
488 
489 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
490 				 struct net_device *dev, bool want_ref)
491 {
492 	u32 hash_val;
493 	unsigned int key_len = tbl->key_len;
494 	int error;
495 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
496 	struct neigh_hash_table *nht;
497 
498 	if (!n) {
499 		rc = ERR_PTR(-ENOBUFS);
500 		goto out;
501 	}
502 
503 	memcpy(n->primary_key, pkey, key_len);
504 	n->dev = dev;
505 	dev_hold(dev);
506 
507 	/* Protocol specific setup. */
508 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
509 		rc = ERR_PTR(error);
510 		goto out_neigh_release;
511 	}
512 
513 	if (dev->netdev_ops->ndo_neigh_construct) {
514 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
515 		if (error < 0) {
516 			rc = ERR_PTR(error);
517 			goto out_neigh_release;
518 		}
519 	}
520 
521 	/* Device specific setup. */
522 	if (n->parms->neigh_setup &&
523 	    (error = n->parms->neigh_setup(n)) < 0) {
524 		rc = ERR_PTR(error);
525 		goto out_neigh_release;
526 	}
527 
528 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
529 
530 	write_lock_bh(&tbl->lock);
531 	nht = rcu_dereference_protected(tbl->nht,
532 					lockdep_is_held(&tbl->lock));
533 
534 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
535 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
536 
537 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
538 
539 	if (n->parms->dead) {
540 		rc = ERR_PTR(-EINVAL);
541 		goto out_tbl_unlock;
542 	}
543 
544 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
545 					    lockdep_is_held(&tbl->lock));
546 	     n1 != NULL;
547 	     n1 = rcu_dereference_protected(n1->next,
548 			lockdep_is_held(&tbl->lock))) {
549 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
550 			if (want_ref)
551 				neigh_hold(n1);
552 			rc = n1;
553 			goto out_tbl_unlock;
554 		}
555 	}
556 
557 	n->dead = 0;
558 	if (want_ref)
559 		neigh_hold(n);
560 	rcu_assign_pointer(n->next,
561 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
562 						     lockdep_is_held(&tbl->lock)));
563 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
564 	write_unlock_bh(&tbl->lock);
565 	neigh_dbg(2, "neigh %p is created\n", n);
566 	rc = n;
567 out:
568 	return rc;
569 out_tbl_unlock:
570 	write_unlock_bh(&tbl->lock);
571 out_neigh_release:
572 	neigh_release(n);
573 	goto out;
574 }
575 EXPORT_SYMBOL(__neigh_create);
576 
577 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
578 {
579 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
580 	hash_val ^= (hash_val >> 16);
581 	hash_val ^= hash_val >> 8;
582 	hash_val ^= hash_val >> 4;
583 	hash_val &= PNEIGH_HASHMASK;
584 	return hash_val;
585 }
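
/* Worked example (illustrative): for a key whose last 32-bit word reads
 * 0xC0A80001, the folding above gives
 *	0xC0A80001 -> 0xC0A8C0A9 -> 0xC0686869 -> 0xCC6EEEEF,
 * and the final & PNEIGH_HASHMASK selects bucket 0xF.
 */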
586 
587 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
588 					      struct net *net,
589 					      const void *pkey,
590 					      unsigned int key_len,
591 					      struct net_device *dev)
592 {
593 	while (n) {
594 		if (!memcmp(n->key, pkey, key_len) &&
595 		    net_eq(pneigh_net(n), net) &&
596 		    (n->dev == dev || !n->dev))
597 			return n;
598 		n = n->next;
599 	}
600 	return NULL;
601 }
602 
603 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
604 		struct net *net, const void *pkey, struct net_device *dev)
605 {
606 	unsigned int key_len = tbl->key_len;
607 	u32 hash_val = pneigh_hash(pkey, key_len);
608 
609 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
610 				 net, pkey, key_len, dev);
611 }
612 EXPORT_SYMBOL_GPL(__pneigh_lookup);
613 
614 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
615 				    struct net *net, const void *pkey,
616 				    struct net_device *dev, int creat)
617 {
618 	struct pneigh_entry *n;
619 	unsigned int key_len = tbl->key_len;
620 	u32 hash_val = pneigh_hash(pkey, key_len);
621 
622 	read_lock_bh(&tbl->lock);
623 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
624 			      net, pkey, key_len, dev);
625 	read_unlock_bh(&tbl->lock);
626 
627 	if (n || !creat)
628 		goto out;
629 
630 	ASSERT_RTNL();
631 
632 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
633 	if (!n)
634 		goto out;
635 
636 	write_pnet(&n->net, net);
637 	memcpy(n->key, pkey, key_len);
638 	n->dev = dev;
639 	if (dev)
640 		dev_hold(dev);
641 
642 	if (tbl->pconstructor && tbl->pconstructor(n)) {
643 		if (dev)
644 			dev_put(dev);
645 		kfree(n);
646 		n = NULL;
647 		goto out;
648 	}
649 
650 	write_lock_bh(&tbl->lock);
651 	n->next = tbl->phash_buckets[hash_val];
652 	tbl->phash_buckets[hash_val] = n;
653 	write_unlock_bh(&tbl->lock);
654 out:
655 	return n;
656 }
657 EXPORT_SYMBOL(pneigh_lookup);
658 
659 
660 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
661 		  struct net_device *dev)
662 {
663 	struct pneigh_entry *n, **np;
664 	unsigned int key_len = tbl->key_len;
665 	u32 hash_val = pneigh_hash(pkey, key_len);
666 
667 	write_lock_bh(&tbl->lock);
668 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
669 	     np = &n->next) {
670 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
671 		    net_eq(pneigh_net(n), net)) {
672 			*np = n->next;
673 			write_unlock_bh(&tbl->lock);
674 			if (tbl->pdestructor)
675 				tbl->pdestructor(n);
676 			if (n->dev)
677 				dev_put(n->dev);
678 			kfree(n);
679 			return 0;
680 		}
681 	}
682 	write_unlock_bh(&tbl->lock);
683 	return -ENOENT;
684 }
685 
686 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
687 				    struct net_device *dev)
688 {
689 	struct pneigh_entry *n, **np, *freelist = NULL;
690 	u32 h;
691 
692 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
693 		np = &tbl->phash_buckets[h];
694 		while ((n = *np) != NULL) {
695 			if (!dev || n->dev == dev) {
696 				*np = n->next;
697 				n->next = freelist;
698 				freelist = n;
699 				continue;
700 			}
701 			np = &n->next;
702 		}
703 	}
704 	write_unlock_bh(&tbl->lock);
705 	while ((n = freelist)) {
706 		freelist = n->next;
707 		n->next = NULL;
708 		if (tbl->pdestructor)
709 			tbl->pdestructor(n);
710 		if (n->dev)
711 			dev_put(n->dev);
712 		kfree(n);
713 	}
714 	return -ENOENT;
715 }
716 
717 static void neigh_parms_destroy(struct neigh_parms *parms);
718 
719 static inline void neigh_parms_put(struct neigh_parms *parms)
720 {
721 	if (refcount_dec_and_test(&parms->refcnt))
722 		neigh_parms_destroy(parms);
723 }
724 
725 /*
726  *	Destroy a neighbour entry; the entry must already
727  *	have been removed from the table.
728  */
729 void neigh_destroy(struct neighbour *neigh)
730 {
731 	struct net_device *dev = neigh->dev;
732 
733 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
734 
735 	if (!neigh->dead) {
736 		pr_warn("Destroying alive neighbour %p\n", neigh);
737 		dump_stack();
738 		return;
739 	}
740 
741 	if (neigh_del_timer(neigh))
742 		pr_warn("Impossible event\n");
743 
744 	write_lock_bh(&neigh->lock);
745 	__skb_queue_purge(&neigh->arp_queue);
746 	write_unlock_bh(&neigh->lock);
747 	neigh->arp_queue_len_bytes = 0;
748 
749 	if (dev->netdev_ops->ndo_neigh_destroy)
750 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
751 
752 	dev_put(dev);
753 	neigh_parms_put(neigh->parms);
754 
755 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
756 
757 	atomic_dec(&neigh->tbl->entries);
758 	kfree_rcu(neigh, rcu);
759 }
760 EXPORT_SYMBOL(neigh_destroy);
761 
762 /* Neighbour state is suspicious;
763    disable the fast path.
764 
765    Called with neigh write-locked.
766  */
767 static void neigh_suspect(struct neighbour *neigh)
768 {
769 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
770 
771 	neigh->output = neigh->ops->output;
772 }
773 
774 /* Neighbour state is OK;
775    enable the fast path.
776 
777    Called with neigh write-locked.
778  */
779 static void neigh_connect(struct neighbour *neigh)
780 {
781 	neigh_dbg(2, "neigh %p is connected\n", neigh);
782 
783 	neigh->output = neigh->ops->connected_output;
784 }
785 
786 static void neigh_periodic_work(struct work_struct *work)
787 {
788 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
789 	struct neighbour *n;
790 	struct neighbour __rcu **np;
791 	unsigned int i;
792 	struct neigh_hash_table *nht;
793 
794 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
795 
796 	write_lock_bh(&tbl->lock);
797 	nht = rcu_dereference_protected(tbl->nht,
798 					lockdep_is_held(&tbl->lock));
799 
800 	/*
801 	 *	Periodically recompute ReachableTime from the random function.
802 	 */
803 
804 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
805 		struct neigh_parms *p;
806 		tbl->last_rand = jiffies;
807 		list_for_each_entry(p, &tbl->parms_list, list)
808 			p->reachable_time =
809 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
810 	}
811 
812 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
813 		goto out;
814 
815 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
816 		np = &nht->hash_buckets[i];
817 
818 		while ((n = rcu_dereference_protected(*np,
819 				lockdep_is_held(&tbl->lock))) != NULL) {
820 			unsigned int state;
821 
822 			write_lock(&n->lock);
823 
824 			state = n->nud_state;
825 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
826 			    (n->flags & NTF_EXT_LEARNED)) {
827 				write_unlock(&n->lock);
828 				goto next_elt;
829 			}
830 
831 			if (time_before(n->used, n->confirmed))
832 				n->used = n->confirmed;
833 
834 			if (refcount_read(&n->refcnt) == 1 &&
835 			    (state == NUD_FAILED ||
836 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
837 				*np = n->next;
838 				n->dead = 1;
839 				write_unlock(&n->lock);
840 				neigh_cleanup_and_release(n);
841 				continue;
842 			}
843 			write_unlock(&n->lock);
844 
845 next_elt:
846 			np = &n->next;
847 		}
848 		/*
849 		 * It's fine to release the lock here, even if the hash
850 		 * table grows while we are preempted.
851 		 */
852 		write_unlock_bh(&tbl->lock);
853 		cond_resched();
854 		write_lock_bh(&tbl->lock);
855 		nht = rcu_dereference_protected(tbl->nht,
856 						lockdep_is_held(&tbl->lock));
857 	}
858 out:
859 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
860 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
861 	 * BASE_REACHABLE_TIME.
862 	 */
863 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
864 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
865 	write_unlock_bh(&tbl->lock);
866 }
867 
868 static __inline__ int neigh_max_probes(struct neighbour *n)
869 {
870 	struct neigh_parms *p = n->parms;
871 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
872 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
873 	        NEIGH_VAR(p, MCAST_PROBES));
874 }
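
/* With common defaults (UCAST_PROBES = 3, APP_PROBES = 0, MCAST_PROBES = 3,
 * MCAST_REPROBES = 0 -- these are per-parms tunables, so treat the figures
 * as an assumption) an unresolved entry gets at most 6 probes before it is
 * marked NUD_FAILED.
 */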
875 
876 static void neigh_invalidate(struct neighbour *neigh)
877 	__releases(neigh->lock)
878 	__acquires(neigh->lock)
879 {
880 	struct sk_buff *skb;
881 
882 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
883 	neigh_dbg(2, "neigh %p is failed\n", neigh);
884 	neigh->updated = jiffies;
885 
886 	/* This is a delicate spot: error_report() can be a very complicated
887 	   routine and, in particular, it can hit this same neighbour entry!
888 
889 	   So we try to be careful and avoid an endless loop. --ANK
890 	 */
891 	while (neigh->nud_state == NUD_FAILED &&
892 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
893 		write_unlock(&neigh->lock);
894 		neigh->ops->error_report(neigh, skb);
895 		write_lock(&neigh->lock);
896 	}
897 	__skb_queue_purge(&neigh->arp_queue);
898 	neigh->arp_queue_len_bytes = 0;
899 }
900 
901 static void neigh_probe(struct neighbour *neigh)
902 	__releases(neigh->lock)
903 {
904 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
905 	/* keep skb alive even if arp_queue overflows */
906 	if (skb)
907 		skb = skb_clone(skb, GFP_ATOMIC);
908 	write_unlock(&neigh->lock);
909 	if (neigh->ops->solicit)
910 		neigh->ops->solicit(neigh, skb);
911 	atomic_inc(&neigh->probes);
912 	kfree_skb(skb);
913 }
914 
915 /* Called when a timer expires for a neighbour entry. */
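/* A rough sketch of the transitions the handler below implements:
 *
 *	REACHABLE --(reachable_time elapsed)-------------> DELAY or STALE
 *	DELAY -----(confirmed recently)------------------> REACHABLE
 *	DELAY -----(not confirmed)-----------------------> PROBE
 *	INCOMPLETE/PROBE --(neigh_max_probes() reached)--> FAILED
 */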
916 
917 static void neigh_timer_handler(struct timer_list *t)
918 {
919 	unsigned long now, next;
920 	struct neighbour *neigh = from_timer(neigh, t, timer);
921 	unsigned int state;
922 	int notify = 0;
923 
924 	write_lock(&neigh->lock);
925 
926 	state = neigh->nud_state;
927 	now = jiffies;
928 	next = now + HZ;
929 
930 	if (!(state & NUD_IN_TIMER))
931 		goto out;
932 
933 	if (state & NUD_REACHABLE) {
934 		if (time_before_eq(now,
935 				   neigh->confirmed + neigh->parms->reachable_time)) {
936 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
937 			next = neigh->confirmed + neigh->parms->reachable_time;
938 		} else if (time_before_eq(now,
939 					  neigh->used +
940 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
941 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
942 			neigh->nud_state = NUD_DELAY;
943 			neigh->updated = jiffies;
944 			neigh_suspect(neigh);
945 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
946 		} else {
947 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
948 			neigh->nud_state = NUD_STALE;
949 			neigh->updated = jiffies;
950 			neigh_suspect(neigh);
951 			notify = 1;
952 		}
953 	} else if (state & NUD_DELAY) {
954 		if (time_before_eq(now,
955 				   neigh->confirmed +
956 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
957 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
958 			neigh->nud_state = NUD_REACHABLE;
959 			neigh->updated = jiffies;
960 			neigh_connect(neigh);
961 			notify = 1;
962 			next = neigh->confirmed + neigh->parms->reachable_time;
963 		} else {
964 			neigh_dbg(2, "neigh %p is probed\n", neigh);
965 			neigh->nud_state = NUD_PROBE;
966 			neigh->updated = jiffies;
967 			atomic_set(&neigh->probes, 0);
968 			notify = 1;
969 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
970 		}
971 	} else {
972 		/* NUD_PROBE|NUD_INCOMPLETE */
973 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
974 	}
975 
976 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
977 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
978 		neigh->nud_state = NUD_FAILED;
979 		notify = 1;
980 		neigh_invalidate(neigh);
981 		goto out;
982 	}
983 
984 	if (neigh->nud_state & NUD_IN_TIMER) {
985 		if (time_before(next, jiffies + HZ/2))
986 			next = jiffies + HZ/2;
987 		if (!mod_timer(&neigh->timer, next))
988 			neigh_hold(neigh);
989 	}
990 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
991 		neigh_probe(neigh);
992 	} else {
993 out:
994 		write_unlock(&neigh->lock);
995 	}
996 
997 	if (notify)
998 		neigh_update_notify(neigh, 0);
999 
1000 	neigh_release(neigh);
1001 }
1002 
1003 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1004 {
1005 	int rc;
1006 	bool immediate_probe = false;
1007 
1008 	write_lock_bh(&neigh->lock);
1009 
1010 	rc = 0;
1011 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1012 		goto out_unlock_bh;
1013 	if (neigh->dead)
1014 		goto out_dead;
1015 
1016 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1017 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1018 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1019 			unsigned long next, now = jiffies;
1020 
1021 			atomic_set(&neigh->probes,
1022 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1023 			neigh->nud_state     = NUD_INCOMPLETE;
1024 			neigh->updated = now;
1025 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1026 					 HZ/2);
1027 			neigh_add_timer(neigh, next);
1028 			immediate_probe = true;
1029 		} else {
1030 			neigh->nud_state = NUD_FAILED;
1031 			neigh->updated = jiffies;
1032 			write_unlock_bh(&neigh->lock);
1033 
1034 			kfree_skb(skb);
1035 			return 1;
1036 		}
1037 	} else if (neigh->nud_state & NUD_STALE) {
1038 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1039 		neigh->nud_state = NUD_DELAY;
1040 		neigh->updated = jiffies;
1041 		neigh_add_timer(neigh, jiffies +
1042 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1043 	}
1044 
1045 	if (neigh->nud_state == NUD_INCOMPLETE) {
1046 		if (skb) {
1047 			while (neigh->arp_queue_len_bytes + skb->truesize >
1048 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1049 				struct sk_buff *buff;
1050 
1051 				buff = __skb_dequeue(&neigh->arp_queue);
1052 				if (!buff)
1053 					break;
1054 				neigh->arp_queue_len_bytes -= buff->truesize;
1055 				kfree_skb(buff);
1056 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1057 			}
1058 			skb_dst_force(skb);
1059 			__skb_queue_tail(&neigh->arp_queue, skb);
1060 			neigh->arp_queue_len_bytes += skb->truesize;
1061 		}
1062 		rc = 1;
1063 	}
1064 out_unlock_bh:
1065 	if (immediate_probe)
1066 		neigh_probe(neigh);
1067 	else
1068 		write_unlock(&neigh->lock);
1069 	local_bh_enable();
1070 	return rc;
1071 
1072 out_dead:
1073 	if (neigh->nud_state & NUD_STALE)
1074 		goto out_unlock_bh;
1075 	write_unlock_bh(&neigh->lock);
1076 	kfree_skb(skb);
1077 	return 1;
1078 }
1079 EXPORT_SYMBOL(__neigh_event_send);
1080 
1081 static void neigh_update_hhs(struct neighbour *neigh)
1082 {
1083 	struct hh_cache *hh;
1084 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1085 		= NULL;
1086 
1087 	if (neigh->dev->header_ops)
1088 		update = neigh->dev->header_ops->cache_update;
1089 
1090 	if (update) {
1091 		hh = &neigh->hh;
1092 		if (hh->hh_len) {
1093 			write_seqlock_bh(&hh->hh_lock);
1094 			update(hh, neigh->dev, neigh->ha);
1095 			write_sequnlock_bh(&hh->hh_lock);
1096 		}
1097 	}
1098 }
1099 
1100 
1101 
1102 /* Generic update routine.
1103    -- lladdr is the new lladdr, or NULL if none is supplied.
1104    -- new    is the new state.
1105    -- flags
1106 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1107 				if it is different.
1108 	NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
1109 				lladdr as suspect instead of overriding it
1110 				if it is different.
1111 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1112 
1113 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1114 				NTF_ROUTER flag.
1115 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to be
1116 				a router.
1117 
1118    The caller MUST hold a reference count on the entry.
1119  */
1120 
1121 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1122 		 u32 flags, u32 nlmsg_pid)
1123 {
1124 	u8 old;
1125 	int err;
1126 	int notify = 0;
1127 	struct net_device *dev;
1128 	int update_isrouter = 0;
1129 
1130 	write_lock_bh(&neigh->lock);
1131 
1132 	dev    = neigh->dev;
1133 	old    = neigh->nud_state;
1134 	err    = -EPERM;
1135 
1136 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1137 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1138 		goto out;
1139 	if (neigh->dead)
1140 		goto out;
1141 
1142 	neigh_update_ext_learned(neigh, flags, &notify);
1143 
1144 	if (!(new & NUD_VALID)) {
1145 		neigh_del_timer(neigh);
1146 		if (old & NUD_CONNECTED)
1147 			neigh_suspect(neigh);
1148 		neigh->nud_state = new;
1149 		err = 0;
1150 		notify = old & NUD_VALID;
1151 		if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
1152 		     (flags & NEIGH_UPDATE_F_ADMIN)) &&
1153 		    (new & NUD_FAILED)) {
1154 			neigh_invalidate(neigh);
1155 			notify = 1;
1156 		}
1157 		goto out;
1158 	}
1159 
1160 	/* Compare the new lladdr with the cached one */
1161 	if (!dev->addr_len) {
1162 		/* First case: device needs no address. */
1163 		lladdr = neigh->ha;
1164 	} else if (lladdr) {
1165 		/* The second case: something is already cached
1166 		   and a new address is proposed:
1167 		   - compare the new and old addresses;
1168 		   - if they differ, check the override flags.
1169 		 */
1170 		if ((old & NUD_VALID) &&
1171 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1172 			lladdr = neigh->ha;
1173 	} else {
1174 		/* No address is supplied; if we know something,
1175 		   use it, otherwise discard the request.
1176 		 */
1177 		err = -EINVAL;
1178 		if (!(old & NUD_VALID))
1179 			goto out;
1180 		lladdr = neigh->ha;
1181 	}
1182 
1183 	/* Update the confirmed timestamp for the neighbour entry after we
1184 	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1185 	 */
1186 	if (new & NUD_CONNECTED)
1187 		neigh->confirmed = jiffies;
1188 
1189 	/* If the entry was valid and the address has not changed,
1190 	   do not change the entry state if the new one is STALE.
1191 	 */
1192 	err = 0;
1193 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1194 	if (old & NUD_VALID) {
1195 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1196 			update_isrouter = 0;
1197 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1198 			    (old & NUD_CONNECTED)) {
1199 				lladdr = neigh->ha;
1200 				new = NUD_STALE;
1201 			} else
1202 				goto out;
1203 		} else {
1204 			if (lladdr == neigh->ha && new == NUD_STALE &&
1205 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1206 				new = old;
1207 		}
1208 	}
1209 
1210 	/* Update the timestamp only once we know we will make a change to the
1211 	 * neighbour entry. Otherwise we risk moving the locktime window with
1212 	 * no-op updates and ignoring relevant ARP updates.
1213 	 */
1214 	if (new != old || lladdr != neigh->ha)
1215 		neigh->updated = jiffies;
1216 
1217 	if (new != old) {
1218 		neigh_del_timer(neigh);
1219 		if (new & NUD_PROBE)
1220 			atomic_set(&neigh->probes, 0);
1221 		if (new & NUD_IN_TIMER)
1222 			neigh_add_timer(neigh, (jiffies +
1223 						((new & NUD_REACHABLE) ?
1224 						 neigh->parms->reachable_time :
1225 						 0)));
1226 		neigh->nud_state = new;
1227 		notify = 1;
1228 	}
1229 
1230 	if (lladdr != neigh->ha) {
1231 		write_seqlock(&neigh->ha_lock);
1232 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1233 		write_sequnlock(&neigh->ha_lock);
1234 		neigh_update_hhs(neigh);
1235 		if (!(new & NUD_CONNECTED))
1236 			neigh->confirmed = jiffies -
1237 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1238 		notify = 1;
1239 	}
1240 	if (new == old)
1241 		goto out;
1242 	if (new & NUD_CONNECTED)
1243 		neigh_connect(neigh);
1244 	else
1245 		neigh_suspect(neigh);
1246 	if (!(old & NUD_VALID)) {
1247 		struct sk_buff *skb;
1248 
1249 		/* Again: avoid an endless loop if something goes wrong */
1250 
1251 		while (neigh->nud_state & NUD_VALID &&
1252 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1253 			struct dst_entry *dst = skb_dst(skb);
1254 			struct neighbour *n2, *n1 = neigh;
1255 			write_unlock_bh(&neigh->lock);
1256 
1257 			rcu_read_lock();
1258 
1259 			/* Why not just use 'neigh' as-is?  The problem is that
1260 			 * things such as shaper, eql, and sch_teql can end up
1261 			 * using alternative, different, neigh objects to output
1262 			 * the packet in the output path.  So what we need to do
1263 			 * here is re-lookup the top-level neigh in the path so
1264 			 * we can reinject the packet there.
1265 			 */
1266 			n2 = NULL;
1267 			if (dst) {
1268 				n2 = dst_neigh_lookup_skb(dst, skb);
1269 				if (n2)
1270 					n1 = n2;
1271 			}
1272 			n1->output(n1, skb);
1273 			if (n2)
1274 				neigh_release(n2);
1275 			rcu_read_unlock();
1276 
1277 			write_lock_bh(&neigh->lock);
1278 		}
1279 		__skb_queue_purge(&neigh->arp_queue);
1280 		neigh->arp_queue_len_bytes = 0;
1281 	}
1282 out:
1283 	if (update_isrouter)
1284 		neigh_update_is_router(neigh, flags, &notify);
1285 	write_unlock_bh(&neigh->lock);
1286 
1287 	if (notify)
1288 		neigh_update_notify(neigh, nlmsg_pid);
1289 
1290 	return err;
1291 }
1292 EXPORT_SYMBOL(neigh_update);
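
/* Illustrative caller (a simplified sketch of what ARP input processing in
 * net/ipv4/arp.c does, not a verbatim quote):
 *
 *	neigh_update(n, sha, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE, 0);
 *
 * i.e. a confirmed reply may both refresh the state and replace a different
 * cached link-layer address.
 */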
1293 
1294 /* Update the neigh to listen temporarily for probe responses, even if it is
1295  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1296  */
1297 void __neigh_set_probe_once(struct neighbour *neigh)
1298 {
1299 	if (neigh->dead)
1300 		return;
1301 	neigh->updated = jiffies;
1302 	if (!(neigh->nud_state & NUD_FAILED))
1303 		return;
1304 	neigh->nud_state = NUD_INCOMPLETE;
1305 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1306 	neigh_add_timer(neigh,
1307 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1308 }
1309 EXPORT_SYMBOL(__neigh_set_probe_once);
1310 
1311 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1312 				 u8 *lladdr, void *saddr,
1313 				 struct net_device *dev)
1314 {
1315 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1316 						 lladdr || !dev->addr_len);
1317 	if (neigh)
1318 		neigh_update(neigh, lladdr, NUD_STALE,
1319 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1320 	return neigh;
1321 }
1322 EXPORT_SYMBOL(neigh_event_ns);
1323 
1324 /* Initializes the hh_cache entry; takes and releases n->lock itself. */
1325 static void neigh_hh_init(struct neighbour *n)
1326 {
1327 	struct net_device *dev = n->dev;
1328 	__be16 prot = n->tbl->protocol;
1329 	struct hh_cache	*hh = &n->hh;
1330 
1331 	write_lock_bh(&n->lock);
1332 
1333 	/* Only one thread can come in here and initialize the
1334 	 * hh_cache entry.
1335 	 */
1336 	if (!hh->hh_len)
1337 		dev->header_ops->cache(n, hh, prot);
1338 
1339 	write_unlock_bh(&n->lock);
1340 }
1341 
1342 /* Slow and careful. */
1343 
1344 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1345 {
1346 	int rc = 0;
1347 
1348 	if (!neigh_event_send(neigh, skb)) {
1349 		int err;
1350 		struct net_device *dev = neigh->dev;
1351 		unsigned int seq;
1352 
1353 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1354 			neigh_hh_init(neigh);
1355 
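		/* Snapshot the ll address under the ha_lock seqlock read
		 * side; retry the copy if a writer changed it meanwhile.
		 */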
1356 		do {
1357 			__skb_pull(skb, skb_network_offset(skb));
1358 			seq = read_seqbegin(&neigh->ha_lock);
1359 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1360 					      neigh->ha, NULL, skb->len);
1361 		} while (read_seqretry(&neigh->ha_lock, seq));
1362 
1363 		if (err >= 0)
1364 			rc = dev_queue_xmit(skb);
1365 		else
1366 			goto out_kfree_skb;
1367 	}
1368 out:
1369 	return rc;
1370 out_kfree_skb:
1371 	rc = -EINVAL;
1372 	kfree_skb(skb);
1373 	goto out;
1374 }
1375 EXPORT_SYMBOL(neigh_resolve_output);
1376 
1377 /* As fast as possible without hh cache */
1378 
1379 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1380 {
1381 	struct net_device *dev = neigh->dev;
1382 	unsigned int seq;
1383 	int err;
1384 
1385 	do {
1386 		__skb_pull(skb, skb_network_offset(skb));
1387 		seq = read_seqbegin(&neigh->ha_lock);
1388 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1389 				      neigh->ha, NULL, skb->len);
1390 	} while (read_seqretry(&neigh->ha_lock, seq));
1391 
1392 	if (err >= 0)
1393 		err = dev_queue_xmit(skb);
1394 	else {
1395 		err = -EINVAL;
1396 		kfree_skb(skb);
1397 	}
1398 	return err;
1399 }
1400 EXPORT_SYMBOL(neigh_connected_output);
1401 
1402 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1403 {
1404 	return dev_queue_xmit(skb);
1405 }
1406 EXPORT_SYMBOL(neigh_direct_output);
1407 
1408 static void neigh_proxy_process(struct timer_list *t)
1409 {
1410 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1411 	long sched_next = 0;
1412 	unsigned long now = jiffies;
1413 	struct sk_buff *skb, *n;
1414 
1415 	spin_lock(&tbl->proxy_queue.lock);
1416 
1417 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1418 		long tdif = NEIGH_CB(skb)->sched_next - now;
1419 
1420 		if (tdif <= 0) {
1421 			struct net_device *dev = skb->dev;
1422 
1423 			__skb_unlink(skb, &tbl->proxy_queue);
1424 			if (tbl->proxy_redo && netif_running(dev)) {
1425 				rcu_read_lock();
1426 				tbl->proxy_redo(skb);
1427 				rcu_read_unlock();
1428 			} else {
1429 				kfree_skb(skb);
1430 			}
1431 
1432 			dev_put(dev);
1433 		} else if (!sched_next || tdif < sched_next)
1434 			sched_next = tdif;
1435 	}
1436 	del_timer(&tbl->proxy_timer);
1437 	if (sched_next)
1438 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1439 	spin_unlock(&tbl->proxy_queue.lock);
1440 }
1441 
1442 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1443 		    struct sk_buff *skb)
1444 {
1445 	unsigned long now = jiffies;
1446 
1447 	unsigned long sched_next = now + (prandom_u32() %
1448 					  NEIGH_VAR(p, PROXY_DELAY));
1449 
1450 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1451 		kfree_skb(skb);
1452 		return;
1453 	}
1454 
1455 	NEIGH_CB(skb)->sched_next = sched_next;
1456 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1457 
1458 	spin_lock(&tbl->proxy_queue.lock);
1459 	if (del_timer(&tbl->proxy_timer)) {
1460 		if (time_before(tbl->proxy_timer.expires, sched_next))
1461 			sched_next = tbl->proxy_timer.expires;
1462 	}
1463 	skb_dst_drop(skb);
1464 	dev_hold(skb->dev);
1465 	__skb_queue_tail(&tbl->proxy_queue, skb);
1466 	mod_timer(&tbl->proxy_timer, sched_next);
1467 	spin_unlock(&tbl->proxy_queue.lock);
1468 }
1469 EXPORT_SYMBOL(pneigh_enqueue);
1470 
1471 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1472 						      struct net *net, int ifindex)
1473 {
1474 	struct neigh_parms *p;
1475 
1476 	list_for_each_entry(p, &tbl->parms_list, list) {
1477 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1478 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1479 			return p;
1480 	}
1481 
1482 	return NULL;
1483 }
1484 
1485 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1486 				      struct neigh_table *tbl)
1487 {
1488 	struct neigh_parms *p;
1489 	struct net *net = dev_net(dev);
1490 	const struct net_device_ops *ops = dev->netdev_ops;
1491 
1492 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1493 	if (p) {
1494 		p->tbl		  = tbl;
1495 		refcount_set(&p->refcnt, 1);
1496 		p->reachable_time =
1497 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1498 		dev_hold(dev);
1499 		p->dev = dev;
1500 		write_pnet(&p->net, net);
1501 		p->sysctl_table = NULL;
1502 
1503 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1504 			dev_put(dev);
1505 			kfree(p);
1506 			return NULL;
1507 		}
1508 
1509 		write_lock_bh(&tbl->lock);
1510 		list_add(&p->list, &tbl->parms.list);
1511 		write_unlock_bh(&tbl->lock);
1512 
1513 		neigh_parms_data_state_cleanall(p);
1514 	}
1515 	return p;
1516 }
1517 EXPORT_SYMBOL(neigh_parms_alloc);
1518 
1519 static void neigh_rcu_free_parms(struct rcu_head *head)
1520 {
1521 	struct neigh_parms *parms =
1522 		container_of(head, struct neigh_parms, rcu_head);
1523 
1524 	neigh_parms_put(parms);
1525 }
1526 
1527 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1528 {
1529 	if (!parms || parms == &tbl->parms)
1530 		return;
1531 	write_lock_bh(&tbl->lock);
1532 	list_del(&parms->list);
1533 	parms->dead = 1;
1534 	write_unlock_bh(&tbl->lock);
1535 	if (parms->dev)
1536 		dev_put(parms->dev);
1537 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1538 }
1539 EXPORT_SYMBOL(neigh_parms_release);
1540 
1541 static void neigh_parms_destroy(struct neigh_parms *parms)
1542 {
1543 	kfree(parms);
1544 }
1545 
1546 static struct lock_class_key neigh_table_proxy_queue_class;
1547 
1548 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1549 
1550 void neigh_table_init(int index, struct neigh_table *tbl)
1551 {
1552 	unsigned long now = jiffies;
1553 	unsigned long phsize;
1554 
1555 	INIT_LIST_HEAD(&tbl->parms_list);
1556 	list_add(&tbl->parms.list, &tbl->parms_list);
1557 	write_pnet(&tbl->parms.net, &init_net);
1558 	refcount_set(&tbl->parms.refcnt, 1);
1559 	tbl->parms.reachable_time =
1560 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1561 
1562 	tbl->stats = alloc_percpu(struct neigh_statistics);
1563 	if (!tbl->stats)
1564 		panic("cannot create neighbour cache statistics");
1565 
1566 #ifdef CONFIG_PROC_FS
1567 	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1568 			      &neigh_stat_seq_ops, tbl))
1569 		panic("cannot create neighbour proc dir entry");
1570 #endif
1571 
1572 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1573 
1574 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1575 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1576 
1577 	if (!tbl->nht || !tbl->phash_buckets)
1578 		panic("cannot allocate neighbour cache hashes");
1579 
1580 	if (!tbl->entry_size)
1581 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1582 					tbl->key_len, NEIGH_PRIV_ALIGN);
1583 	else
1584 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1585 
1586 	rwlock_init(&tbl->lock);
1587 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1588 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1589 			tbl->parms.reachable_time);
1590 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1591 	skb_queue_head_init_class(&tbl->proxy_queue,
1592 			&neigh_table_proxy_queue_class);
1593 
1594 	tbl->last_flush = now;
1595 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1596 
1597 	neigh_tables[index] = tbl;
1598 }
1599 EXPORT_SYMBOL(neigh_table_init);
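
/* Typical registration (sketch): each protocol supplies its own table at
 * init time, e.g. ARP does neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl).
 */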
1600 
1601 int neigh_table_clear(int index, struct neigh_table *tbl)
1602 {
1603 	neigh_tables[index] = NULL;
1604 	/* This is not clean... fix it so the IPv6 module can unload safely */
1605 	cancel_delayed_work_sync(&tbl->gc_work);
1606 	del_timer_sync(&tbl->proxy_timer);
1607 	pneigh_queue_purge(&tbl->proxy_queue);
1608 	neigh_ifdown(tbl, NULL);
1609 	if (atomic_read(&tbl->entries))
1610 		pr_crit("neighbour leakage\n");
1611 
1612 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1613 		 neigh_hash_free_rcu);
1614 	tbl->nht = NULL;
1615 
1616 	kfree(tbl->phash_buckets);
1617 	tbl->phash_buckets = NULL;
1618 
1619 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1620 
1621 	free_percpu(tbl->stats);
1622 	tbl->stats = NULL;
1623 
1624 	return 0;
1625 }
1626 EXPORT_SYMBOL(neigh_table_clear);
1627 
1628 static struct neigh_table *neigh_find_table(int family)
1629 {
1630 	struct neigh_table *tbl = NULL;
1631 
1632 	switch (family) {
1633 	case AF_INET:
1634 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1635 		break;
1636 	case AF_INET6:
1637 		tbl = neigh_tables[NEIGH_ND_TABLE];
1638 		break;
1639 	case AF_DECnet:
1640 		tbl = neigh_tables[NEIGH_DN_TABLE];
1641 		break;
1642 	}
1643 
1644 	return tbl;
1645 }
1646 
1647 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1648 			struct netlink_ext_ack *extack)
1649 {
1650 	struct net *net = sock_net(skb->sk);
1651 	struct ndmsg *ndm;
1652 	struct nlattr *dst_attr;
1653 	struct neigh_table *tbl;
1654 	struct neighbour *neigh;
1655 	struct net_device *dev = NULL;
1656 	int err = -EINVAL;
1657 
1658 	ASSERT_RTNL();
1659 	if (nlmsg_len(nlh) < sizeof(*ndm))
1660 		goto out;
1661 
1662 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1663 	if (dst_attr == NULL)
1664 		goto out;
1665 
1666 	ndm = nlmsg_data(nlh);
1667 	if (ndm->ndm_ifindex) {
1668 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1669 		if (dev == NULL) {
1670 			err = -ENODEV;
1671 			goto out;
1672 		}
1673 	}
1674 
1675 	tbl = neigh_find_table(ndm->ndm_family);
1676 	if (tbl == NULL)
1677 		return -EAFNOSUPPORT;
1678 
1679 	if (nla_len(dst_attr) < (int)tbl->key_len)
1680 		goto out;
1681 
1682 	if (ndm->ndm_flags & NTF_PROXY) {
1683 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1684 		goto out;
1685 	}
1686 
1687 	if (dev == NULL)
1688 		goto out;
1689 
1690 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1691 	if (neigh == NULL) {
1692 		err = -ENOENT;
1693 		goto out;
1694 	}
1695 
1696 	err = neigh_update(neigh, NULL, NUD_FAILED,
1697 			   NEIGH_UPDATE_F_OVERRIDE |
1698 			   NEIGH_UPDATE_F_ADMIN,
1699 			   NETLINK_CB(skb).portid);
1700 	write_lock_bh(&tbl->lock);
1701 	neigh_release(neigh);
1702 	neigh_remove_one(neigh, tbl);
1703 	write_unlock_bh(&tbl->lock);
1704 
1705 out:
1706 	return err;
1707 }
1708 
1709 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1710 		     struct netlink_ext_ack *extack)
1711 {
1712 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1713 		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1714 	struct net *net = sock_net(skb->sk);
1715 	struct ndmsg *ndm;
1716 	struct nlattr *tb[NDA_MAX+1];
1717 	struct neigh_table *tbl;
1718 	struct net_device *dev = NULL;
1719 	struct neighbour *neigh;
1720 	void *dst, *lladdr;
1721 	int err;
1722 
1723 	ASSERT_RTNL();
1724 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
1725 	if (err < 0)
1726 		goto out;
1727 
1728 	err = -EINVAL;
1729 	if (tb[NDA_DST] == NULL)
1730 		goto out;
1731 
1732 	ndm = nlmsg_data(nlh);
1733 	if (ndm->ndm_ifindex) {
1734 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1735 		if (dev == NULL) {
1736 			err = -ENODEV;
1737 			goto out;
1738 		}
1739 
1740 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1741 			goto out;
1742 	}
1743 
1744 	tbl = neigh_find_table(ndm->ndm_family);
1745 	if (tbl == NULL)
1746 		return -EAFNOSUPPORT;
1747 
1748 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
1749 		goto out;
1750 	dst = nla_data(tb[NDA_DST]);
1751 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1752 
1753 	if (ndm->ndm_flags & NTF_PROXY) {
1754 		struct pneigh_entry *pn;
1755 
1756 		err = -ENOBUFS;
1757 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1758 		if (pn) {
1759 			pn->flags = ndm->ndm_flags;
1760 			err = 0;
1761 		}
1762 		goto out;
1763 	}
1764 
1765 	if (dev == NULL)
1766 		goto out;
1767 
1768 	neigh = neigh_lookup(tbl, dst, dev);
1769 	if (neigh == NULL) {
1770 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1771 			err = -ENOENT;
1772 			goto out;
1773 		}
1774 
1775 		neigh = __neigh_lookup_errno(tbl, dst, dev);
1776 		if (IS_ERR(neigh)) {
1777 			err = PTR_ERR(neigh);
1778 			goto out;
1779 		}
1780 	} else {
1781 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1782 			err = -EEXIST;
1783 			neigh_release(neigh);
1784 			goto out;
1785 		}
1786 
1787 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1788 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1789 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1790 	}
1791 
1792 	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1793 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1794 
1795 	if (ndm->ndm_flags & NTF_ROUTER)
1796 		flags |= NEIGH_UPDATE_F_ISROUTER;
1797 
1798 	if (ndm->ndm_flags & NTF_USE) {
1799 		neigh_event_send(neigh, NULL);
1800 		err = 0;
1801 	} else
1802 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1803 				   NETLINK_CB(skb).portid);
1804 	neigh_release(neigh);
1805 
1806 out:
1807 	return err;
1808 }
1809 
1810 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1811 {
1812 	struct nlattr *nest;
1813 
1814 	nest = nla_nest_start(skb, NDTA_PARMS);
1815 	if (nest == NULL)
1816 		return -ENOBUFS;
1817 
1818 	if ((parms->dev &&
1819 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1820 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1821 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1822 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1823 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1824 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1825 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1826 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1827 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1828 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1829 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1830 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1831 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1832 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1833 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1834 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1835 			  NDTPA_PAD) ||
1836 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1837 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1838 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1839 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1840 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1841 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1842 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1843 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1844 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1845 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1846 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1847 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1848 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1849 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1850 		goto nla_put_failure;
1851 	return nla_nest_end(skb, nest);
1852 
1853 nla_put_failure:
1854 	nla_nest_cancel(skb, nest);
1855 	return -EMSGSIZE;
1856 }
1857 
1858 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1859 			      u32 pid, u32 seq, int type, int flags)
1860 {
1861 	struct nlmsghdr *nlh;
1862 	struct ndtmsg *ndtmsg;
1863 
1864 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1865 	if (nlh == NULL)
1866 		return -EMSGSIZE;
1867 
1868 	ndtmsg = nlmsg_data(nlh);
1869 
1870 	read_lock_bh(&tbl->lock);
1871 	ndtmsg->ndtm_family = tbl->family;
1872 	ndtmsg->ndtm_pad1   = 0;
1873 	ndtmsg->ndtm_pad2   = 0;
1874 
1875 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1876 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1877 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1878 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1879 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1880 		goto nla_put_failure;
1881 	{
1882 		unsigned long now = jiffies;
1883 		unsigned int flush_delta = now - tbl->last_flush;
1884 		unsigned int rand_delta = now - tbl->last_rand;
1885 		struct neigh_hash_table *nht;
1886 		struct ndt_config ndc = {
1887 			.ndtc_key_len		= tbl->key_len,
1888 			.ndtc_entry_size	= tbl->entry_size,
1889 			.ndtc_entries		= atomic_read(&tbl->entries),
1890 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1891 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1892 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1893 		};
1894 
1895 		rcu_read_lock_bh();
1896 		nht = rcu_dereference_bh(tbl->nht);
1897 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1898 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1899 		rcu_read_unlock_bh();
1900 
1901 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1902 			goto nla_put_failure;
1903 	}
1904 
1905 	{
1906 		int cpu;
1907 		struct ndt_stats ndst;
1908 
1909 		memset(&ndst, 0, sizeof(ndst));
1910 
1911 		for_each_possible_cpu(cpu) {
1912 			struct neigh_statistics	*st;
1913 
1914 			st = per_cpu_ptr(tbl->stats, cpu);
1915 			ndst.ndts_allocs		+= st->allocs;
1916 			ndst.ndts_destroys		+= st->destroys;
1917 			ndst.ndts_hash_grows		+= st->hash_grows;
1918 			ndst.ndts_res_failed		+= st->res_failed;
1919 			ndst.ndts_lookups		+= st->lookups;
1920 			ndst.ndts_hits			+= st->hits;
1921 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1922 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1923 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1924 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1925 			ndst.ndts_table_fulls		+= st->table_fulls;
1926 		}
1927 
1928 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1929 				  NDTA_PAD))
1930 			goto nla_put_failure;
1931 	}
1932 
1933 	BUG_ON(tbl->parms.dev);
1934 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1935 		goto nla_put_failure;
1936 
1937 	read_unlock_bh(&tbl->lock);
1938 	nlmsg_end(skb, nlh);
1939 	return 0;
1940 
1941 nla_put_failure:
1942 	read_unlock_bh(&tbl->lock);
1943 	nlmsg_cancel(skb, nlh);
1944 	return -EMSGSIZE;
1945 }
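
/*
 * Usage sketch (illustrative, not part of the file): the message built
 * above answers RTM_GETNEIGHTBL dump requests; from userspace it is
 * most easily seen through iproute2, e.g.
 *
 *	$ ip ntable show name arp_cache
 *
 * whose output fields map onto the NDTA_* and NDTPA_* attributes.
 */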
1946 
1947 static int neightbl_fill_param_info(struct sk_buff *skb,
1948 				    struct neigh_table *tbl,
1949 				    struct neigh_parms *parms,
1950 				    u32 pid, u32 seq, int type,
1951 				    unsigned int flags)
1952 {
1953 	struct ndtmsg *ndtmsg;
1954 	struct nlmsghdr *nlh;
1955 
1956 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1957 	if (nlh == NULL)
1958 		return -EMSGSIZE;
1959 
1960 	ndtmsg = nlmsg_data(nlh);
1961 
1962 	read_lock_bh(&tbl->lock);
1963 	ndtmsg->ndtm_family = tbl->family;
1964 	ndtmsg->ndtm_pad1   = 0;
1965 	ndtmsg->ndtm_pad2   = 0;
1966 
1967 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1968 	    neightbl_fill_parms(skb, parms) < 0)
1969 		goto errout;
1970 
1971 	read_unlock_bh(&tbl->lock);
1972 	nlmsg_end(skb, nlh);
1973 	return 0;
1974 errout:
1975 	read_unlock_bh(&tbl->lock);
1976 	nlmsg_cancel(skb, nlh);
1977 	return -EMSGSIZE;
1978 }
1979 
1980 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1981 	[NDTA_NAME]		= { .type = NLA_STRING },
1982 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1983 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1984 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1985 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1986 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1987 };
1988 
1989 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1990 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1991 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1992 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1993 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1994 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1995 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1996 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1997 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1998 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1999 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2000 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2001 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2002 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2003 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2004 };
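
/* Note: the NLA_U64 entries above carry millisecond values; neightbl_set()
 * reads them with nla_get_msecs(), which converts them to jiffies. */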
2005 
2006 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2007 			struct netlink_ext_ack *extack)
2008 {
2009 	struct net *net = sock_net(skb->sk);
2010 	struct neigh_table *tbl;
2011 	struct ndtmsg *ndtmsg;
2012 	struct nlattr *tb[NDTA_MAX+1];
2013 	bool found = false;
2014 	int err, tidx;
2015 
2016 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2017 			  nl_neightbl_policy, extack);
2018 	if (err < 0)
2019 		goto errout;
2020 
2021 	if (tb[NDTA_NAME] == NULL) {
2022 		err = -EINVAL;
2023 		goto errout;
2024 	}
2025 
2026 	ndtmsg = nlmsg_data(nlh);
2027 
2028 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2029 		tbl = neigh_tables[tidx];
2030 		if (!tbl)
2031 			continue;
2032 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2033 			continue;
2034 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2035 			found = true;
2036 			break;
2037 		}
2038 	}
2039 
2040 	if (!found)
2041 		return -ENOENT;
2042 
2043 	/*
2044 	 * Take tbl->lock so that the periodic timers always see a
2045 	 * consistent set of values.
2046 	 */
2047 	write_lock_bh(&tbl->lock);
2048 
2049 	if (tb[NDTA_PARMS]) {
2050 		struct nlattr *tbp[NDTPA_MAX+1];
2051 		struct neigh_parms *p;
2052 		int i, ifindex = 0;
2053 
2054 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2055 				       nl_ntbl_parm_policy, extack);
2056 		if (err < 0)
2057 			goto errout_tbl_lock;
2058 
2059 		if (tbp[NDTPA_IFINDEX])
2060 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2061 
2062 		p = lookup_neigh_parms(tbl, net, ifindex);
2063 		if (p == NULL) {
2064 			err = -ENOENT;
2065 			goto errout_tbl_lock;
2066 		}
2067 
2068 		for (i = 1; i <= NDTPA_MAX; i++) {
2069 			if (tbp[i] == NULL)
2070 				continue;
2071 
2072 			switch (i) {
2073 			case NDTPA_QUEUE_LEN:
2074 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2075 					      nla_get_u32(tbp[i]) *
2076 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2077 				break;
2078 			case NDTPA_QUEUE_LENBYTES:
2079 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2080 					      nla_get_u32(tbp[i]));
2081 				break;
2082 			case NDTPA_PROXY_QLEN:
2083 				NEIGH_VAR_SET(p, PROXY_QLEN,
2084 					      nla_get_u32(tbp[i]));
2085 				break;
2086 			case NDTPA_APP_PROBES:
2087 				NEIGH_VAR_SET(p, APP_PROBES,
2088 					      nla_get_u32(tbp[i]));
2089 				break;
2090 			case NDTPA_UCAST_PROBES:
2091 				NEIGH_VAR_SET(p, UCAST_PROBES,
2092 					      nla_get_u32(tbp[i]));
2093 				break;
2094 			case NDTPA_MCAST_PROBES:
2095 				NEIGH_VAR_SET(p, MCAST_PROBES,
2096 					      nla_get_u32(tbp[i]));
2097 				break;
2098 			case NDTPA_MCAST_REPROBES:
2099 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2100 					      nla_get_u32(tbp[i]));
2101 				break;
2102 			case NDTPA_BASE_REACHABLE_TIME:
2103 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2104 					      nla_get_msecs(tbp[i]));
2105 				/* update reachable_time as well; otherwise the change
2106 				 * only takes effect the next time neigh_periodic_work
2107 				 * recomputes it, which can be several minutes away
2108 				 */
2109 				p->reachable_time =
2110 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2111 				break;
2112 			case NDTPA_GC_STALETIME:
2113 				NEIGH_VAR_SET(p, GC_STALETIME,
2114 					      nla_get_msecs(tbp[i]));
2115 				break;
2116 			case NDTPA_DELAY_PROBE_TIME:
2117 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2118 					      nla_get_msecs(tbp[i]));
2119 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2120 				break;
2121 			case NDTPA_RETRANS_TIME:
2122 				NEIGH_VAR_SET(p, RETRANS_TIME,
2123 					      nla_get_msecs(tbp[i]));
2124 				break;
2125 			case NDTPA_ANYCAST_DELAY:
2126 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2127 					      nla_get_msecs(tbp[i]));
2128 				break;
2129 			case NDTPA_PROXY_DELAY:
2130 				NEIGH_VAR_SET(p, PROXY_DELAY,
2131 					      nla_get_msecs(tbp[i]));
2132 				break;
2133 			case NDTPA_LOCKTIME:
2134 				NEIGH_VAR_SET(p, LOCKTIME,
2135 					      nla_get_msecs(tbp[i]));
2136 				break;
2137 			}
2138 		}
2139 	}
2140 
2141 	err = -ENOENT;
2142 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2143 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2144 	    !net_eq(net, &init_net))
2145 		goto errout_tbl_lock;
2146 
2147 	if (tb[NDTA_THRESH1])
2148 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2149 
2150 	if (tb[NDTA_THRESH2])
2151 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2152 
2153 	if (tb[NDTA_THRESH3])
2154 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2155 
2156 	if (tb[NDTA_GC_INTERVAL])
2157 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2158 
2159 	err = 0;
2160 
2161 errout_tbl_lock:
2162 	write_unlock_bh(&tbl->lock);
2163 errout:
2164 	return err;
2165 }
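
/*
 * Example (illustrative, command syntax from iproute2 as I recall it):
 * exercising neightbl_set().  The gc thresholds and gc_interval are
 * table-wide and, per the check above, writable only from the initial
 * netns; NDTPA_* values may be set per device:
 *
 *	$ ip ntable change name arp_cache thresh1 256 thresh2 512 thresh3 1024
 *	$ ip ntable change name arp_cache dev eth0 retrans 500
 */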
2166 
2167 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2168 {
2169 	struct net *net = sock_net(skb->sk);
2170 	int family, tidx, nidx = 0;
2171 	int tbl_skip = cb->args[0];
2172 	int neigh_skip = cb->args[1];
2173 	struct neigh_table *tbl;
2174 
2175 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2176 
2177 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2178 		struct neigh_parms *p;
2179 
2180 		tbl = neigh_tables[tidx];
2181 		if (!tbl)
2182 			continue;
2183 
2184 		if (tidx < tbl_skip || (family && tbl->family != family))
2185 			continue;
2186 
2187 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2188 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2189 				       NLM_F_MULTI) < 0)
2190 			break;
2191 
2192 		nidx = 0;
2193 		p = list_next_entry(&tbl->parms, list);
2194 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2195 			if (!net_eq(neigh_parms_net(p), net))
2196 				continue;
2197 
2198 			if (nidx < neigh_skip)
2199 				goto next;
2200 
2201 			if (neightbl_fill_param_info(skb, tbl, p,
2202 						     NETLINK_CB(cb->skb).portid,
2203 						     cb->nlh->nlmsg_seq,
2204 						     RTM_NEWNEIGHTBL,
2205 						     NLM_F_MULTI) < 0)
2206 				goto out;
2207 		next:
2208 			nidx++;
2209 		}
2210 
2211 		neigh_skip = 0;
2212 	}
2213 out:
2214 	cb->args[0] = tidx;
2215 	cb->args[1] = nidx;
2216 
2217 	return skb->len;
2218 }
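
/* cb->args[0] and cb->args[1] persist the table and parms positions
 * between dump callbacks, so a dump that overflows one skb resumes
 * where it stopped. */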
2219 
2220 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2221 			   u32 pid, u32 seq, int type, unsigned int flags)
2222 {
2223 	unsigned long now = jiffies;
2224 	struct nda_cacheinfo ci;
2225 	struct nlmsghdr *nlh;
2226 	struct ndmsg *ndm;
2227 
2228 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2229 	if (nlh == NULL)
2230 		return -EMSGSIZE;
2231 
2232 	ndm = nlmsg_data(nlh);
2233 	ndm->ndm_family	 = neigh->ops->family;
2234 	ndm->ndm_pad1    = 0;
2235 	ndm->ndm_pad2    = 0;
2236 	ndm->ndm_flags	 = neigh->flags;
2237 	ndm->ndm_type	 = neigh->type;
2238 	ndm->ndm_ifindex = neigh->dev->ifindex;
2239 
2240 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2241 		goto nla_put_failure;
2242 
2243 	read_lock_bh(&neigh->lock);
2244 	ndm->ndm_state	 = neigh->nud_state;
2245 	if (neigh->nud_state & NUD_VALID) {
2246 		char haddr[MAX_ADDR_LEN];
2247 
2248 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2249 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2250 			read_unlock_bh(&neigh->lock);
2251 			goto nla_put_failure;
2252 		}
2253 	}
2254 
2255 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2256 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2257 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2258 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2259 	read_unlock_bh(&neigh->lock);
2260 
2261 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2262 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2263 		goto nla_put_failure;
2264 
2265 	nlmsg_end(skb, nlh);
2266 	return 0;
2267 
2268 nla_put_failure:
2269 	nlmsg_cancel(skb, nlh);
2270 	return -EMSGSIZE;
2271 }
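
/*
 * Illustrative mapping (not part of the file): one RTM_NEWNEIGH message
 * built above corresponds to a line of 'ip neigh show' output such as
 *
 *	192.0.2.1 dev eth0 lladdr 52:54:00:12:34:56 REACHABLE
 *
 * where the address comes from NDA_DST, lladdr from NDA_LLADDR and the
 * state from ndm_state.
 */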
2272 
2273 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2274 			    u32 pid, u32 seq, int type, unsigned int flags,
2275 			    struct neigh_table *tbl)
2276 {
2277 	struct nlmsghdr *nlh;
2278 	struct ndmsg *ndm;
2279 
2280 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2281 	if (nlh == NULL)
2282 		return -EMSGSIZE;
2283 
2284 	ndm = nlmsg_data(nlh);
2285 	ndm->ndm_family	 = tbl->family;
2286 	ndm->ndm_pad1    = 0;
2287 	ndm->ndm_pad2    = 0;
2288 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2289 	ndm->ndm_type	 = RTN_UNICAST;
2290 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2291 	ndm->ndm_state	 = NUD_NONE;
2292 
2293 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2294 		goto nla_put_failure;
2295 
2296 	nlmsg_end(skb, nlh);
2297 	return 0;
2298 
2299 nla_put_failure:
2300 	nlmsg_cancel(skb, nlh);
2301 	return -EMSGSIZE;
2302 }
2303 
2304 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2305 {
2306 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2307 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2308 }
2309 
2310 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2311 {
2312 	struct net_device *master;
2313 
2314 	if (!master_idx)
2315 		return false;
2316 
2317 	master = netdev_master_upper_dev_get(dev);
2318 	if (!master || master->ifindex != master_idx)
2319 		return true;
2320 
2321 	return false;
2322 }
2323 
2324 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2325 {
2326 	if (filter_idx && dev->ifindex != filter_idx)
2327 		return true;
2328 
2329 	return false;
2330 }
2331 
2332 struct neigh_dump_filter {
2333 	int master_idx;
2334 	int dev_idx;
2335 };
2336 
2337 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2338 			    struct netlink_callback *cb,
2339 			    struct neigh_dump_filter *filter)
2340 {
2341 	struct net *net = sock_net(skb->sk);
2342 	struct neighbour *n;
2343 	int rc, h, s_h = cb->args[1];
2344 	int idx, s_idx = idx = cb->args[2];
2345 	struct neigh_hash_table *nht;
2346 	unsigned int flags = NLM_F_MULTI;
2347 
2348 	if (filter->dev_idx || filter->master_idx)
2349 		flags |= NLM_F_DUMP_FILTERED;
2350 
2351 	rcu_read_lock_bh();
2352 	nht = rcu_dereference_bh(tbl->nht);
2353 
2354 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2355 		if (h > s_h)
2356 			s_idx = 0;
2357 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2358 		     n != NULL;
2359 		     n = rcu_dereference_bh(n->next)) {
2360 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2361 				goto next;
2362 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2363 			    neigh_master_filtered(n->dev, filter->master_idx))
2364 				goto next;
2365 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2366 					    cb->nlh->nlmsg_seq,
2367 					    RTM_NEWNEIGH,
2368 					    flags) < 0) {
2369 				rc = -1;
2370 				goto out;
2371 			}
2372 next:
2373 			idx++;
2374 		}
2375 	}
2376 	rc = skb->len;
2377 out:
2378 	rcu_read_unlock_bh();
2379 	cb->args[1] = h;
2380 	cb->args[2] = idx;
2381 	return rc;
2382 }
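
/* Neighbour dumps keep their resume state in cb->args[1] (hash bucket)
 * and cb->args[2] (index within the bucket); pneigh_dump_table() uses
 * args[3]/args[4], so the two passes never clobber each other. */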
2383 
2384 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2385 			     struct netlink_callback *cb,
2386 			     struct neigh_dump_filter *filter)
2387 {
2388 	struct pneigh_entry *n;
2389 	struct net *net = sock_net(skb->sk);
2390 	int rc, h, s_h = cb->args[3];
2391 	int idx, s_idx = idx = cb->args[4];
2392 	unsigned int flags = NLM_F_MULTI;
2393 
2394 	if (filter->dev_idx || filter->master_idx)
2395 		flags |= NLM_F_DUMP_FILTERED;
2396 
2397 	read_lock_bh(&tbl->lock);
2398 
2399 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2400 		if (h > s_h)
2401 			s_idx = 0;
2402 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2403 			if (idx < s_idx || pneigh_net(n) != net)
2404 				goto next;
2405 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2406 			    neigh_master_filtered(n->dev, filter->master_idx))
2407 				goto next;
2408 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2409 					    cb->nlh->nlmsg_seq,
2410 					    RTM_NEWNEIGH, flags, tbl) < 0) {
2411 				read_unlock_bh(&tbl->lock);
2412 				rc = -1;
2413 				goto out;
2414 			}
2415 		next:
2416 			idx++;
2417 		}
2418 	}
2419 
2420 	read_unlock_bh(&tbl->lock);
2421 	rc = skb->len;
2422 out:
2423 	cb->args[3] = h;
2424 	cb->args[4] = idx;
2425 	return rc;
2427 }
2428 
2429 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2430 {
2431 	const struct nlmsghdr *nlh = cb->nlh;
2432 	struct neigh_dump_filter filter = {};
2433 	struct nlattr *tb[NDA_MAX + 1];
2434 	struct neigh_table *tbl;
2435 	int t, family, s_t;
2436 	int proxy = 0;
2437 	int err;
2438 
2439 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2440 
2441 	/* check whether a full ndmsg structure is present; the family
2442 	 * member sits at the same offset in ndmsg and rtgenmsg
2443 	 */
2444 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2445 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2446 		proxy = 1;
2447 
2448 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
2449 	if (!err) {
2450 		if (tb[NDA_IFINDEX]) {
2451 			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2452 				return -EINVAL;
2453 			filter.dev_idx = nla_get_u32(tb[NDA_IFINDEX]);
2454 		}
2455 		if (tb[NDA_MASTER]) {
2456 			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2457 				return -EINVAL;
2458 			filter.master_idx = nla_get_u32(tb[NDA_MASTER]);
2459 		}
2460 	}
2461 	s_t = cb->args[0];
2462 
2463 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2464 		tbl = neigh_tables[t];
2465 
2466 		if (!tbl)
2467 			continue;
2468 		if (t < s_t || (family && tbl->family != family))
2469 			continue;
2470 		if (t > s_t)
2471 			memset(&cb->args[1], 0, sizeof(cb->args) -
2472 						sizeof(cb->args[0]));
2473 		if (proxy)
2474 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2475 		else
2476 			err = neigh_dump_table(tbl, skb, cb, &filter);
2477 		if (err < 0)
2478 			break;
2479 	}
2480 
2481 	cb->args[0] = t;
2482 	return skb->len;
2483 }
2484 
2485 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2486 {
2487 	int chain;
2488 	struct neigh_hash_table *nht;
2489 
2490 	rcu_read_lock_bh();
2491 	nht = rcu_dereference_bh(tbl->nht);
2492 
2493 	read_lock(&tbl->lock); /* avoid resizes */
2494 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2495 		struct neighbour *n;
2496 
2497 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2498 		     n != NULL;
2499 		     n = rcu_dereference_bh(n->next))
2500 			cb(n, cookie);
2501 	}
2502 	read_unlock(&tbl->lock);
2503 	rcu_read_unlock_bh();
2504 }
2505 EXPORT_SYMBOL(neigh_for_each);
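
/*
 * Usage sketch (illustrative): counting the entries of a table with
 * neigh_for_each().  The callback runs with tbl->lock read-held and BH
 * disabled, so it must not sleep or re-enter the neighbour layer:
 *
 *	static void count_one(struct neighbour *n, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int count = 0;
 *	neigh_for_each(&arp_tbl, count_one, &count);
 */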
2506 
2507 /* The tbl->lock must be held as a writer and BH disabled. */
2508 void __neigh_for_each_release(struct neigh_table *tbl,
2509 			      int (*cb)(struct neighbour *))
2510 {
2511 	int chain;
2512 	struct neigh_hash_table *nht;
2513 
2514 	nht = rcu_dereference_protected(tbl->nht,
2515 					lockdep_is_held(&tbl->lock));
2516 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2517 		struct neighbour *n;
2518 		struct neighbour __rcu **np;
2519 
2520 		np = &nht->hash_buckets[chain];
2521 		while ((n = rcu_dereference_protected(*np,
2522 					lockdep_is_held(&tbl->lock))) != NULL) {
2523 			int release;
2524 
2525 			write_lock(&n->lock);
2526 			release = cb(n);
2527 			if (release) {
2528 				rcu_assign_pointer(*np,
2529 					rcu_dereference_protected(n->next,
2530 						lockdep_is_held(&tbl->lock)));
2531 				n->dead = 1;
2532 			} else
2533 				np = &n->next;
2534 			write_unlock(&n->lock);
2535 			if (release)
2536 				neigh_cleanup_and_release(n);
2537 		}
2538 	}
2539 }
2540 EXPORT_SYMBOL(__neigh_for_each_release);
2541 
2542 int neigh_xmit(int index, struct net_device *dev,
2543 	       const void *addr, struct sk_buff *skb)
2544 {
2545 	int err = -EAFNOSUPPORT;
2546 	if (likely(index < NEIGH_NR_TABLES)) {
2547 		struct neigh_table *tbl;
2548 		struct neighbour *neigh;
2549 
2550 		tbl = neigh_tables[index];
2551 		if (!tbl)
2552 			goto out;
2553 		rcu_read_lock_bh();
2554 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2555 		if (!neigh)
2556 			neigh = __neigh_create(tbl, addr, dev, false);
2557 		err = PTR_ERR(neigh);
2558 		if (IS_ERR(neigh)) {
2559 			rcu_read_unlock_bh();
2560 			goto out_kfree_skb;
2561 		}
2562 		err = neigh->output(neigh, skb);
2563 		rcu_read_unlock_bh();
2564 	} else if (index == NEIGH_LINK_TABLE) {
2566 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2567 				      addr, NULL, skb->len);
2568 		if (err < 0)
2569 			goto out_kfree_skb;
2570 		err = dev_queue_xmit(skb);
2571 	}
2572 out:
2573 	return err;
2574 out_kfree_skb:
2575 	kfree_skb(skb);
2576 	goto out;
2577 }
2578 EXPORT_SYMBOL(neigh_xmit);
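
/*
 * Usage sketch (illustrative, assuming dev, next_hop and skb are already
 * set up): NEIGH_ARP_TABLE/NEIGH_ND_TABLE resolve 'addr' as a
 * network-layer address through the cache, while NEIGH_LINK_TABLE treats
 * 'addr' as a ready-made link-layer address:
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &next_hop, skb);
 *
 * The out_kfree_skb path consumes the skb, so callers must not free it
 * again after an error from the header or neighbour setup.
 */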
2579 
2580 #ifdef CONFIG_PROC_FS
2581 
2582 static struct neighbour *neigh_get_first(struct seq_file *seq)
2583 {
2584 	struct neigh_seq_state *state = seq->private;
2585 	struct net *net = seq_file_net(seq);
2586 	struct neigh_hash_table *nht = state->nht;
2587 	struct neighbour *n = NULL;
2588 	int bucket = state->bucket;
2589 
2590 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2591 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2592 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2593 
2594 		while (n) {
2595 			if (!net_eq(dev_net(n->dev), net))
2596 				goto next;
2597 			if (state->neigh_sub_iter) {
2598 				loff_t fakep = 0;
2599 				void *v;
2600 
2601 				v = state->neigh_sub_iter(state, n, &fakep);
2602 				if (!v)
2603 					goto next;
2604 			}
2605 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2606 				break;
2607 			if (n->nud_state & ~NUD_NOARP)
2608 				break;
2609 next:
2610 			n = rcu_dereference_bh(n->next);
2611 		}
2612 
2613 		if (n)
2614 			break;
2615 	}
2616 	state->bucket = bucket;
2617 
2618 	return n;
2619 }
2620 
2621 static struct neighbour *neigh_get_next(struct seq_file *seq,
2622 					struct neighbour *n,
2623 					loff_t *pos)
2624 {
2625 	struct neigh_seq_state *state = seq->private;
2626 	struct net *net = seq_file_net(seq);
2627 	struct neigh_hash_table *nht = state->nht;
2628 
2629 	if (state->neigh_sub_iter) {
2630 		void *v = state->neigh_sub_iter(state, n, pos);
2631 		if (v)
2632 			return n;
2633 	}
2634 	n = rcu_dereference_bh(n->next);
2635 
2636 	while (1) {
2637 		while (n) {
2638 			if (!net_eq(dev_net(n->dev), net))
2639 				goto next;
2640 			if (state->neigh_sub_iter) {
2641 				void *v = state->neigh_sub_iter(state, n, pos);
2642 				if (v)
2643 					return n;
2644 				goto next;
2645 			}
2646 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2647 				break;
2648 
2649 			if (n->nud_state & ~NUD_NOARP)
2650 				break;
2651 next:
2652 			n = rcu_dereference_bh(n->next);
2653 		}
2654 
2655 		if (n)
2656 			break;
2657 
2658 		if (++state->bucket >= (1 << nht->hash_shift))
2659 			break;
2660 
2661 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2662 	}
2663 
2664 	if (n && pos)
2665 		--(*pos);
2666 	return n;
2667 }
2668 
2669 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2670 {
2671 	struct neighbour *n = neigh_get_first(seq);
2672 
2673 	if (n) {
2674 		--(*pos);
2675 		while (*pos) {
2676 			n = neigh_get_next(seq, n, pos);
2677 			if (!n)
2678 				break;
2679 		}
2680 	}
2681 	return *pos ? NULL : n;
2682 }
2683 
2684 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2685 {
2686 	struct neigh_seq_state *state = seq->private;
2687 	struct net *net = seq_file_net(seq);
2688 	struct neigh_table *tbl = state->tbl;
2689 	struct pneigh_entry *pn = NULL;
2690 	int bucket = state->bucket;
2691 
2692 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2693 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2694 		pn = tbl->phash_buckets[bucket];
2695 		while (pn && !net_eq(pneigh_net(pn), net))
2696 			pn = pn->next;
2697 		if (pn)
2698 			break;
2699 	}
2700 	state->bucket = bucket;
2701 
2702 	return pn;
2703 }
2704 
2705 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2706 					    struct pneigh_entry *pn,
2707 					    loff_t *pos)
2708 {
2709 	struct neigh_seq_state *state = seq->private;
2710 	struct net *net = seq_file_net(seq);
2711 	struct neigh_table *tbl = state->tbl;
2712 
2713 	do {
2714 		pn = pn->next;
2715 	} while (pn && !net_eq(pneigh_net(pn), net));
2716 
2717 	while (!pn) {
2718 		if (++state->bucket > PNEIGH_HASHMASK)
2719 			break;
2720 		pn = tbl->phash_buckets[state->bucket];
2721 		while (pn && !net_eq(pneigh_net(pn), net))
2722 			pn = pn->next;
2723 		if (pn)
2724 			break;
2725 	}
2726 
2727 	if (pn && pos)
2728 		--(*pos);
2729 
2730 	return pn;
2731 }
2732 
2733 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2734 {
2735 	struct pneigh_entry *pn = pneigh_get_first(seq);
2736 
2737 	if (pn) {
2738 		--(*pos);
2739 		while (*pos) {
2740 			pn = pneigh_get_next(seq, pn, pos);
2741 			if (!pn)
2742 				break;
2743 		}
2744 	}
2745 	return *pos ? NULL : pn;
2746 }
2747 
2748 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2749 {
2750 	struct neigh_seq_state *state = seq->private;
2751 	void *rc;
2752 	loff_t idxpos = *pos;
2753 
2754 	rc = neigh_get_idx(seq, &idxpos);
2755 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2756 		rc = pneigh_get_idx(seq, &idxpos);
2757 
2758 	return rc;
2759 }
2760 
2761 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2762 	__acquires(rcu_bh)
2763 {
2764 	struct neigh_seq_state *state = seq->private;
2765 
2766 	state->tbl = tbl;
2767 	state->bucket = 0;
2768 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2769 
2770 	rcu_read_lock_bh();
2771 	state->nht = rcu_dereference_bh(tbl->nht);
2772 
2773 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2774 }
2775 EXPORT_SYMBOL(neigh_seq_start);
2776 
2777 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2778 {
2779 	struct neigh_seq_state *state;
2780 	void *rc;
2781 
2782 	if (v == SEQ_START_TOKEN) {
2783 		rc = neigh_get_first(seq);
2784 		goto out;
2785 	}
2786 
2787 	state = seq->private;
2788 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2789 		rc = neigh_get_next(seq, v, NULL);
2790 		if (rc)
2791 			goto out;
2792 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2793 			rc = pneigh_get_first(seq);
2794 	} else {
2795 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2796 		rc = pneigh_get_next(seq, v, NULL);
2797 	}
2798 out:
2799 	++(*pos);
2800 	return rc;
2801 }
2802 EXPORT_SYMBOL(neigh_seq_next);
2803 
2804 void neigh_seq_stop(struct seq_file *seq, void *v)
2805 	__releases(rcu_bh)
2806 {
2807 	rcu_read_unlock_bh();
2808 }
2809 EXPORT_SYMBOL(neigh_seq_stop);
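
/*
 * Usage sketch (illustrative): a protocol seq_file plugs these helpers
 * in directly; /proc/net/arp, for instance, starts its walk with
 *
 *	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 *
 * and pairs it with neigh_seq_next() and neigh_seq_stop().
 */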
2810 
2811 /* statistics via seq_file */
2812 
2813 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2814 {
2815 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2816 	int cpu;
2817 
2818 	if (*pos == 0)
2819 		return SEQ_START_TOKEN;
2820 
2821 	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
2822 		if (!cpu_possible(cpu))
2823 			continue;
2824 		*pos = cpu + 1;
2825 		return per_cpu_ptr(tbl->stats, cpu);
2826 	}
2827 	return NULL;
2828 }
2829 
2830 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2831 {
2832 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2833 	int cpu;
2834 
2835 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2836 		if (!cpu_possible(cpu))
2837 			continue;
2838 		*pos = cpu + 1;
2839 		return per_cpu_ptr(tbl->stats, cpu);
2840 	}
2841 	return NULL;
2842 }
2843 
2844 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2845 {
2846 
2847 }
2848 
2849 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2850 {
2851 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2852 	struct neigh_statistics *st = v;
2853 
2854 	if (v == SEQ_START_TOKEN) {
2855 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2856 		return 0;
2857 	}
2858 
2859 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2860 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2861 		   atomic_read(&tbl->entries),
2862 
2863 		   st->allocs,
2864 		   st->destroys,
2865 		   st->hash_grows,
2866 
2867 		   st->lookups,
2868 		   st->hits,
2869 
2870 		   st->res_failed,
2871 
2872 		   st->rcv_probes_mcast,
2873 		   st->rcv_probes_ucast,
2874 
2875 		   st->periodic_gc_runs,
2876 		   st->forced_gc_runs,
2877 		   st->unres_discards,
2878 		   st->table_fulls
2879 		   );
2880 
2881 	return 0;
2882 }
2883 
2884 static const struct seq_operations neigh_stat_seq_ops = {
2885 	.start	= neigh_stat_seq_start,
2886 	.next	= neigh_stat_seq_next,
2887 	.stop	= neigh_stat_seq_stop,
2888 	.show	= neigh_stat_seq_show,
2889 };
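
/* One line is printed per possible CPU, with cumulative counters in
 * hex; see e.g. /proc/net/stat/arp_cache. */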
2890 #endif /* CONFIG_PROC_FS */
2891 
2892 static inline size_t neigh_nlmsg_size(void)
2893 {
2894 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2895 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2896 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2897 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2898 	       + nla_total_size(4); /* NDA_PROBES */
2899 }
2900 
2901 static void __neigh_notify(struct neighbour *n, int type, int flags,
2902 			   u32 pid)
2903 {
2904 	struct net *net = dev_net(n->dev);
2905 	struct sk_buff *skb;
2906 	int err = -ENOBUFS;
2907 
2908 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2909 	if (skb == NULL)
2910 		goto errout;
2911 
2912 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
2913 	if (err < 0) {
2914 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2915 		WARN_ON(err == -EMSGSIZE);
2916 		kfree_skb(skb);
2917 		goto errout;
2918 	}
2919 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2920 	return;
2921 errout:
2922 	if (err < 0)
2923 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2924 }
2925 
2926 void neigh_app_ns(struct neighbour *n)
2927 {
2928 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
2929 }
2930 EXPORT_SYMBOL(neigh_app_ns);
2931 
2932 #ifdef CONFIG_SYSCTL
2933 static int zero;
2934 static int int_max = INT_MAX;
2935 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2936 
2937 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2938 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2939 {
2940 	int size, ret;
2941 	struct ctl_table tmp = *ctl;
2942 
2943 	tmp.extra1 = &zero;
2944 	tmp.extra2 = &unres_qlen_max;
2945 	tmp.data = &size;
2946 
2947 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2948 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2949 
2950 	if (write && !ret)
2951 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2952 	return ret;
2953 }
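
/*
 * unres_qlen is presented in packets but stored in bytes (a sketch,
 * with ETH_FRAME_LEN == 1514):
 *
 *	write:	qlen_bytes = packets * SKB_TRUESIZE(1514)
 *	read:	packets = qlen_bytes / SKB_TRUESIZE(1514)
 *
 * so a read rounds down if unres_qlen_bytes was set directly to a value
 * that is not a multiple of SKB_TRUESIZE(ETH_FRAME_LEN).
 */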
2954 
2955 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2956 						   int family)
2957 {
2958 	switch (family) {
2959 	case AF_INET:
2960 		return __in_dev_arp_parms_get_rcu(dev);
2961 	case AF_INET6:
2962 		return __in6_dev_nd_parms_get_rcu(dev);
2963 	}
2964 	return NULL;
2965 }
2966 
2967 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2968 				  int index)
2969 {
2970 	struct net_device *dev;
2971 	int family = neigh_parms_family(p);
2972 
2973 	rcu_read_lock();
2974 	for_each_netdev_rcu(net, dev) {
2975 		struct neigh_parms *dst_p =
2976 				neigh_get_dev_parms_rcu(dev, family);
2977 
2978 		if (dst_p && !test_bit(index, dst_p->data_state))
2979 			dst_p->data[index] = p->data[index];
2980 	}
2981 	rcu_read_unlock();
2982 }
2983 
2984 static void neigh_proc_update(struct ctl_table *ctl, int write)
2985 {
2986 	struct net_device *dev = ctl->extra1;
2987 	struct neigh_parms *p = ctl->extra2;
2988 	struct net *net = neigh_parms_net(p);
2989 	int index = (int *) ctl->data - p->data;
2990 
2991 	if (!write)
2992 		return;
2993 
2994 	set_bit(index, p->data_state);
2995 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2996 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2997 	if (!dev) /* NULL dev means this is default value */
2998 		neigh_copy_dflt_parms(net, p, index);
2999 }
3000 
3001 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3002 					   void __user *buffer,
3003 					   size_t *lenp, loff_t *ppos)
3004 {
3005 	struct ctl_table tmp = *ctl;
3006 	int ret;
3007 
3008 	tmp.extra1 = &zero;
3009 	tmp.extra2 = &int_max;
3010 
3011 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3012 	neigh_proc_update(ctl, write);
3013 	return ret;
3014 }
3015 
3016 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3017 			void __user *buffer, size_t *lenp, loff_t *ppos)
3018 {
3019 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3020 
3021 	neigh_proc_update(ctl, write);
3022 	return ret;
3023 }
3024 EXPORT_SYMBOL(neigh_proc_dointvec);
3025 
3026 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3027 				void __user *buffer,
3028 				size_t *lenp, loff_t *ppos)
3029 {
3030 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3031 
3032 	neigh_proc_update(ctl, write);
3033 	return ret;
3034 }
3035 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3036 
3037 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3038 					      void __user *buffer,
3039 					      size_t *lenp, loff_t *ppos)
3040 {
3041 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3042 
3043 	neigh_proc_update(ctl, write);
3044 	return ret;
3045 }
3046 
3047 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3048 				   void __user *buffer,
3049 				   size_t *lenp, loff_t *ppos)
3050 {
3051 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3052 
3053 	neigh_proc_update(ctl, write);
3054 	return ret;
3055 }
3056 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3057 
3058 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3059 					  void __user *buffer,
3060 					  size_t *lenp, loff_t *ppos)
3061 {
3062 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3063 
3064 	neigh_proc_update(ctl, write);
3065 	return ret;
3066 }
3067 
3068 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3069 					  void __user *buffer,
3070 					  size_t *lenp, loff_t *ppos)
3071 {
3072 	struct neigh_parms *p = ctl->extra2;
3073 	int ret;
3074 
3075 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3076 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3077 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3078 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3079 	else
3080 		ret = -1;
3081 
3082 	if (write && ret == 0) {
3083 		/* update reachable_time as well; otherwise the change only
3084 		 * takes effect the next time neigh_periodic_work recomputes
3085 		 * it, which can take several minutes
3086 		 */
3087 		p->reachable_time =
3088 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3089 	}
3090 	return ret;
3091 }
3092 
3093 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3094 	(&((struct neigh_parms *) 0)->data[index])
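
/* This is offsetof() in disguise: the entry's .data holds the offset of
 * data[index], and neigh_sysctl_register() adds the &neigh_parms base
 * back in with 'data += (long) p'. */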
3095 
3096 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3097 	[NEIGH_VAR_ ## attr] = { \
3098 		.procname	= name, \
3099 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3100 		.maxlen		= sizeof(int), \
3101 		.mode		= mval, \
3102 		.proc_handler	= proc, \
3103 	}
3104 
3105 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3106 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3107 
3108 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3109 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3110 
3111 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3112 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3113 
3114 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3115 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3116 
3117 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3118 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3119 
3120 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3121 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3122 
3123 static struct neigh_sysctl_table {
3124 	struct ctl_table_header *sysctl_header;
3125 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3126 } neigh_sysctl_template __read_mostly = {
3127 	.neigh_vars = {
3128 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3129 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3130 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3131 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3132 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3133 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3134 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3135 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3136 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3137 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3138 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3139 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3140 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3141 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3142 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3143 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3144 		[NEIGH_VAR_GC_INTERVAL] = {
3145 			.procname	= "gc_interval",
3146 			.maxlen		= sizeof(int),
3147 			.mode		= 0644,
3148 			.proc_handler	= proc_dointvec_jiffies,
3149 		},
3150 		[NEIGH_VAR_GC_THRESH1] = {
3151 			.procname	= "gc_thresh1",
3152 			.maxlen		= sizeof(int),
3153 			.mode		= 0644,
3154 			.extra1		= &zero,
3155 			.extra2		= &int_max,
3156 			.proc_handler	= proc_dointvec_minmax,
3157 		},
3158 		[NEIGH_VAR_GC_THRESH2] = {
3159 			.procname	= "gc_thresh2",
3160 			.maxlen		= sizeof(int),
3161 			.mode		= 0644,
3162 			.extra1		= &zero,
3163 			.extra2		= &int_max,
3164 			.proc_handler	= proc_dointvec_minmax,
3165 		},
3166 		[NEIGH_VAR_GC_THRESH3] = {
3167 			.procname	= "gc_thresh3",
3168 			.maxlen		= sizeof(int),
3169 			.mode		= 0644,
3170 			.extra1		= &zero,
3171 			.extra2		= &int_max,
3172 			.proc_handler	= proc_dointvec_minmax,
3173 		},
3174 		{},
3175 	},
3176 };
3177 
3178 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3179 			  proc_handler *handler)
3180 {
3181 	int i;
3182 	struct neigh_sysctl_table *t;
3183 	const char *dev_name_source;
3184 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3185 	char *p_name;
3186 
3187 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3188 	if (!t)
3189 		goto err;
3190 
3191 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3192 		t->neigh_vars[i].data += (long) p;
3193 		t->neigh_vars[i].extra1 = dev;
3194 		t->neigh_vars[i].extra2 = p;
3195 	}
3196 
3197 	if (dev) {
3198 		dev_name_source = dev->name;
3199 		/* Terminate the table early */
3200 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3201 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3202 	} else {
3203 		struct neigh_table *tbl = p->tbl;
3204 		dev_name_source = "default";
3205 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3206 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3207 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3208 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3209 	}
3210 
3211 	if (handler) {
3212 		/* RetransTime */
3213 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3214 		/* ReachableTime */
3215 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3216 		/* RetransTime (in milliseconds) */
3217 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3218 		/* ReachableTime (in milliseconds) */
3219 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3220 	} else {
3221 		/* Those handlers will update p->reachable_time after
3222 		 * base_reachable_time(_ms) is set to ensure the new timer starts being
3223 		 * applied after the next neighbour update instead of waiting for
3224 		 * neigh_periodic_work to update its value (can be multiple minutes)
3225 		 * So any handler that replaces them should do this as well
3226 		 */
3227 		/* ReachableTime */
3228 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3229 			neigh_proc_base_reachable_time;
3230 		/* ReachableTime (in milliseconds) */
3231 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3232 			neigh_proc_base_reachable_time;
3233 	}
3234 
3235 	/* Don't export sysctls to unprivileged users */
3236 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3237 		t->neigh_vars[0].procname = NULL;
3238 
3239 	switch (neigh_parms_family(p)) {
3240 	case AF_INET:
3241 		p_name = "ipv4";
3242 		break;
3243 	case AF_INET6:
3244 		p_name = "ipv6";
3245 		break;
3246 	default:
3247 		BUG();
3248 	}
3249 
3250 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3251 		p_name, dev_name_source);
3252 	t->sysctl_header =
3253 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3254 	if (!t->sysctl_header)
3255 		goto free;
3256 
3257 	p->sysctl_table = t;
3258 	return 0;
3259 
3260 free:
3261 	kfree(t);
3262 err:
3263 	return -ENOBUFS;
3264 }
3265 EXPORT_SYMBOL(neigh_sysctl_register);
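
/*
 * Illustrative result: for AF_INET and device "eth0" the template above
 * is registered as
 *
 *	/proc/sys/net/ipv4/neigh/eth0/mcast_solicit
 *	/proc/sys/net/ipv4/neigh/eth0/retrans_time_ms
 *	...
 *
 * while the dev == NULL call provides net/ipv4/neigh/default/ together
 * with the table-wide gc_* entries.
 */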
3266 
3267 void neigh_sysctl_unregister(struct neigh_parms *p)
3268 {
3269 	if (p->sysctl_table) {
3270 		struct neigh_sysctl_table *t = p->sysctl_table;
3271 		p->sysctl_table = NULL;
3272 		unregister_net_sysctl_table(t->sysctl_header);
3273 		kfree(t);
3274 	}
3275 }
3276 EXPORT_SYMBOL(neigh_sysctl_unregister);
3277 
3278 #endif	/* CONFIG_SYSCTL */
3279 
3280 static int __init neigh_init(void)
3281 {
3282 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3283 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3284 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
3285 
3286 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3287 		      0);
3288 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3289 
3290 	return 0;
3291 }
3292 
3293 subsys_initcall(neigh_init);
3294