xref: /linux/net/core/neighbour.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(struct timer_list *t);
55 static void __neigh_notify(struct neighbour *n, int type, int flags,
56 			   u32 pid);
57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 				    struct net_device *dev);
60 
61 #ifdef CONFIG_PROC_FS
62 static const struct seq_operations neigh_stat_seq_ops;
63 #endif
64 
65 /*
66    Neighbour hash table buckets are protected by the rwlock tbl->lock.
67 
68    - All scans/updates of the hash buckets MUST be made under this lock.
69    - NOTHING clever should be done under this lock: no callbacks
70      into protocol backends, no attempts to send anything to the network.
71      Doing so will deadlock if the backend/driver wants to use the
72      neighbour cache.
73    - If the entry requires some non-trivial action, increase
74      its reference count and release the table lock.
75 
76    Neighbour entries are protected:
77    - by their reference count.
78    - by the rwlock neigh->lock.
79 
80    The reference count prevents destruction.
81 
82    neigh->lock mainly serializes the ll address data and its validity state.
83    However, the same lock is also used to protect other entry fields:
84     - the timer
85     - the resolution queue
86 
87    Again, nothing clever shall be done under neigh->lock;
88    the most complicated operation we allow is dev->hard_header.
89    dev->hard_header is assumed to be simple and to make no
90    callbacks into the neighbour tables.
91  */
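
/* A minimal sketch of the rule above: pin the entry under tbl->lock,
 * drop the lock, and only then do any non-trivial work on it.  The
 * helpers find_entry_in_bucket() and do_slow_work() are hypothetical,
 * for illustration only.
 *
 *	write_lock_bh(&tbl->lock);
 *	n = find_entry_in_bucket(tbl, pkey);	// hypothetical bucket scan
 *	if (n)
 *		neigh_hold(n);			// refcount prevents destruction
 *	write_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_slow_work(n);		// may call into backends/drivers
 *		neigh_release(n);
 *	}
 */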
92 
93 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
94 {
95 	kfree_skb(skb);
96 	return -ENETDOWN;
97 }
98 
99 static void neigh_cleanup_and_release(struct neighbour *neigh)
100 {
101 	if (neigh->parms->neigh_cleanup)
102 		neigh->parms->neigh_cleanup(neigh);
103 
104 	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
105 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
106 	neigh_release(neigh);
107 }
108 
109 /*
110  * Returns a value uniformly distributed over (1/2)*base...(3/2)*base.
111  * This corresponds to the default IPv6 settings and is not overridable,
112  * because it is a really reasonable choice.
113  */
114 
115 unsigned long neigh_rand_reach_time(unsigned long base)
116 {
117 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
118 }
119 EXPORT_SYMBOL(neigh_rand_reach_time);
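
/* A worked example: with base = 30 * HZ (a common base_reachable_time),
 * (prandom_u32() % base) is uniform over [0s, 30s) and (base >> 1)
 * adds 15s, so the result is uniform over [15s, 45s), i.e. exactly
 * (1/2)*base ... (3/2)*base as described above.
 */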
120 
121 
122 static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
123 		      struct neighbour __rcu **np, struct neigh_table *tbl)
124 {
125 	bool retval = false;
126 
127 	write_lock(&n->lock);
128 	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
129 	    !(n->flags & flags)) {
130 		struct neighbour *neigh;
131 
132 		neigh = rcu_dereference_protected(n->next,
133 						  lockdep_is_held(&tbl->lock));
134 		rcu_assign_pointer(*np, neigh);
135 		n->dead = 1;
136 		retval = true;
137 	}
138 	write_unlock(&n->lock);
139 	if (retval)
140 		neigh_cleanup_and_release(n);
141 	return retval;
142 }
143 
144 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
145 {
146 	struct neigh_hash_table *nht;
147 	void *pkey = ndel->primary_key;
148 	u32 hash_val;
149 	struct neighbour *n;
150 	struct neighbour __rcu **np;
151 
152 	nht = rcu_dereference_protected(tbl->nht,
153 					lockdep_is_held(&tbl->lock));
154 	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
155 	hash_val = hash_val >> (32 - nht->hash_shift);
156 
157 	np = &nht->hash_buckets[hash_val];
158 	while ((n = rcu_dereference_protected(*np,
159 					      lockdep_is_held(&tbl->lock)))) {
160 		if (n == ndel)
161 			return neigh_del(n, 0, 0, np, tbl);
162 		np = &n->next;
163 	}
164 	return false;
165 }
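
/* The bucket index above is taken from the top hash_shift bits of the
 * 32-bit hash value, hence the ">> (32 - nht->hash_shift)".  A worked
 * example: with hash_shift = 3 there are 1 << 3 = 8 buckets, and a
 * hash of 0xa0000000 maps to bucket 0xa0000000 >> 29 = 5.
 */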
166 
167 static int neigh_forced_gc(struct neigh_table *tbl)
168 {
169 	int shrunk = 0;
170 	int i;
171 	struct neigh_hash_table *nht;
172 
173 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
174 
175 	write_lock_bh(&tbl->lock);
176 	nht = rcu_dereference_protected(tbl->nht,
177 					lockdep_is_held(&tbl->lock));
178 	for (i = 0; i < (1 << nht->hash_shift); i++) {
179 		struct neighbour *n;
180 		struct neighbour __rcu **np;
181 
182 		np = &nht->hash_buckets[i];
183 		while ((n = rcu_dereference_protected(*np,
184 					lockdep_is_held(&tbl->lock))) != NULL) {
185 			/* The neighbour record may be discarded if:
186 			 * - nobody refers to it.
187 			 * - it is not permanent and not externally learned.
188 			 */
189 			if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
190 				      tbl)) {
191 				shrunk = 1;
192 				continue;
193 			}
194 			np = &n->next;
195 		}
196 	}
197 
198 	tbl->last_flush = jiffies;
199 
200 	write_unlock_bh(&tbl->lock);
201 
202 	return shrunk;
203 }
204 
205 static void neigh_add_timer(struct neighbour *n, unsigned long when)
206 {
207 	neigh_hold(n);
208 	if (unlikely(mod_timer(&n->timer, when))) {
209 		printk("NEIGH: BUG, double timer add, state is %x\n",
210 		       n->nud_state);
211 		dump_stack();
212 	}
213 }
214 
215 static int neigh_del_timer(struct neighbour *n)
216 {
217 	if ((n->nud_state & NUD_IN_TIMER) &&
218 	    del_timer(&n->timer)) {
219 		neigh_release(n);
220 		return 1;
221 	}
222 	return 0;
223 }
224 
225 static void pneigh_queue_purge(struct sk_buff_head *list)
226 {
227 	struct sk_buff *skb;
228 
229 	while ((skb = skb_dequeue(list)) != NULL) {
230 		dev_put(skb->dev);
231 		kfree_skb(skb);
232 	}
233 }
234 
235 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
236 {
237 	int i;
238 	struct neigh_hash_table *nht;
239 
240 	nht = rcu_dereference_protected(tbl->nht,
241 					lockdep_is_held(&tbl->lock));
242 
243 	for (i = 0; i < (1 << nht->hash_shift); i++) {
244 		struct neighbour *n;
245 		struct neighbour __rcu **np = &nht->hash_buckets[i];
246 
247 		while ((n = rcu_dereference_protected(*np,
248 					lockdep_is_held(&tbl->lock))) != NULL) {
249 			if (dev && n->dev != dev) {
250 				np = &n->next;
251 				continue;
252 			}
253 			rcu_assign_pointer(*np,
254 				   rcu_dereference_protected(n->next,
255 						lockdep_is_held(&tbl->lock)));
256 			write_lock(&n->lock);
257 			neigh_del_timer(n);
258 			n->dead = 1;
259 
260 			if (refcount_read(&n->refcnt) != 1) {
261 				/* The most unpleasant situation:
262 				   we must destroy the neighbour entry,
263 				   but someone still uses it.
264 
265 				   Destruction will be delayed until
266 				   the last user releases us, but
267 				   we must kill the timers etc. and move
268 				   the entry to a safe state.
269 				 */
270 				__skb_queue_purge(&n->arp_queue);
271 				n->arp_queue_len_bytes = 0;
272 				n->output = neigh_blackhole;
273 				if (n->nud_state & NUD_VALID)
274 					n->nud_state = NUD_NOARP;
275 				else
276 					n->nud_state = NUD_NONE;
277 				neigh_dbg(2, "neigh %p is stray\n", n);
278 			}
279 			write_unlock(&n->lock);
280 			neigh_cleanup_and_release(n);
281 		}
282 	}
283 }
284 
285 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
286 {
287 	write_lock_bh(&tbl->lock);
288 	neigh_flush_dev(tbl, dev);
289 	write_unlock_bh(&tbl->lock);
290 }
291 EXPORT_SYMBOL(neigh_changeaddr);
292 
293 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
294 {
295 	write_lock_bh(&tbl->lock);
296 	neigh_flush_dev(tbl, dev);
297 	pneigh_ifdown_and_unlock(tbl, dev);
298 
299 	del_timer_sync(&tbl->proxy_timer);
300 	pneigh_queue_purge(&tbl->proxy_queue);
301 	return 0;
302 }
303 EXPORT_SYMBOL(neigh_ifdown);
304 
305 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
306 {
307 	struct neighbour *n = NULL;
308 	unsigned long now = jiffies;
309 	int entries;
310 
311 	entries = atomic_inc_return(&tbl->entries) - 1;
312 	if (entries >= tbl->gc_thresh3 ||
313 	    (entries >= tbl->gc_thresh2 &&
314 	     time_after(now, tbl->last_flush + 5 * HZ))) {
315 		if (!neigh_forced_gc(tbl) &&
316 		    entries >= tbl->gc_thresh3) {
317 			net_info_ratelimited("%s: neighbor table overflow!\n",
318 					     tbl->id);
319 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
320 			goto out_entries;
321 		}
322 	}
323 
324 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
325 	if (!n)
326 		goto out_entries;
327 
328 	__skb_queue_head_init(&n->arp_queue);
329 	rwlock_init(&n->lock);
330 	seqlock_init(&n->ha_lock);
331 	n->updated	  = n->used = now;
332 	n->nud_state	  = NUD_NONE;
333 	n->output	  = neigh_blackhole;
334 	seqlock_init(&n->hh.hh_lock);
335 	n->parms	  = neigh_parms_clone(&tbl->parms);
336 	timer_setup(&n->timer, neigh_timer_handler, 0);
337 
338 	NEIGH_CACHE_STAT_INC(tbl, allocs);
339 	n->tbl		  = tbl;
340 	refcount_set(&n->refcnt, 1);
341 	n->dead		  = 1;
342 out:
343 	return n;
344 
345 out_entries:
346 	atomic_dec(&tbl->entries);
347 	goto out;
348 }
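
/* The gc_thresh logic above, in short: below gc_thresh2, entries are
 * allocated freely; between gc_thresh2 and gc_thresh3, a forced GC runs
 * first if more than 5 seconds have passed since the last flush, but
 * the allocation still succeeds; at gc_thresh3 and above, the
 * allocation fails unless the forced GC manages to free an entry.
 */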
349 
350 static void neigh_get_hash_rnd(u32 *x)
351 {
352 	*x = get_random_u32() | 1;
353 }
354 
355 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
356 {
357 	size_t size = (1 << shift) * sizeof(struct neighbour *);
358 	struct neigh_hash_table *ret;
359 	struct neighbour __rcu **buckets;
360 	int i;
361 
362 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
363 	if (!ret)
364 		return NULL;
365 	if (size <= PAGE_SIZE)
366 		buckets = kzalloc(size, GFP_ATOMIC);
367 	else
368 		buckets = (struct neighbour __rcu **)
369 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
370 					   get_order(size));
371 	if (!buckets) {
372 		kfree(ret);
373 		return NULL;
374 	}
375 	ret->hash_buckets = buckets;
376 	ret->hash_shift = shift;
377 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
378 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
379 	return ret;
380 }
381 
382 static void neigh_hash_free_rcu(struct rcu_head *head)
383 {
384 	struct neigh_hash_table *nht = container_of(head,
385 						    struct neigh_hash_table,
386 						    rcu);
387 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
388 	struct neighbour __rcu **buckets = nht->hash_buckets;
389 
390 	if (size <= PAGE_SIZE)
391 		kfree(buckets);
392 	else
393 		free_pages((unsigned long)buckets, get_order(size));
394 	kfree(nht);
395 }
396 
397 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
398 						unsigned long new_shift)
399 {
400 	unsigned int i, hash;
401 	struct neigh_hash_table *new_nht, *old_nht;
402 
403 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
404 
405 	old_nht = rcu_dereference_protected(tbl->nht,
406 					    lockdep_is_held(&tbl->lock));
407 	new_nht = neigh_hash_alloc(new_shift);
408 	if (!new_nht)
409 		return old_nht;
410 
411 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
412 		struct neighbour *n, *next;
413 
414 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
415 						   lockdep_is_held(&tbl->lock));
416 		     n != NULL;
417 		     n = next) {
418 			hash = tbl->hash(n->primary_key, n->dev,
419 					 new_nht->hash_rnd);
420 
421 			hash >>= (32 - new_nht->hash_shift);
422 			next = rcu_dereference_protected(n->next,
423 						lockdep_is_held(&tbl->lock));
424 
425 			rcu_assign_pointer(n->next,
426 					   rcu_dereference_protected(
427 						new_nht->hash_buckets[hash],
428 						lockdep_is_held(&tbl->lock)));
429 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
430 		}
431 	}
432 
433 	rcu_assign_pointer(tbl->nht, new_nht);
434 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
435 	return new_nht;
436 }
437 
438 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
439 			       struct net_device *dev)
440 {
441 	struct neighbour *n;
442 
443 	NEIGH_CACHE_STAT_INC(tbl, lookups);
444 
445 	rcu_read_lock_bh();
446 	n = __neigh_lookup_noref(tbl, pkey, dev);
447 	if (n) {
448 		if (!refcount_inc_not_zero(&n->refcnt))
449 			n = NULL;
450 		NEIGH_CACHE_STAT_INC(tbl, hits);
451 	}
452 
453 	rcu_read_unlock_bh();
454 	return n;
455 }
456 EXPORT_SYMBOL(neigh_lookup);
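
/* A hedged usage sketch for the IPv4 table (the address is an
 * assumption for illustration; arp_tbl is the real ARP neigh_table):
 *
 *	__be32 ip = htonl(0xc0a80001);		// 192.168.0.1, example only
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
 *	if (n) {
 *		// read n->nud_state / n->ha under n->lock as needed
 *		neigh_release(n);		// drop the reference taken above
 *	}
 */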
457 
458 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
459 				     const void *pkey)
460 {
461 	struct neighbour *n;
462 	unsigned int key_len = tbl->key_len;
463 	u32 hash_val;
464 	struct neigh_hash_table *nht;
465 
466 	NEIGH_CACHE_STAT_INC(tbl, lookups);
467 
468 	rcu_read_lock_bh();
469 	nht = rcu_dereference_bh(tbl->nht);
470 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
471 
472 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
473 	     n != NULL;
474 	     n = rcu_dereference_bh(n->next)) {
475 		if (!memcmp(n->primary_key, pkey, key_len) &&
476 		    net_eq(dev_net(n->dev), net)) {
477 			if (!refcount_inc_not_zero(&n->refcnt))
478 				n = NULL;
479 			NEIGH_CACHE_STAT_INC(tbl, hits);
480 			break;
481 		}
482 	}
483 
484 	rcu_read_unlock_bh();
485 	return n;
486 }
487 EXPORT_SYMBOL(neigh_lookup_nodev);
488 
489 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
490 				 struct net_device *dev, bool want_ref)
491 {
492 	u32 hash_val;
493 	unsigned int key_len = tbl->key_len;
494 	int error;
495 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
496 	struct neigh_hash_table *nht;
497 
498 	if (!n) {
499 		rc = ERR_PTR(-ENOBUFS);
500 		goto out;
501 	}
502 
503 	memcpy(n->primary_key, pkey, key_len);
504 	n->dev = dev;
505 	dev_hold(dev);
506 
507 	/* Protocol specific setup. */
508 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
509 		rc = ERR_PTR(error);
510 		goto out_neigh_release;
511 	}
512 
513 	if (dev->netdev_ops->ndo_neigh_construct) {
514 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
515 		if (error < 0) {
516 			rc = ERR_PTR(error);
517 			goto out_neigh_release;
518 		}
519 	}
520 
521 	/* Device specific setup. */
522 	if (n->parms->neigh_setup &&
523 	    (error = n->parms->neigh_setup(n)) < 0) {
524 		rc = ERR_PTR(error);
525 		goto out_neigh_release;
526 	}
527 
528 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
529 
530 	write_lock_bh(&tbl->lock);
531 	nht = rcu_dereference_protected(tbl->nht,
532 					lockdep_is_held(&tbl->lock));
533 
534 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
535 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
536 
537 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
538 
539 	if (n->parms->dead) {
540 		rc = ERR_PTR(-EINVAL);
541 		goto out_tbl_unlock;
542 	}
543 
544 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
545 					    lockdep_is_held(&tbl->lock));
546 	     n1 != NULL;
547 	     n1 = rcu_dereference_protected(n1->next,
548 			lockdep_is_held(&tbl->lock))) {
549 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
550 			if (want_ref)
551 				neigh_hold(n1);
552 			rc = n1;
553 			goto out_tbl_unlock;
554 		}
555 	}
556 
557 	n->dead = 0;
558 	if (want_ref)
559 		neigh_hold(n);
560 	rcu_assign_pointer(n->next,
561 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
562 						     lockdep_is_held(&tbl->lock)));
563 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
564 	write_unlock_bh(&tbl->lock);
565 	neigh_dbg(2, "neigh %p is created\n", n);
566 	rc = n;
567 out:
568 	return rc;
569 out_tbl_unlock:
570 	write_unlock_bh(&tbl->lock);
571 out_neigh_release:
572 	neigh_release(n);
573 	goto out;
574 }
575 EXPORT_SYMBOL(__neigh_create);
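
/* Callers normally use the neigh_create() wrapper from
 * <net/neighbour.h>, which is just __neigh_create(tbl, pkey, dev, true).
 * A minimal sketch (the key is an assumption for illustration):
 *
 *	struct neighbour *n = neigh_create(&arp_tbl, &ip, dev);
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);	// -ENOBUFS, -EINVAL, ...
 *	// n is now hashed in, with a reference held for the caller
 *	neigh_release(n);
 */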
576 
577 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
578 {
579 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
580 	hash_val ^= (hash_val >> 16);
581 	hash_val ^= hash_val >> 8;
582 	hash_val ^= hash_val >> 4;
583 	hash_val &= PNEIGH_HASHMASK;
584 	return hash_val;
585 }
586 
587 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
588 					      struct net *net,
589 					      const void *pkey,
590 					      unsigned int key_len,
591 					      struct net_device *dev)
592 {
593 	while (n) {
594 		if (!memcmp(n->key, pkey, key_len) &&
595 		    net_eq(pneigh_net(n), net) &&
596 		    (n->dev == dev || !n->dev))
597 			return n;
598 		n = n->next;
599 	}
600 	return NULL;
601 }
602 
603 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
604 		struct net *net, const void *pkey, struct net_device *dev)
605 {
606 	unsigned int key_len = tbl->key_len;
607 	u32 hash_val = pneigh_hash(pkey, key_len);
608 
609 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
610 				 net, pkey, key_len, dev);
611 }
612 EXPORT_SYMBOL_GPL(__pneigh_lookup);
613 
614 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
615 				    struct net *net, const void *pkey,
616 				    struct net_device *dev, int creat)
617 {
618 	struct pneigh_entry *n;
619 	unsigned int key_len = tbl->key_len;
620 	u32 hash_val = pneigh_hash(pkey, key_len);
621 
622 	read_lock_bh(&tbl->lock);
623 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
624 			      net, pkey, key_len, dev);
625 	read_unlock_bh(&tbl->lock);
626 
627 	if (n || !creat)
628 		goto out;
629 
630 	ASSERT_RTNL();
631 
632 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
633 	if (!n)
634 		goto out;
635 
636 	write_pnet(&n->net, net);
637 	memcpy(n->key, pkey, key_len);
638 	n->dev = dev;
639 	if (dev)
640 		dev_hold(dev);
641 
642 	if (tbl->pconstructor && tbl->pconstructor(n)) {
643 		if (dev)
644 			dev_put(dev);
645 		kfree(n);
646 		n = NULL;
647 		goto out;
648 	}
649 
650 	write_lock_bh(&tbl->lock);
651 	n->next = tbl->phash_buckets[hash_val];
652 	tbl->phash_buckets[hash_val] = n;
653 	write_unlock_bh(&tbl->lock);
654 out:
655 	return n;
656 }
657 EXPORT_SYMBOL(pneigh_lookup);
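
/* With creat != 0 the caller must hold RTNL, as the ASSERT_RTNL()
 * above enforces.  A sketch of creating a proxy entry, mirroring the
 * NTF_PROXY handling in neigh_add() later in this file:
 *
 *	struct pneigh_entry *pn;
 *
 *	ASSERT_RTNL();
 *	pn = pneigh_lookup(&arp_tbl, net, &ip, dev, 1);
 *	if (!pn)
 *		return -ENOBUFS;
 */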
658 
659 
660 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
661 		  struct net_device *dev)
662 {
663 	struct pneigh_entry *n, **np;
664 	unsigned int key_len = tbl->key_len;
665 	u32 hash_val = pneigh_hash(pkey, key_len);
666 
667 	write_lock_bh(&tbl->lock);
668 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
669 	     np = &n->next) {
670 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
671 		    net_eq(pneigh_net(n), net)) {
672 			*np = n->next;
673 			write_unlock_bh(&tbl->lock);
674 			if (tbl->pdestructor)
675 				tbl->pdestructor(n);
676 			if (n->dev)
677 				dev_put(n->dev);
678 			kfree(n);
679 			return 0;
680 		}
681 	}
682 	write_unlock_bh(&tbl->lock);
683 	return -ENOENT;
684 }
685 
686 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
687 				    struct net_device *dev)
688 {
689 	struct pneigh_entry *n, **np, *freelist = NULL;
690 	u32 h;
691 
692 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
693 		np = &tbl->phash_buckets[h];
694 		while ((n = *np) != NULL) {
695 			if (!dev || n->dev == dev) {
696 				*np = n->next;
697 				n->next = freelist;
698 				freelist = n;
699 				continue;
700 			}
701 			np = &n->next;
702 		}
703 	}
704 	write_unlock_bh(&tbl->lock);
705 	while ((n = freelist)) {
706 		freelist = n->next;
707 		n->next = NULL;
708 		if (tbl->pdestructor)
709 			tbl->pdestructor(n);
710 		if (n->dev)
711 			dev_put(n->dev);
712 		kfree(n);
713 	}
714 	return -ENOENT;
715 }
716 
717 static void neigh_parms_destroy(struct neigh_parms *parms);
718 
719 static inline void neigh_parms_put(struct neigh_parms *parms)
720 {
721 	if (refcount_dec_and_test(&parms->refcnt))
722 		neigh_parms_destroy(parms);
723 }
724 
725 /*
726  *	The neighbour must already be out of the table.
727  *	Called when the last reference is dropped, via neigh_release().
728  */
729 void neigh_destroy(struct neighbour *neigh)
730 {
731 	struct net_device *dev = neigh->dev;
732 
733 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
734 
735 	if (!neigh->dead) {
736 		pr_warn("Destroying alive neighbour %p\n", neigh);
737 		dump_stack();
738 		return;
739 	}
740 
741 	if (neigh_del_timer(neigh))
742 		pr_warn("Impossible event\n");
743 
744 	write_lock_bh(&neigh->lock);
745 	__skb_queue_purge(&neigh->arp_queue);
746 	write_unlock_bh(&neigh->lock);
747 	neigh->arp_queue_len_bytes = 0;
748 
749 	if (dev->netdev_ops->ndo_neigh_destroy)
750 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
751 
752 	dev_put(dev);
753 	neigh_parms_put(neigh->parms);
754 
755 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
756 
757 	atomic_dec(&neigh->tbl->entries);
758 	kfree_rcu(neigh, rcu);
759 }
760 EXPORT_SYMBOL(neigh_destroy);
761 
762 /* Neighbour state is suspicious;
763    disable the fast path.
764 
765    Called with the neigh write-locked.
766  */
767 static void neigh_suspect(struct neighbour *neigh)
768 {
769 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
770 
771 	neigh->output = neigh->ops->output;
772 }
773 
774 /* Neighbour state is OK;
775    enable the fast path.
776 
777    Called with the neigh write-locked.
778  */
779 static void neigh_connect(struct neighbour *neigh)
780 {
781 	neigh_dbg(2, "neigh %p is connected\n", neigh);
782 
783 	neigh->output = neigh->ops->connected_output;
784 }
785 
786 static void neigh_periodic_work(struct work_struct *work)
787 {
788 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
789 	struct neighbour *n;
790 	struct neighbour __rcu **np;
791 	unsigned int i;
792 	struct neigh_hash_table *nht;
793 
794 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
795 
796 	write_lock_bh(&tbl->lock);
797 	nht = rcu_dereference_protected(tbl->nht,
798 					lockdep_is_held(&tbl->lock));
799 
800 	/*
801 	 *	Periodically recompute ReachableTime from the random function.
802 	 */
803 
804 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
805 		struct neigh_parms *p;
806 		tbl->last_rand = jiffies;
807 		list_for_each_entry(p, &tbl->parms_list, list)
808 			p->reachable_time =
809 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
810 	}
811 
812 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
813 		goto out;
814 
815 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
816 		np = &nht->hash_buckets[i];
817 
818 		while ((n = rcu_dereference_protected(*np,
819 				lockdep_is_held(&tbl->lock))) != NULL) {
820 			unsigned int state;
821 
822 			write_lock(&n->lock);
823 
824 			state = n->nud_state;
825 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
826 			    (n->flags & NTF_EXT_LEARNED)) {
827 				write_unlock(&n->lock);
828 				goto next_elt;
829 			}
830 
831 			if (time_before(n->used, n->confirmed))
832 				n->used = n->confirmed;
833 
834 			if (refcount_read(&n->refcnt) == 1 &&
835 			    (state == NUD_FAILED ||
836 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
837 				*np = n->next;
838 				n->dead = 1;
839 				write_unlock(&n->lock);
840 				neigh_cleanup_and_release(n);
841 				continue;
842 			}
843 			write_unlock(&n->lock);
844 
845 next_elt:
846 			np = &n->next;
847 		}
848 		/*
849 		 * It's fine to release lock here, even if hash table
850 		 * grows while we are preempted.
851 		 */
852 		write_unlock_bh(&tbl->lock);
853 		cond_resched();
854 		write_lock_bh(&tbl->lock);
855 		nht = rcu_dereference_protected(tbl->nht,
856 						lockdep_is_held(&tbl->lock));
857 	}
858 out:
859 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
860 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
861 	 * BASE_REACHABLE_TIME.
862 	 */
863 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
864 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
865 	write_unlock_bh(&tbl->lock);
866 }
867 
868 static __inline__ int neigh_max_probes(struct neighbour *n)
869 {
870 	struct neigh_parms *p = n->parms;
871 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
872 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
873 	        NEIGH_VAR(p, MCAST_PROBES));
874 }
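
/* A worked example with the usual IPv4 ARP defaults (ucast_probes = 3,
 * app_probes = 0, mcast_probes = 3, mcast_reprobes = 0): an entry in
 * NUD_INCOMPLETE may send up to 3 + 0 + 3 = 6 probes, while one in
 * NUD_PROBE gets only 3 + 0 + 0 = 3 before it goes to NUD_FAILED.
 */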
875 
876 static void neigh_invalidate(struct neighbour *neigh)
877 	__releases(neigh->lock)
878 	__acquires(neigh->lock)
879 {
880 	struct sk_buff *skb;
881 
882 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
883 	neigh_dbg(2, "neigh %p is failed\n", neigh);
884 	neigh->updated = jiffies;
885 
886 	/* This is a very delicate place.  error_report is a very complicated
887 	   routine; in particular, it can hit the same neighbour entry!
888 
889 	   So we try to be careful and avoid a dead loop. --ANK
890 	 */
891 	while (neigh->nud_state == NUD_FAILED &&
892 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
893 		write_unlock(&neigh->lock);
894 		neigh->ops->error_report(neigh, skb);
895 		write_lock(&neigh->lock);
896 	}
897 	__skb_queue_purge(&neigh->arp_queue);
898 	neigh->arp_queue_len_bytes = 0;
899 }
900 
901 static void neigh_probe(struct neighbour *neigh)
902 	__releases(neigh->lock)
903 {
904 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
905 	/* keep skb alive even if arp_queue overflows */
906 	if (skb)
907 		skb = skb_clone(skb, GFP_ATOMIC);
908 	write_unlock(&neigh->lock);
909 	if (neigh->ops->solicit)
910 		neigh->ops->solicit(neigh, skb);
911 	atomic_inc(&neigh->probes);
912 	kfree_skb(skb);
913 }
914 
915 /* Called when a timer expires for a neighbour entry. */
916 
917 static void neigh_timer_handler(struct timer_list *t)
918 {
919 	unsigned long now, next;
920 	struct neighbour *neigh = from_timer(neigh, t, timer);
921 	unsigned int state;
922 	int notify = 0;
923 
924 	write_lock(&neigh->lock);
925 
926 	state = neigh->nud_state;
927 	now = jiffies;
928 	next = now + HZ;
929 
930 	if (!(state & NUD_IN_TIMER))
931 		goto out;
932 
933 	if (state & NUD_REACHABLE) {
934 		if (time_before_eq(now,
935 				   neigh->confirmed + neigh->parms->reachable_time)) {
936 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
937 			next = neigh->confirmed + neigh->parms->reachable_time;
938 		} else if (time_before_eq(now,
939 					  neigh->used +
940 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
941 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
942 			neigh->nud_state = NUD_DELAY;
943 			neigh->updated = jiffies;
944 			neigh_suspect(neigh);
945 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
946 		} else {
947 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
948 			neigh->nud_state = NUD_STALE;
949 			neigh->updated = jiffies;
950 			neigh_suspect(neigh);
951 			notify = 1;
952 		}
953 	} else if (state & NUD_DELAY) {
954 		if (time_before_eq(now,
955 				   neigh->confirmed +
956 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
957 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
958 			neigh->nud_state = NUD_REACHABLE;
959 			neigh->updated = jiffies;
960 			neigh_connect(neigh);
961 			notify = 1;
962 			next = neigh->confirmed + neigh->parms->reachable_time;
963 		} else {
964 			neigh_dbg(2, "neigh %p is probed\n", neigh);
965 			neigh->nud_state = NUD_PROBE;
966 			neigh->updated = jiffies;
967 			atomic_set(&neigh->probes, 0);
968 			notify = 1;
969 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
970 		}
971 	} else {
972 		/* NUD_PROBE|NUD_INCOMPLETE */
973 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
974 	}
975 
976 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
977 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
978 		neigh->nud_state = NUD_FAILED;
979 		notify = 1;
980 		neigh_invalidate(neigh);
981 		goto out;
982 	}
983 
984 	if (neigh->nud_state & NUD_IN_TIMER) {
985 		if (time_before(next, jiffies + HZ/2))
986 			next = jiffies + HZ/2;
987 		if (!mod_timer(&neigh->timer, next))
988 			neigh_hold(neigh);
989 	}
990 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
991 		neigh_probe(neigh);
992 	} else {
993 out:
994 		write_unlock(&neigh->lock);
995 	}
996 
997 	if (notify)
998 		neigh_update_notify(neigh, 0);
999 
1000 	neigh_release(neigh);
1001 }
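
/* State transitions driven by the handler above, in summary:
 *   REACHABLE -> DELAY   confirmation expired but the entry was used
 *                        within delay_probe_time,
 *   REACHABLE -> STALE   confirmation expired and the entry is idle,
 *   DELAY -> REACHABLE   a confirmation arrived in the meantime,
 *   DELAY -> PROBE       otherwise (probe counter reset to 0),
 *   PROBE/INCOMPLETE -> FAILED
 *                        once neigh_max_probes() probes went unanswered.
 */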
1002 
1003 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1004 {
1005 	int rc;
1006 	bool immediate_probe = false;
1007 
1008 	write_lock_bh(&neigh->lock);
1009 
1010 	rc = 0;
1011 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1012 		goto out_unlock_bh;
1013 	if (neigh->dead)
1014 		goto out_dead;
1015 
1016 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1017 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1018 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1019 			unsigned long next, now = jiffies;
1020 
1021 			atomic_set(&neigh->probes,
1022 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1023 			neigh->nud_state     = NUD_INCOMPLETE;
1024 			neigh->updated = now;
1025 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1026 					 HZ/2);
1027 			neigh_add_timer(neigh, next);
1028 			immediate_probe = true;
1029 		} else {
1030 			neigh->nud_state = NUD_FAILED;
1031 			neigh->updated = jiffies;
1032 			write_unlock_bh(&neigh->lock);
1033 
1034 			kfree_skb(skb);
1035 			return 1;
1036 		}
1037 	} else if (neigh->nud_state & NUD_STALE) {
1038 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1039 		neigh->nud_state = NUD_DELAY;
1040 		neigh->updated = jiffies;
1041 		neigh_add_timer(neigh, jiffies +
1042 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1043 	}
1044 
1045 	if (neigh->nud_state == NUD_INCOMPLETE) {
1046 		if (skb) {
1047 			while (neigh->arp_queue_len_bytes + skb->truesize >
1048 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1049 				struct sk_buff *buff;
1050 
1051 				buff = __skb_dequeue(&neigh->arp_queue);
1052 				if (!buff)
1053 					break;
1054 				neigh->arp_queue_len_bytes -= buff->truesize;
1055 				kfree_skb(buff);
1056 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1057 			}
1058 			skb_dst_force(skb);
1059 			__skb_queue_tail(&neigh->arp_queue, skb);
1060 			neigh->arp_queue_len_bytes += skb->truesize;
1061 		}
1062 		rc = 1;
1063 	}
1064 out_unlock_bh:
1065 	if (immediate_probe)
1066 		neigh_probe(neigh);
1067 	else
1068 		write_unlock(&neigh->lock);
1069 	local_bh_enable();
1070 	return rc;
1071 
1072 out_dead:
1073 	if (neigh->nud_state & NUD_STALE)
1074 		goto out_unlock_bh;
1075 	write_unlock_bh(&neigh->lock);
1076 	kfree_skb(skb);
1077 	return 1;
1078 }
1079 EXPORT_SYMBOL(__neigh_event_send);
1080 
1081 static void neigh_update_hhs(struct neighbour *neigh)
1082 {
1083 	struct hh_cache *hh;
1084 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1085 		= NULL;
1086 
1087 	if (neigh->dev->header_ops)
1088 		update = neigh->dev->header_ops->cache_update;
1089 
1090 	if (update) {
1091 		hh = &neigh->hh;
1092 		if (hh->hh_len) {
1093 			write_seqlock_bh(&hh->hh_lock);
1094 			update(hh, neigh->dev, neigh->ha);
1095 			write_sequnlock_bh(&hh->hh_lock);
1096 		}
1097 	}
1098 }
1099 
1100 
1101 
1102 /* Generic update routine.
1103    -- lladdr is the new lladdr, or NULL if it is not supplied.
1104    -- new    is the new state.
1105    -- flags
1106 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1107 				if it is different.
1108 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1109 				lladdr instead of overriding it
1110 				if it is different.
1111 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1112 
1113 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1114 				NTF_ROUTER flag.
1115 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known
1116 				to be a router.
1117 
1118    The caller MUST hold a reference count on the entry.
1119  */
1120 
1121 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1122 		 u32 flags, u32 nlmsg_pid)
1123 {
1124 	u8 old;
1125 	int err;
1126 	int notify = 0;
1127 	struct net_device *dev;
1128 	int update_isrouter = 0;
1129 
1130 	write_lock_bh(&neigh->lock);
1131 
1132 	dev    = neigh->dev;
1133 	old    = neigh->nud_state;
1134 	err    = -EPERM;
1135 
1136 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1137 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1138 		goto out;
1139 	if (neigh->dead)
1140 		goto out;
1141 
1142 	neigh_update_ext_learned(neigh, flags, &notify);
1143 
1144 	if (!(new & NUD_VALID)) {
1145 		neigh_del_timer(neigh);
1146 		if (old & NUD_CONNECTED)
1147 			neigh_suspect(neigh);
1148 		neigh->nud_state = new;
1149 		err = 0;
1150 		notify = old & NUD_VALID;
1151 		if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
1152 		     (flags & NEIGH_UPDATE_F_ADMIN)) &&
1153 		    (new & NUD_FAILED)) {
1154 			neigh_invalidate(neigh);
1155 			notify = 1;
1156 		}
1157 		goto out;
1158 	}
1159 
1160 	/* Compare new lladdr with cached one */
1161 	if (!dev->addr_len) {
1162 		/* First case: device needs no address. */
1163 		lladdr = neigh->ha;
1164 	} else if (lladdr) {
1165 		/* The second case: if something is already cached
1166 		   and a new address is proposed:
1167 		   - compare new & old
1168 		   - if they are different, check override flag
1169 		 */
1170 		if ((old & NUD_VALID) &&
1171 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1172 			lladdr = neigh->ha;
1173 	} else {
1174 		/* No address is supplied; if we know something,
1175 		   use it, otherwise discard the request.
1176 		 */
1177 		err = -EINVAL;
1178 		if (!(old & NUD_VALID))
1179 			goto out;
1180 		lladdr = neigh->ha;
1181 	}
1182 
1183 	/* Update the confirmed timestamp for the neighbour entry after we
1184 	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1185 	 */
1186 	if (new & NUD_CONNECTED)
1187 		neigh->confirmed = jiffies;
1188 
1189 	/* If the entry was valid and the address has not changed,
1190 	   do not change the entry state if the new one is STALE.
1191 	 */
1192 	err = 0;
1193 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1194 	if (old & NUD_VALID) {
1195 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1196 			update_isrouter = 0;
1197 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1198 			    (old & NUD_CONNECTED)) {
1199 				lladdr = neigh->ha;
1200 				new = NUD_STALE;
1201 			} else
1202 				goto out;
1203 		} else {
1204 			if (lladdr == neigh->ha && new == NUD_STALE &&
1205 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1206 				new = old;
1207 		}
1208 	}
1209 
1210 	/* Update the timestamp only once we know we will make a change to the
1211 	 * neighbour entry.  Otherwise we risk moving the locktime window with
1212 	 * no-op updates and ignoring relevant ARP updates.
1213 	 */
1214 	if (new != old || lladdr != neigh->ha)
1215 		neigh->updated = jiffies;
1216 
1217 	if (new != old) {
1218 		neigh_del_timer(neigh);
1219 		if (new & NUD_PROBE)
1220 			atomic_set(&neigh->probes, 0);
1221 		if (new & NUD_IN_TIMER)
1222 			neigh_add_timer(neigh, (jiffies +
1223 						((new & NUD_REACHABLE) ?
1224 						 neigh->parms->reachable_time :
1225 						 0)));
1226 		neigh->nud_state = new;
1227 		notify = 1;
1228 	}
1229 
1230 	if (lladdr != neigh->ha) {
1231 		write_seqlock(&neigh->ha_lock);
1232 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1233 		write_sequnlock(&neigh->ha_lock);
1234 		neigh_update_hhs(neigh);
1235 		if (!(new & NUD_CONNECTED))
1236 			neigh->confirmed = jiffies -
1237 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1238 		notify = 1;
1239 	}
1240 	if (new == old)
1241 		goto out;
1242 	if (new & NUD_CONNECTED)
1243 		neigh_connect(neigh);
1244 	else
1245 		neigh_suspect(neigh);
1246 	if (!(old & NUD_VALID)) {
1247 		struct sk_buff *skb;
1248 
1249 		/* Again: avoid a dead loop if something went wrong */
1250 
1251 		while (neigh->nud_state & NUD_VALID &&
1252 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1253 			struct dst_entry *dst = skb_dst(skb);
1254 			struct neighbour *n2, *n1 = neigh;
1255 			write_unlock_bh(&neigh->lock);
1256 
1257 			rcu_read_lock();
1258 
1259 			/* Why not just use 'neigh' as-is?  The problem is that
1260 			 * things such as shaper, eql, and sch_teql can end up
1261 			 * using alternative, different neigh objects to output
1262 			 * the packet in the output path.  So what we need to do
1263 			 * here is re-look up the top-level neigh in the path so
1264 			 * that we can reinject the packet there.
1265 			 */
1266 			n2 = NULL;
1267 			if (dst) {
1268 				n2 = dst_neigh_lookup_skb(dst, skb);
1269 				if (n2)
1270 					n1 = n2;
1271 			}
1272 			n1->output(n1, skb);
1273 			if (n2)
1274 				neigh_release(n2);
1275 			rcu_read_unlock();
1276 
1277 			write_lock_bh(&neigh->lock);
1278 		}
1279 		__skb_queue_purge(&neigh->arp_queue);
1280 		neigh->arp_queue_len_bytes = 0;
1281 	}
1282 out:
1283 	if (update_isrouter)
1284 		neigh_update_is_router(neigh, flags, &notify);
1285 	write_unlock_bh(&neigh->lock);
1286 
1287 	if (notify)
1288 		neigh_update_notify(neigh, nlmsg_pid);
1289 
1290 	return err;
1291 }
1292 EXPORT_SYMBOL(neigh_update);
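
/* A hedged sketch of an administrative update, in the spirit of the
 * netlink handlers later in this file ("ip neigh replace ..."):
 *
 *	err = neigh_update(n, lladdr, NUD_PERMANENT,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN, 0);
 *
 * The caller must hold its own reference on n (e.g. from
 * neigh_lookup()); neigh_update() does not drop it.
 */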
1293 
1294 /* Update the neigh to listen temporarily for probe responses, even if it is
1295  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1296  */
1297 void __neigh_set_probe_once(struct neighbour *neigh)
1298 {
1299 	if (neigh->dead)
1300 		return;
1301 	neigh->updated = jiffies;
1302 	if (!(neigh->nud_state & NUD_FAILED))
1303 		return;
1304 	neigh->nud_state = NUD_INCOMPLETE;
1305 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1306 	neigh_add_timer(neigh,
1307 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1308 }
1309 EXPORT_SYMBOL(__neigh_set_probe_once);
1310 
1311 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1312 				 u8 *lladdr, void *saddr,
1313 				 struct net_device *dev)
1314 {
1315 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1316 						 lladdr || !dev->addr_len);
1317 	if (neigh)
1318 		neigh_update(neigh, lladdr, NUD_STALE,
1319 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1320 	return neigh;
1321 }
1322 EXPORT_SYMBOL(neigh_event_ns);
1323 
1324 /* Takes write_lock_bh(&n->lock) itself to initialize the hh_cache entry. */
1325 static void neigh_hh_init(struct neighbour *n)
1326 {
1327 	struct net_device *dev = n->dev;
1328 	__be16 prot = n->tbl->protocol;
1329 	struct hh_cache	*hh = &n->hh;
1330 
1331 	write_lock_bh(&n->lock);
1332 
1333 	/* Only one thread can come in here and initialize the
1334 	 * hh_cache entry.
1335 	 */
1336 	if (!hh->hh_len)
1337 		dev->header_ops->cache(n, hh, prot);
1338 
1339 	write_unlock_bh(&n->lock);
1340 }
1341 
1342 /* Slow and careful. */
1343 
1344 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1345 {
1346 	int rc = 0;
1347 
1348 	if (!neigh_event_send(neigh, skb)) {
1349 		int err;
1350 		struct net_device *dev = neigh->dev;
1351 		unsigned int seq;
1352 
1353 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1354 			neigh_hh_init(neigh);
1355 
1356 		do {
1357 			__skb_pull(skb, skb_network_offset(skb));
1358 			seq = read_seqbegin(&neigh->ha_lock);
1359 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1360 					      neigh->ha, NULL, skb->len);
1361 		} while (read_seqretry(&neigh->ha_lock, seq));
1362 
1363 		if (err >= 0)
1364 			rc = dev_queue_xmit(skb);
1365 		else
1366 			goto out_kfree_skb;
1367 	}
1368 out:
1369 	return rc;
1370 out_kfree_skb:
1371 	rc = -EINVAL;
1372 	kfree_skb(skb);
1373 	goto out;
1374 }
1375 EXPORT_SYMBOL(neigh_resolve_output);
1376 
1377 /* As fast as possible without hh cache */
1378 
1379 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1380 {
1381 	struct net_device *dev = neigh->dev;
1382 	unsigned int seq;
1383 	int err;
1384 
1385 	do {
1386 		__skb_pull(skb, skb_network_offset(skb));
1387 		seq = read_seqbegin(&neigh->ha_lock);
1388 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1389 				      neigh->ha, NULL, skb->len);
1390 	} while (read_seqretry(&neigh->ha_lock, seq));
1391 
1392 	if (err >= 0)
1393 		err = dev_queue_xmit(skb);
1394 	else {
1395 		err = -EINVAL;
1396 		kfree_skb(skb);
1397 	}
1398 	return err;
1399 }
1400 EXPORT_SYMBOL(neigh_connected_output);
1401 
1402 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1403 {
1404 	return dev_queue_xmit(skb);
1405 }
1406 EXPORT_SYMBOL(neigh_direct_output);
1407 
1408 static void neigh_proxy_process(struct timer_list *t)
1409 {
1410 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1411 	long sched_next = 0;
1412 	unsigned long now = jiffies;
1413 	struct sk_buff *skb, *n;
1414 
1415 	spin_lock(&tbl->proxy_queue.lock);
1416 
1417 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1418 		long tdif = NEIGH_CB(skb)->sched_next - now;
1419 
1420 		if (tdif <= 0) {
1421 			struct net_device *dev = skb->dev;
1422 
1423 			__skb_unlink(skb, &tbl->proxy_queue);
1424 			if (tbl->proxy_redo && netif_running(dev)) {
1425 				rcu_read_lock();
1426 				tbl->proxy_redo(skb);
1427 				rcu_read_unlock();
1428 			} else {
1429 				kfree_skb(skb);
1430 			}
1431 
1432 			dev_put(dev);
1433 		} else if (!sched_next || tdif < sched_next)
1434 			sched_next = tdif;
1435 	}
1436 	del_timer(&tbl->proxy_timer);
1437 	if (sched_next)
1438 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1439 	spin_unlock(&tbl->proxy_queue.lock);
1440 }
1441 
1442 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1443 		    struct sk_buff *skb)
1444 {
1445 	unsigned long now = jiffies;
1446 
1447 	unsigned long sched_next = now + (prandom_u32() %
1448 					  NEIGH_VAR(p, PROXY_DELAY));
1449 
1450 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1451 		kfree_skb(skb);
1452 		return;
1453 	}
1454 
1455 	NEIGH_CB(skb)->sched_next = sched_next;
1456 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1457 
1458 	spin_lock(&tbl->proxy_queue.lock);
1459 	if (del_timer(&tbl->proxy_timer)) {
1460 		if (time_before(tbl->proxy_timer.expires, sched_next))
1461 			sched_next = tbl->proxy_timer.expires;
1462 	}
1463 	skb_dst_drop(skb);
1464 	dev_hold(skb->dev);
1465 	__skb_queue_tail(&tbl->proxy_queue, skb);
1466 	mod_timer(&tbl->proxy_timer, sched_next);
1467 	spin_unlock(&tbl->proxy_queue.lock);
1468 }
1469 EXPORT_SYMBOL(pneigh_enqueue);
1470 
1471 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1472 						      struct net *net, int ifindex)
1473 {
1474 	struct neigh_parms *p;
1475 
1476 	list_for_each_entry(p, &tbl->parms_list, list) {
1477 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1478 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1479 			return p;
1480 	}
1481 
1482 	return NULL;
1483 }
1484 
1485 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1486 				      struct neigh_table *tbl)
1487 {
1488 	struct neigh_parms *p;
1489 	struct net *net = dev_net(dev);
1490 	const struct net_device_ops *ops = dev->netdev_ops;
1491 
1492 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1493 	if (p) {
1494 		p->tbl		  = tbl;
1495 		refcount_set(&p->refcnt, 1);
1496 		p->reachable_time =
1497 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1498 		dev_hold(dev);
1499 		p->dev = dev;
1500 		write_pnet(&p->net, net);
1501 		p->sysctl_table = NULL;
1502 
1503 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1504 			dev_put(dev);
1505 			kfree(p);
1506 			return NULL;
1507 		}
1508 
1509 		write_lock_bh(&tbl->lock);
1510 		list_add(&p->list, &tbl->parms.list);
1511 		write_unlock_bh(&tbl->lock);
1512 
1513 		neigh_parms_data_state_cleanall(p);
1514 	}
1515 	return p;
1516 }
1517 EXPORT_SYMBOL(neigh_parms_alloc);
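
/* The per-device tunables are then read with the NEIGH_VAR() accessor
 * used throughout this file.  A minimal sketch (protocols such as ARP
 * call this when a device registers):
 *
 *	struct neigh_parms *p = neigh_parms_alloc(dev, &arp_tbl);
 *
 *	if (!p)
 *		return -ENOBUFS;
 *	probes = NEIGH_VAR(p, UCAST_PROBES);
 */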
1518 
1519 static void neigh_rcu_free_parms(struct rcu_head *head)
1520 {
1521 	struct neigh_parms *parms =
1522 		container_of(head, struct neigh_parms, rcu_head);
1523 
1524 	neigh_parms_put(parms);
1525 }
1526 
1527 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1528 {
1529 	if (!parms || parms == &tbl->parms)
1530 		return;
1531 	write_lock_bh(&tbl->lock);
1532 	list_del(&parms->list);
1533 	parms->dead = 1;
1534 	write_unlock_bh(&tbl->lock);
1535 	if (parms->dev)
1536 		dev_put(parms->dev);
1537 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1538 }
1539 EXPORT_SYMBOL(neigh_parms_release);
1540 
1541 static void neigh_parms_destroy(struct neigh_parms *parms)
1542 {
1543 	kfree(parms);
1544 }
1545 
1546 static struct lock_class_key neigh_table_proxy_queue_class;
1547 
1548 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1549 
1550 void neigh_table_init(int index, struct neigh_table *tbl)
1551 {
1552 	unsigned long now = jiffies;
1553 	unsigned long phsize;
1554 
1555 	INIT_LIST_HEAD(&tbl->parms_list);
1556 	list_add(&tbl->parms.list, &tbl->parms_list);
1557 	write_pnet(&tbl->parms.net, &init_net);
1558 	refcount_set(&tbl->parms.refcnt, 1);
1559 	tbl->parms.reachable_time =
1560 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1561 
1562 	tbl->stats = alloc_percpu(struct neigh_statistics);
1563 	if (!tbl->stats)
1564 		panic("cannot create neighbour cache statistics");
1565 
1566 #ifdef CONFIG_PROC_FS
1567 	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1568 			      &neigh_stat_seq_ops, tbl))
1569 		panic("cannot create neighbour proc dir entry");
1570 #endif
1571 
1572 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1573 
1574 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1575 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1576 
1577 	if (!tbl->nht || !tbl->phash_buckets)
1578 		panic("cannot allocate neighbour cache hashes");
1579 
1580 	if (!tbl->entry_size)
1581 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1582 					tbl->key_len, NEIGH_PRIV_ALIGN);
1583 	else
1584 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1585 
1586 	rwlock_init(&tbl->lock);
1587 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1588 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1589 			tbl->parms.reachable_time);
1590 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1591 	skb_queue_head_init_class(&tbl->proxy_queue,
1592 			&neigh_table_proxy_queue_class);
1593 
1594 	tbl->last_flush = now;
1595 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1596 
1597 	neigh_tables[index] = tbl;
1598 }
1599 EXPORT_SYMBOL(neigh_table_init);
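
/* Each protocol front end registers its table under a fixed index;
 * IPv4 ARP, for instance, does this from arp_init():
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 */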
1600 
1601 int neigh_table_clear(int index, struct neigh_table *tbl)
1602 {
1603 	neigh_tables[index] = NULL;
1604 	/* Not clean... fix this so the IPv6 module can be unloaded safely */
1605 	cancel_delayed_work_sync(&tbl->gc_work);
1606 	del_timer_sync(&tbl->proxy_timer);
1607 	pneigh_queue_purge(&tbl->proxy_queue);
1608 	neigh_ifdown(tbl, NULL);
1609 	if (atomic_read(&tbl->entries))
1610 		pr_crit("neighbour leakage\n");
1611 
1612 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1613 		 neigh_hash_free_rcu);
1614 	tbl->nht = NULL;
1615 
1616 	kfree(tbl->phash_buckets);
1617 	tbl->phash_buckets = NULL;
1618 
1619 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1620 
1621 	free_percpu(tbl->stats);
1622 	tbl->stats = NULL;
1623 
1624 	return 0;
1625 }
1626 EXPORT_SYMBOL(neigh_table_clear);
1627 
1628 static struct neigh_table *neigh_find_table(int family)
1629 {
1630 	struct neigh_table *tbl = NULL;
1631 
1632 	switch (family) {
1633 	case AF_INET:
1634 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1635 		break;
1636 	case AF_INET6:
1637 		tbl = neigh_tables[NEIGH_ND_TABLE];
1638 		break;
1639 	case AF_DECnet:
1640 		tbl = neigh_tables[NEIGH_DN_TABLE];
1641 		break;
1642 	}
1643 
1644 	return tbl;
1645 }
1646 
1647 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1648 			struct netlink_ext_ack *extack)
1649 {
1650 	struct net *net = sock_net(skb->sk);
1651 	struct ndmsg *ndm;
1652 	struct nlattr *dst_attr;
1653 	struct neigh_table *tbl;
1654 	struct neighbour *neigh;
1655 	struct net_device *dev = NULL;
1656 	int err = -EINVAL;
1657 
1658 	ASSERT_RTNL();
1659 	if (nlmsg_len(nlh) < sizeof(*ndm))
1660 		goto out;
1661 
1662 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1663 	if (dst_attr == NULL)
1664 		goto out;
1665 
1666 	ndm = nlmsg_data(nlh);
1667 	if (ndm->ndm_ifindex) {
1668 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1669 		if (dev == NULL) {
1670 			err = -ENODEV;
1671 			goto out;
1672 		}
1673 	}
1674 
1675 	tbl = neigh_find_table(ndm->ndm_family);
1676 	if (tbl == NULL)
1677 		return -EAFNOSUPPORT;
1678 
1679 	if (nla_len(dst_attr) < (int)tbl->key_len)
1680 		goto out;
1681 
1682 	if (ndm->ndm_flags & NTF_PROXY) {
1683 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1684 		goto out;
1685 	}
1686 
1687 	if (dev == NULL)
1688 		goto out;
1689 
1690 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1691 	if (neigh == NULL) {
1692 		err = -ENOENT;
1693 		goto out;
1694 	}
1695 
1696 	err = neigh_update(neigh, NULL, NUD_FAILED,
1697 			   NEIGH_UPDATE_F_OVERRIDE |
1698 			   NEIGH_UPDATE_F_ADMIN,
1699 			   NETLINK_CB(skb).portid);
1700 	write_lock_bh(&tbl->lock);
1701 	neigh_release(neigh);
1702 	neigh_remove_one(neigh, tbl);
1703 	write_unlock_bh(&tbl->lock);
1704 
1705 out:
1706 	return err;
1707 }
1708 
1709 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1710 		     struct netlink_ext_ack *extack)
1711 {
1712 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1713 		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1714 	struct net *net = sock_net(skb->sk);
1715 	struct ndmsg *ndm;
1716 	struct nlattr *tb[NDA_MAX+1];
1717 	struct neigh_table *tbl;
1718 	struct net_device *dev = NULL;
1719 	struct neighbour *neigh;
1720 	void *dst, *lladdr;
1721 	int err;
1722 
1723 	ASSERT_RTNL();
1724 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
1725 	if (err < 0)
1726 		goto out;
1727 
1728 	err = -EINVAL;
1729 	if (tb[NDA_DST] == NULL)
1730 		goto out;
1731 
1732 	ndm = nlmsg_data(nlh);
1733 	if (ndm->ndm_ifindex) {
1734 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1735 		if (dev == NULL) {
1736 			err = -ENODEV;
1737 			goto out;
1738 		}
1739 
1740 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1741 			goto out;
1742 	}
1743 
1744 	tbl = neigh_find_table(ndm->ndm_family);
1745 	if (tbl == NULL)
1746 		return -EAFNOSUPPORT;
1747 
1748 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
1749 		goto out;
1750 	dst = nla_data(tb[NDA_DST]);
1751 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1752 
1753 	if (ndm->ndm_flags & NTF_PROXY) {
1754 		struct pneigh_entry *pn;
1755 
1756 		err = -ENOBUFS;
1757 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1758 		if (pn) {
1759 			pn->flags = ndm->ndm_flags;
1760 			err = 0;
1761 		}
1762 		goto out;
1763 	}
1764 
1765 	if (dev == NULL)
1766 		goto out;
1767 
1768 	neigh = neigh_lookup(tbl, dst, dev);
1769 	if (neigh == NULL) {
1770 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1771 			err = -ENOENT;
1772 			goto out;
1773 		}
1774 
1775 		neigh = __neigh_lookup_errno(tbl, dst, dev);
1776 		if (IS_ERR(neigh)) {
1777 			err = PTR_ERR(neigh);
1778 			goto out;
1779 		}
1780 	} else {
1781 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1782 			err = -EEXIST;
1783 			neigh_release(neigh);
1784 			goto out;
1785 		}
1786 
1787 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1788 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1789 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1790 	}
1791 
1792 	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1793 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1794 
1795 	if (ndm->ndm_flags & NTF_ROUTER)
1796 		flags |= NEIGH_UPDATE_F_ISROUTER;
1797 
1798 	if (ndm->ndm_flags & NTF_USE) {
1799 		neigh_event_send(neigh, NULL);
1800 		err = 0;
1801 	} else
1802 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1803 				   NETLINK_CB(skb).portid);
1804 	neigh_release(neigh);
1805 
1806 out:
1807 	return err;
1808 }
1809 
1810 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1811 {
1812 	struct nlattr *nest;
1813 
1814 	nest = nla_nest_start(skb, NDTA_PARMS);
1815 	if (nest == NULL)
1816 		return -ENOBUFS;
1817 
1818 	if ((parms->dev &&
1819 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1820 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1821 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1822 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1823 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1824 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1825 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1826 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1827 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1828 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1829 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1830 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1831 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1832 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1833 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1834 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1835 			  NDTPA_PAD) ||
1836 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1837 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1838 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1839 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1840 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1841 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1842 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1843 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1844 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1845 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1846 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1847 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1848 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1849 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1850 		goto nla_put_failure;
1851 	return nla_nest_end(skb, nest);
1852 
1853 nla_put_failure:
1854 	nla_nest_cancel(skb, nest);
1855 	return -EMSGSIZE;
1856 }
1857 
1858 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1859 			      u32 pid, u32 seq, int type, int flags)
1860 {
1861 	struct nlmsghdr *nlh;
1862 	struct ndtmsg *ndtmsg;
1863 
1864 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1865 	if (nlh == NULL)
1866 		return -EMSGSIZE;
1867 
1868 	ndtmsg = nlmsg_data(nlh);
1869 
1870 	read_lock_bh(&tbl->lock);
1871 	ndtmsg->ndtm_family = tbl->family;
1872 	ndtmsg->ndtm_pad1   = 0;
1873 	ndtmsg->ndtm_pad2   = 0;
1874 
1875 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1876 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1877 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1878 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1879 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1880 		goto nla_put_failure;
1881 	{
1882 		unsigned long now = jiffies;
1883 		unsigned int flush_delta = now - tbl->last_flush;
1884 		unsigned int rand_delta = now - tbl->last_rand;
1885 		struct neigh_hash_table *nht;
1886 		struct ndt_config ndc = {
1887 			.ndtc_key_len		= tbl->key_len,
1888 			.ndtc_entry_size	= tbl->entry_size,
1889 			.ndtc_entries		= atomic_read(&tbl->entries),
1890 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1891 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1892 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1893 		};
1894 
1895 		rcu_read_lock_bh();
1896 		nht = rcu_dereference_bh(tbl->nht);
1897 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1898 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1899 		rcu_read_unlock_bh();
1900 
1901 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1902 			goto nla_put_failure;
1903 	}
1904 
1905 	{
1906 		int cpu;
1907 		struct ndt_stats ndst;
1908 
1909 		memset(&ndst, 0, sizeof(ndst));
1910 
1911 		for_each_possible_cpu(cpu) {
1912 			struct neigh_statistics	*st;
1913 
1914 			st = per_cpu_ptr(tbl->stats, cpu);
1915 			ndst.ndts_allocs		+= st->allocs;
1916 			ndst.ndts_destroys		+= st->destroys;
1917 			ndst.ndts_hash_grows		+= st->hash_grows;
1918 			ndst.ndts_res_failed		+= st->res_failed;
1919 			ndst.ndts_lookups		+= st->lookups;
1920 			ndst.ndts_hits			+= st->hits;
1921 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1922 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1923 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1924 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1925 			ndst.ndts_table_fulls		+= st->table_fulls;
1926 		}
1927 
1928 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1929 				  NDTA_PAD))
1930 			goto nla_put_failure;
1931 	}
1932 
1933 	BUG_ON(tbl->parms.dev);
1934 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1935 		goto nla_put_failure;
1936 
1937 	read_unlock_bh(&tbl->lock);
1938 	nlmsg_end(skb, nlh);
1939 	return 0;
1940 
1941 nla_put_failure:
1942 	read_unlock_bh(&tbl->lock);
1943 	nlmsg_cancel(skb, nlh);
1944 	return -EMSGSIZE;
1945 }
1946 
1947 static int neightbl_fill_param_info(struct sk_buff *skb,
1948 				    struct neigh_table *tbl,
1949 				    struct neigh_parms *parms,
1950 				    u32 pid, u32 seq, int type,
1951 				    unsigned int flags)
1952 {
1953 	struct ndtmsg *ndtmsg;
1954 	struct nlmsghdr *nlh;
1955 
1956 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1957 	if (nlh == NULL)
1958 		return -EMSGSIZE;
1959 
1960 	ndtmsg = nlmsg_data(nlh);
1961 
1962 	read_lock_bh(&tbl->lock);
1963 	ndtmsg->ndtm_family = tbl->family;
1964 	ndtmsg->ndtm_pad1   = 0;
1965 	ndtmsg->ndtm_pad2   = 0;
1966 
1967 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1968 	    neightbl_fill_parms(skb, parms) < 0)
1969 		goto errout;
1970 
1971 	read_unlock_bh(&tbl->lock);
1972 	nlmsg_end(skb, nlh);
1973 	return 0;
1974 errout:
1975 	read_unlock_bh(&tbl->lock);
1976 	nlmsg_cancel(skb, nlh);
1977 	return -EMSGSIZE;
1978 }
1979 
1980 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1981 	[NDTA_NAME]		= { .type = NLA_STRING },
1982 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1983 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1984 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1985 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1986 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1987 };
1988 
1989 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1990 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1991 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1992 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1993 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1994 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1995 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1996 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1997 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1998 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1999 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2000 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2001 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2002 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2003 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2004 };
2005 
2006 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2007 			struct netlink_ext_ack *extack)
2008 {
2009 	struct net *net = sock_net(skb->sk);
2010 	struct neigh_table *tbl;
2011 	struct ndtmsg *ndtmsg;
2012 	struct nlattr *tb[NDTA_MAX+1];
2013 	bool found = false;
2014 	int err, tidx;
2015 
2016 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2017 			  nl_neightbl_policy, extack);
2018 	if (err < 0)
2019 		goto errout;
2020 
2021 	if (tb[NDTA_NAME] == NULL) {
2022 		err = -EINVAL;
2023 		goto errout;
2024 	}
2025 
2026 	ndtmsg = nlmsg_data(nlh);
2027 
2028 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2029 		tbl = neigh_tables[tidx];
2030 		if (!tbl)
2031 			continue;
2032 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2033 			continue;
2034 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2035 			found = true;
2036 			break;
2037 		}
2038 	}
2039 
2040 	if (!found)
2041 		return -ENOENT;
2042 
2043 	/*
2044 	 * We acquire tbl->lock to be nice to the periodic timers and
2045 	 * make sure they always see a consistent set of values.
2046 	 */
2047 	write_lock_bh(&tbl->lock);
2048 
2049 	if (tb[NDTA_PARMS]) {
2050 		struct nlattr *tbp[NDTPA_MAX+1];
2051 		struct neigh_parms *p;
2052 		int i, ifindex = 0;
2053 
2054 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2055 				       nl_ntbl_parm_policy, extack);
2056 		if (err < 0)
2057 			goto errout_tbl_lock;
2058 
2059 		if (tbp[NDTPA_IFINDEX])
2060 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2061 
2062 		p = lookup_neigh_parms(tbl, net, ifindex);
2063 		if (p == NULL) {
2064 			err = -ENOENT;
2065 			goto errout_tbl_lock;
2066 		}
2067 
2068 		for (i = 1; i <= NDTPA_MAX; i++) {
2069 			if (tbp[i] == NULL)
2070 				continue;
2071 
2072 			switch (i) {
2073 			case NDTPA_QUEUE_LEN:
2074 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2075 					      nla_get_u32(tbp[i]) *
2076 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2077 				break;
2078 			case NDTPA_QUEUE_LENBYTES:
2079 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2080 					      nla_get_u32(tbp[i]));
2081 				break;
2082 			case NDTPA_PROXY_QLEN:
2083 				NEIGH_VAR_SET(p, PROXY_QLEN,
2084 					      nla_get_u32(tbp[i]));
2085 				break;
2086 			case NDTPA_APP_PROBES:
2087 				NEIGH_VAR_SET(p, APP_PROBES,
2088 					      nla_get_u32(tbp[i]));
2089 				break;
2090 			case NDTPA_UCAST_PROBES:
2091 				NEIGH_VAR_SET(p, UCAST_PROBES,
2092 					      nla_get_u32(tbp[i]));
2093 				break;
2094 			case NDTPA_MCAST_PROBES:
2095 				NEIGH_VAR_SET(p, MCAST_PROBES,
2096 					      nla_get_u32(tbp[i]));
2097 				break;
2098 			case NDTPA_MCAST_REPROBES:
2099 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2100 					      nla_get_u32(tbp[i]));
2101 				break;
2102 			case NDTPA_BASE_REACHABLE_TIME:
2103 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2104 					      nla_get_msecs(tbp[i]));
2105 				/* Update reachable_time as well; otherwise the
2106 				 * change only takes effect the next time
2107 				 * neigh_periodic_work recomputes it, which can
2108 				 * be multiple minutes away.
2109 				p->reachable_time =
2110 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2111 				break;
2112 			case NDTPA_GC_STALETIME:
2113 				NEIGH_VAR_SET(p, GC_STALETIME,
2114 					      nla_get_msecs(tbp[i]));
2115 				break;
2116 			case NDTPA_DELAY_PROBE_TIME:
2117 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2118 					      nla_get_msecs(tbp[i]));
2119 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2120 				break;
2121 			case NDTPA_RETRANS_TIME:
2122 				NEIGH_VAR_SET(p, RETRANS_TIME,
2123 					      nla_get_msecs(tbp[i]));
2124 				break;
2125 			case NDTPA_ANYCAST_DELAY:
2126 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2127 					      nla_get_msecs(tbp[i]));
2128 				break;
2129 			case NDTPA_PROXY_DELAY:
2130 				NEIGH_VAR_SET(p, PROXY_DELAY,
2131 					      nla_get_msecs(tbp[i]));
2132 				break;
2133 			case NDTPA_LOCKTIME:
2134 				NEIGH_VAR_SET(p, LOCKTIME,
2135 					      nla_get_msecs(tbp[i]));
2136 				break;
2137 			}
2138 		}
2139 	}
2140 
2141 	err = -ENOENT;
2142 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2143 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2144 	    !net_eq(net, &init_net))
2145 		goto errout_tbl_lock;
2146 
2147 	if (tb[NDTA_THRESH1])
2148 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2149 
2150 	if (tb[NDTA_THRESH2])
2151 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2152 
2153 	if (tb[NDTA_THRESH3])
2154 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2155 
2156 	if (tb[NDTA_GC_INTERVAL])
2157 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2158 
2159 	err = 0;
2160 
2161 errout_tbl_lock:
2162 	write_unlock_bh(&tbl->lock);
2163 errout:
2164 	return err;
2165 }
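
/* A usage sketch: RTM_SETNEIGHTBL is normally driven from iproute2's
 * "ip ntable" command (keyword spellings assumed from iproute2). Note that
 * the code above only allows threshold/interval changes from the initial
 * namespace, while per-device values travel in the nested NDTA_PARMS:
 *
 *	# ip ntable change name arp_cache thresh1 512 thresh2 1024
 *	# ip ntable change name arp_cache dev eth0 base_reachable 30000
 */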
2166 
2167 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2168 				    struct netlink_ext_ack *extack)
2169 {
2170 	struct ndtmsg *ndtm;
2171 
2172 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2173 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2174 		return -EINVAL;
2175 	}
2176 
2177 	ndtm = nlmsg_data(nlh);
2178 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2179 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2180 		return -EINVAL;
2181 	}
2182 
2183 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2184 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2185 		return -EINVAL;
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2192 {
2193 	const struct nlmsghdr *nlh = cb->nlh;
2194 	struct net *net = sock_net(skb->sk);
2195 	int family, tidx, nidx = 0;
2196 	int tbl_skip = cb->args[0];
2197 	int neigh_skip = cb->args[1];
2198 	struct neigh_table *tbl;
2199 
2200 	if (cb->strict_check) {
2201 		int err = neightbl_valid_dump_info(nlh, cb->extack);
2202 
2203 		if (err < 0)
2204 			return err;
2205 	}
2206 
2207 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2208 
2209 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2210 		struct neigh_parms *p;
2211 
2212 		tbl = neigh_tables[tidx];
2213 		if (!tbl)
2214 			continue;
2215 
2216 		if (tidx < tbl_skip || (family && tbl->family != family))
2217 			continue;
2218 
2219 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2220 				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2221 				       NLM_F_MULTI) < 0)
2222 			break;
2223 
2224 		nidx = 0;
2225 		p = list_next_entry(&tbl->parms, list);
2226 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2227 			if (!net_eq(neigh_parms_net(p), net))
2228 				continue;
2229 
2230 			if (nidx < neigh_skip)
2231 				goto next;
2232 
2233 			if (neightbl_fill_param_info(skb, tbl, p,
2234 						     NETLINK_CB(cb->skb).portid,
2235 						     nlh->nlmsg_seq,
2236 						     RTM_NEWNEIGHTBL,
2237 						     NLM_F_MULTI) < 0)
2238 				goto out;
2239 		next:
2240 			nidx++;
2241 		}
2242 
2243 		neigh_skip = 0;
2244 	}
2245 out:
2246 	cb->args[0] = tidx;
2247 	cb->args[1] = nidx;
2248 
2249 	return skb->len;
2250 }
2251 
2252 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2253 			   u32 pid, u32 seq, int type, unsigned int flags)
2254 {
2255 	unsigned long now = jiffies;
2256 	struct nda_cacheinfo ci;
2257 	struct nlmsghdr *nlh;
2258 	struct ndmsg *ndm;
2259 
2260 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2261 	if (nlh == NULL)
2262 		return -EMSGSIZE;
2263 
2264 	ndm = nlmsg_data(nlh);
2265 	ndm->ndm_family	 = neigh->ops->family;
2266 	ndm->ndm_pad1    = 0;
2267 	ndm->ndm_pad2    = 0;
2268 	ndm->ndm_flags	 = neigh->flags;
2269 	ndm->ndm_type	 = neigh->type;
2270 	ndm->ndm_ifindex = neigh->dev->ifindex;
2271 
2272 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2273 		goto nla_put_failure;
2274 
2275 	read_lock_bh(&neigh->lock);
2276 	ndm->ndm_state	 = neigh->nud_state;
2277 	if (neigh->nud_state & NUD_VALID) {
2278 		char haddr[MAX_ADDR_LEN];
2279 
2280 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2281 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2282 			read_unlock_bh(&neigh->lock);
2283 			goto nla_put_failure;
2284 		}
2285 	}
2286 
2287 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2288 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2289 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2290 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2291 	read_unlock_bh(&neigh->lock);
2292 
2293 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2294 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2295 		goto nla_put_failure;
2296 
2297 	nlmsg_end(skb, nlh);
2298 	return 0;
2299 
2300 nla_put_failure:
2301 	nlmsg_cancel(skb, nlh);
2302 	return -EMSGSIZE;
2303 }
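
/* The message built by neigh_fill_info() is what "ip neigh" renders; a
 * rough mapping for one entry (a sketch, attribute names in brackets):
 *
 *	192.0.2.1 [NDA_DST] dev eth0 [ndm_ifindex]
 *	    lladdr 52:54:00:12:34:56 [NDA_LLADDR] REACHABLE [ndm_state]
 */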
2304 
2305 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2306 			    u32 pid, u32 seq, int type, unsigned int flags,
2307 			    struct neigh_table *tbl)
2308 {
2309 	struct nlmsghdr *nlh;
2310 	struct ndmsg *ndm;
2311 
2312 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2313 	if (nlh == NULL)
2314 		return -EMSGSIZE;
2315 
2316 	ndm = nlmsg_data(nlh);
2317 	ndm->ndm_family	 = tbl->family;
2318 	ndm->ndm_pad1    = 0;
2319 	ndm->ndm_pad2    = 0;
2320 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2321 	ndm->ndm_type	 = RTN_UNICAST;
2322 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2323 	ndm->ndm_state	 = NUD_NONE;
2324 
2325 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2326 		goto nla_put_failure;
2327 
2328 	nlmsg_end(skb, nlh);
2329 	return 0;
2330 
2331 nla_put_failure:
2332 	nlmsg_cancel(skb, nlh);
2333 	return -EMSGSIZE;
2334 }
2335 
2336 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2337 {
2338 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2339 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2340 }
2341 
2342 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2343 {
2344 	struct net_device *master;
2345 
2346 	if (!master_idx)
2347 		return false;
2348 
2349 	master = netdev_master_upper_dev_get(dev);
2350 	if (!master || master->ifindex != master_idx)
2351 		return true;
2352 
2353 	return false;
2354 }
2355 
2356 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2357 {
2358 	if (filter_idx && dev->ifindex != filter_idx)
2359 		return true;
2360 
2361 	return false;
2362 }
2363 
2364 struct neigh_dump_filter {
2365 	int master_idx;
2366 	int dev_idx;
2367 };
2368 
2369 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2370 			    struct netlink_callback *cb,
2371 			    struct neigh_dump_filter *filter)
2372 {
2373 	struct net *net = sock_net(skb->sk);
2374 	struct neighbour *n;
2375 	int rc, h, s_h = cb->args[1];
2376 	int idx, s_idx = idx = cb->args[2];
2377 	struct neigh_hash_table *nht;
2378 	unsigned int flags = NLM_F_MULTI;
2379 
2380 	if (filter->dev_idx || filter->master_idx)
2381 		flags |= NLM_F_DUMP_FILTERED;
2382 
2383 	rcu_read_lock_bh();
2384 	nht = rcu_dereference_bh(tbl->nht);
2385 
2386 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2387 		if (h > s_h)
2388 			s_idx = 0;
2389 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2390 		     n != NULL;
2391 		     n = rcu_dereference_bh(n->next)) {
2392 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2393 				goto next;
2394 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2395 			    neigh_master_filtered(n->dev, filter->master_idx))
2396 				goto next;
2397 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2398 					    cb->nlh->nlmsg_seq,
2399 					    RTM_NEWNEIGH,
2400 					    flags) < 0) {
2401 				rc = -1;
2402 				goto out;
2403 			}
2404 next:
2405 			idx++;
2406 		}
2407 	}
2408 	rc = skb->len;
2409 out:
2410 	rcu_read_unlock_bh();
2411 	cb->args[1] = h;
2412 	cb->args[2] = idx;
2413 	return rc;
2414 }
2415 
2416 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2417 			     struct netlink_callback *cb,
2418 			     struct neigh_dump_filter *filter)
2419 {
2420 	struct pneigh_entry *n;
2421 	struct net *net = sock_net(skb->sk);
2422 	int rc, h, s_h = cb->args[3];
2423 	int idx, s_idx = idx = cb->args[4];
2424 	unsigned int flags = NLM_F_MULTI;
2425 
2426 	if (filter->dev_idx || filter->master_idx)
2427 		flags |= NLM_F_DUMP_FILTERED;
2428 
2429 	read_lock_bh(&tbl->lock);
2430 
2431 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2432 		if (h > s_h)
2433 			s_idx = 0;
2434 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2435 			if (idx < s_idx || pneigh_net(n) != net)
2436 				goto next;
2437 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2438 			    neigh_master_filtered(n->dev, filter->master_idx))
2439 				goto next;
2440 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2441 					    cb->nlh->nlmsg_seq,
2442 					    RTM_NEWNEIGH, flags, tbl) < 0) {
2443 				read_unlock_bh(&tbl->lock);
2444 				rc = -1;
2445 				goto out;
2446 			}
2447 		next:
2448 			idx++;
2449 		}
2450 	}
2451 
2452 	read_unlock_bh(&tbl->lock);
2453 	rc = skb->len;
2454 out:
2455 	cb->args[3] = h;
2456 	cb->args[4] = idx;
2457 	return rc;
2459 }
2460 
2461 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2462 				bool strict_check,
2463 				struct neigh_dump_filter *filter,
2464 				struct netlink_ext_ack *extack)
2465 {
2466 	struct nlattr *tb[NDA_MAX + 1];
2467 	int err, i;
2468 
2469 	if (strict_check) {
2470 		struct ndmsg *ndm;
2471 
2472 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2473 			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2474 			return -EINVAL;
2475 		}
2476 
2477 		ndm = nlmsg_data(nlh);
2478 		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2479 		    ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
2480 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2481 			return -EINVAL;
2482 		}
2483 
2484 		err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
2485 					 NULL, extack);
2486 	} else {
2487 		err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
2488 				  NULL, extack);
2489 	}
2490 	if (err < 0)
2491 		return err;
2492 
2493 	for (i = 0; i <= NDA_MAX; ++i) {
2494 		if (!tb[i])
2495 			continue;
2496 
2497 		/* all new attributes should require strict_check */
2498 		switch (i) {
2499 		case NDA_IFINDEX:
2500 			if (nla_len(tb[i]) != sizeof(u32)) {
2501 				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request");
2502 				return -EINVAL;
2503 			}
2504 			filter->dev_idx = nla_get_u32(tb[i]);
2505 			break;
2506 		case NDA_MASTER:
2507 			if (nla_len(tb[i]) != sizeof(u32)) {
2508 				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request");
2509 				return -EINVAL;
2510 			}
2511 			filter->master_idx = nla_get_u32(tb[i]);
2512 			break;
2513 		default:
2514 			if (strict_check) {
2515 				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2516 				return -EINVAL;
2517 			}
2518 		}
2519 	}
2520 
2521 	return 0;
2522 }
2523 
2524 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2525 {
2526 	const struct nlmsghdr *nlh = cb->nlh;
2527 	struct neigh_dump_filter filter = {};
2528 	struct neigh_table *tbl;
2529 	int t, family, s_t;
2530 	int proxy = 0;
2531 	int err;
2532 
2533 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2534 
2535 	/* Check whether a full ndmsg structure is present; the family
2536 	 * member sits at the same offset in rtgenmsg and ndmsg.
2537 	 */
2538 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2539 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2540 		proxy = 1;
2541 
2542 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2543 	if (err < 0 && cb->strict_check)
2544 		return err;
2545 
2546 	s_t = cb->args[0];
2547 
2548 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2549 		tbl = neigh_tables[t];
2550 
2551 		if (!tbl)
2552 			continue;
2553 		if (t < s_t || (family && tbl->family != family))
2554 			continue;
2555 		if (t > s_t)
2556 			memset(&cb->args[1], 0, sizeof(cb->args) -
2557 						sizeof(cb->args[0]));
2558 		if (proxy)
2559 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2560 		else
2561 			err = neigh_dump_table(tbl, skb, cb, &filter);
2562 		if (err < 0)
2563 			break;
2564 	}
2565 
2566 	cb->args[0] = t;
2567 	return skb->len;
2568 }
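
/* In userspace terms (an iproute2 sketch; keyword spellings assumed):
 *
 *	# ip neigh show			- plain neigh_dump_table() walk
 *	# ip neigh show dev eth0	- NDA_IFINDEX filter, dump flagged
 *					  with NLM_F_DUMP_FILTERED
 *	# ip neigh show proxy		- ndm_flags == NTF_PROXY, so the
 *					  pneigh tables are dumped instead
 */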
2569 
2570 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2571 {
2572 	int chain;
2573 	struct neigh_hash_table *nht;
2574 
2575 	rcu_read_lock_bh();
2576 	nht = rcu_dereference_bh(tbl->nht);
2577 
2578 	read_lock(&tbl->lock); /* avoid resizes */
2579 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2580 		struct neighbour *n;
2581 
2582 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2583 		     n != NULL;
2584 		     n = rcu_dereference_bh(n->next))
2585 			cb(n, cookie);
2586 	}
2587 	read_unlock(&tbl->lock);
2588 	rcu_read_unlock_bh();
2589 }
2590 EXPORT_SYMBOL(neigh_for_each);
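
/* A minimal neigh_for_each() sketch (count_arg and count_on_dev are
 * hypothetical, not part of this file; arp_tbl is IPv4's table). The
 * callback runs with tbl->lock read-held and BH disabled, so it must not
 * sleep or retake table locks:
 *
 *	struct count_arg { struct net_device *dev; int cnt; };
 *
 *	static void count_on_dev(struct neighbour *n, void *cookie)
 *	{
 *		struct count_arg *arg = cookie;
 *
 *		if (n->dev == arg->dev)
 *			arg->cnt++;
 *	}
 *
 * invoked as: neigh_for_each(&arp_tbl, count_on_dev, &arg);
 */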
2591 
2592 /* The tbl->lock must be held as a writer and BH disabled. */
2593 void __neigh_for_each_release(struct neigh_table *tbl,
2594 			      int (*cb)(struct neighbour *))
2595 {
2596 	int chain;
2597 	struct neigh_hash_table *nht;
2598 
2599 	nht = rcu_dereference_protected(tbl->nht,
2600 					lockdep_is_held(&tbl->lock));
2601 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2602 		struct neighbour *n;
2603 		struct neighbour __rcu **np;
2604 
2605 		np = &nht->hash_buckets[chain];
2606 		while ((n = rcu_dereference_protected(*np,
2607 					lockdep_is_held(&tbl->lock))) != NULL) {
2608 			int release;
2609 
2610 			write_lock(&n->lock);
2611 			release = cb(n);
2612 			if (release) {
2613 				rcu_assign_pointer(*np,
2614 					rcu_dereference_protected(n->next,
2615 						lockdep_is_held(&tbl->lock)));
2616 				n->dead = 1;
2617 			} else
2618 				np = &n->next;
2619 			write_unlock(&n->lock);
2620 			if (release)
2621 				neigh_cleanup_and_release(n);
2622 		}
2623 	}
2624 }
2625 EXPORT_SYMBOL(__neigh_for_each_release);
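
/* A sketch of __neigh_for_each_release() usage (release_if_down is a
 * hypothetical callback): a nonzero return unhooks and releases the entry,
 * zero keeps it. Per the comment above, the caller must hold tbl->lock for
 * writing with BH disabled:
 *
 *	static int release_if_down(struct neighbour *n)
 *	{
 *		return !(n->dev->flags & IFF_UP);
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_if_down);
 *	write_unlock_bh(&tbl->lock);
 */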
2626 
2627 int neigh_xmit(int index, struct net_device *dev,
2628 	       const void *addr, struct sk_buff *skb)
2629 {
2630 	int err = -EAFNOSUPPORT;
2631 	if (likely(index < NEIGH_NR_TABLES)) {
2632 		struct neigh_table *tbl;
2633 		struct neighbour *neigh;
2634 
2635 		tbl = neigh_tables[index];
2636 		if (!tbl)
2637 			goto out;
2638 		rcu_read_lock_bh();
2639 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2640 		if (!neigh)
2641 			neigh = __neigh_create(tbl, addr, dev, false);
2642 		err = PTR_ERR(neigh);
2643 		if (IS_ERR(neigh)) {
2644 			rcu_read_unlock_bh();
2645 			goto out_kfree_skb;
2646 		}
2647 		err = neigh->output(neigh, skb);
2648 		rcu_read_unlock_bh();
2649 	} else if (index == NEIGH_LINK_TABLE) {
2651 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2652 				      addr, NULL, skb->len);
2653 		if (err < 0)
2654 			goto out_kfree_skb;
2655 		err = dev_queue_xmit(skb);
2656 	}
2657 out:
2658 	return err;
2659 out_kfree_skb:
2660 	kfree_skb(skb);
2661 	goto out;
2662 }
2663 EXPORT_SYMBOL(neigh_xmit);
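
/* A caller sketch for neigh_xmit() (next_hop is a made-up local): index is
 * a neigh_tables slot such as NEIGH_ARP_TABLE, or NEIGH_LINK_TABLE to skip
 * resolution entirely; the skb is consumed on error:
 *
 *	__be32 next_hop = htonl(0xc0000201);	(192.0.2.1)
 *	int err;
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &next_hop, skb);
 */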
2664 
2665 #ifdef CONFIG_PROC_FS
2666 
2667 static struct neighbour *neigh_get_first(struct seq_file *seq)
2668 {
2669 	struct neigh_seq_state *state = seq->private;
2670 	struct net *net = seq_file_net(seq);
2671 	struct neigh_hash_table *nht = state->nht;
2672 	struct neighbour *n = NULL;
2673 	int bucket = state->bucket;
2674 
2675 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2676 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2677 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2678 
2679 		while (n) {
2680 			if (!net_eq(dev_net(n->dev), net))
2681 				goto next;
2682 			if (state->neigh_sub_iter) {
2683 				loff_t fakep = 0;
2684 				void *v;
2685 
2686 				v = state->neigh_sub_iter(state, n, &fakep);
2687 				if (!v)
2688 					goto next;
2689 			}
2690 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2691 				break;
2692 			if (n->nud_state & ~NUD_NOARP)
2693 				break;
2694 next:
2695 			n = rcu_dereference_bh(n->next);
2696 		}
2697 
2698 		if (n)
2699 			break;
2700 	}
2701 	state->bucket = bucket;
2702 
2703 	return n;
2704 }
2705 
2706 static struct neighbour *neigh_get_next(struct seq_file *seq,
2707 					struct neighbour *n,
2708 					loff_t *pos)
2709 {
2710 	struct neigh_seq_state *state = seq->private;
2711 	struct net *net = seq_file_net(seq);
2712 	struct neigh_hash_table *nht = state->nht;
2713 
2714 	if (state->neigh_sub_iter) {
2715 		void *v = state->neigh_sub_iter(state, n, pos);
2716 		if (v)
2717 			return n;
2718 	}
2719 	n = rcu_dereference_bh(n->next);
2720 
2721 	while (1) {
2722 		while (n) {
2723 			if (!net_eq(dev_net(n->dev), net))
2724 				goto next;
2725 			if (state->neigh_sub_iter) {
2726 				void *v = state->neigh_sub_iter(state, n, pos);
2727 				if (v)
2728 					return n;
2729 				goto next;
2730 			}
2731 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2732 				break;
2733 
2734 			if (n->nud_state & ~NUD_NOARP)
2735 				break;
2736 next:
2737 			n = rcu_dereference_bh(n->next);
2738 		}
2739 
2740 		if (n)
2741 			break;
2742 
2743 		if (++state->bucket >= (1 << nht->hash_shift))
2744 			break;
2745 
2746 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2747 	}
2748 
2749 	if (n && pos)
2750 		--(*pos);
2751 	return n;
2752 }
2753 
2754 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2755 {
2756 	struct neighbour *n = neigh_get_first(seq);
2757 
2758 	if (n) {
2759 		--(*pos);
2760 		while (*pos) {
2761 			n = neigh_get_next(seq, n, pos);
2762 			if (!n)
2763 				break;
2764 		}
2765 	}
2766 	return *pos ? NULL : n;
2767 }
2768 
2769 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2770 {
2771 	struct neigh_seq_state *state = seq->private;
2772 	struct net *net = seq_file_net(seq);
2773 	struct neigh_table *tbl = state->tbl;
2774 	struct pneigh_entry *pn = NULL;
2775 	int bucket = state->bucket;
2776 
2777 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2778 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2779 		pn = tbl->phash_buckets[bucket];
2780 		while (pn && !net_eq(pneigh_net(pn), net))
2781 			pn = pn->next;
2782 		if (pn)
2783 			break;
2784 	}
2785 	state->bucket = bucket;
2786 
2787 	return pn;
2788 }
2789 
2790 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2791 					    struct pneigh_entry *pn,
2792 					    loff_t *pos)
2793 {
2794 	struct neigh_seq_state *state = seq->private;
2795 	struct net *net = seq_file_net(seq);
2796 	struct neigh_table *tbl = state->tbl;
2797 
2798 	do {
2799 		pn = pn->next;
2800 	} while (pn && !net_eq(pneigh_net(pn), net));
2801 
2802 	while (!pn) {
2803 		if (++state->bucket > PNEIGH_HASHMASK)
2804 			break;
2805 		pn = tbl->phash_buckets[state->bucket];
2806 		while (pn && !net_eq(pneigh_net(pn), net))
2807 			pn = pn->next;
2808 		if (pn)
2809 			break;
2810 	}
2811 
2812 	if (pn && pos)
2813 		--(*pos);
2814 
2815 	return pn;
2816 }
2817 
2818 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2819 {
2820 	struct pneigh_entry *pn = pneigh_get_first(seq);
2821 
2822 	if (pn) {
2823 		--(*pos);
2824 		while (*pos) {
2825 			pn = pneigh_get_next(seq, pn, pos);
2826 			if (!pn)
2827 				break;
2828 		}
2829 	}
2830 	return *pos ? NULL : pn;
2831 }
2832 
2833 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2834 {
2835 	struct neigh_seq_state *state = seq->private;
2836 	void *rc;
2837 	loff_t idxpos = *pos;
2838 
2839 	rc = neigh_get_idx(seq, &idxpos);
2840 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2841 		rc = pneigh_get_idx(seq, &idxpos);
2842 
2843 	return rc;
2844 }
2845 
2846 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2847 	__acquires(rcu_bh)
2848 {
2849 	struct neigh_seq_state *state = seq->private;
2850 
2851 	state->tbl = tbl;
2852 	state->bucket = 0;
2853 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2854 
2855 	rcu_read_lock_bh();
2856 	state->nht = rcu_dereference_bh(tbl->nht);
2857 
2858 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2859 }
2860 EXPORT_SYMBOL(neigh_seq_start);
2861 
2862 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2863 {
2864 	struct neigh_seq_state *state;
2865 	void *rc;
2866 
2867 	if (v == SEQ_START_TOKEN) {
2868 		rc = neigh_get_first(seq);
2869 		goto out;
2870 	}
2871 
2872 	state = seq->private;
2873 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2874 		rc = neigh_get_next(seq, v, NULL);
2875 		if (rc)
2876 			goto out;
2877 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2878 			rc = pneigh_get_first(seq);
2879 	} else {
2880 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2881 		rc = pneigh_get_next(seq, v, NULL);
2882 	}
2883 out:
2884 	++(*pos);
2885 	return rc;
2886 }
2887 EXPORT_SYMBOL(neigh_seq_next);
2888 
2889 void neigh_seq_stop(struct seq_file *seq, void *v)
2890 	__releases(rcu_bh)
2891 {
2892 	rcu_read_unlock_bh();
2893 }
2894 EXPORT_SYMBOL(neigh_seq_stop);
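
/* These three helpers are designed to be wired straight into a protocol's
 * seq_operations, in the style of net/ipv4/arp.c (arp_seq_show being that
 * file's own show routine):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations arp_seq_ops = {
 *		.start	= arp_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= arp_seq_show,
 *	};
 */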
2895 
2896 /* statistics via seq_file */
2897 
2898 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2899 {
2900 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2901 	int cpu;
2902 
2903 	if (*pos == 0)
2904 		return SEQ_START_TOKEN;
2905 
2906 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2907 		if (!cpu_possible(cpu))
2908 			continue;
2909 		*pos = cpu+1;
2910 		return per_cpu_ptr(tbl->stats, cpu);
2911 	}
2912 	return NULL;
2913 }
2914 
2915 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2916 {
2917 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2918 	int cpu;
2919 
2920 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2921 		if (!cpu_possible(cpu))
2922 			continue;
2923 		*pos = cpu+1;
2924 		return per_cpu_ptr(tbl->stats, cpu);
2925 	}
2926 	return NULL;
2927 }
2928 
2929 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2930 {
2932 }
2933 
2934 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2935 {
2936 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2937 	struct neigh_statistics *st = v;
2938 
2939 	if (v == SEQ_START_TOKEN) {
2940 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2941 		return 0;
2942 	}
2943 
2944 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2945 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2946 		   atomic_read(&tbl->entries),
2947 
2948 		   st->allocs,
2949 		   st->destroys,
2950 		   st->hash_grows,
2951 
2952 		   st->lookups,
2953 		   st->hits,
2954 
2955 		   st->res_failed,
2956 
2957 		   st->rcv_probes_mcast,
2958 		   st->rcv_probes_ucast,
2959 
2960 		   st->periodic_gc_runs,
2961 		   st->forced_gc_runs,
2962 		   st->unres_discards,
2963 		   st->table_fulls
2964 		   );
2965 
2966 	return 0;
2967 }
2968 
2969 static const struct seq_operations neigh_stat_seq_ops = {
2970 	.start	= neigh_stat_seq_start,
2971 	.next	= neigh_stat_seq_next,
2972 	.stop	= neigh_stat_seq_stop,
2973 	.show	= neigh_stat_seq_show,
2974 };
2975 #endif /* CONFIG_PROC_FS */
2976 
2977 static inline size_t neigh_nlmsg_size(void)
2978 {
2979 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2980 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2981 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2982 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2983 	       + nla_total_size(4); /* NDA_PROBES */
2984 }
2985 
2986 static void __neigh_notify(struct neighbour *n, int type, int flags,
2987 			   u32 pid)
2988 {
2989 	struct net *net = dev_net(n->dev);
2990 	struct sk_buff *skb;
2991 	int err = -ENOBUFS;
2992 
2993 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2994 	if (skb == NULL)
2995 		goto errout;
2996 
2997 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
2998 	if (err < 0) {
2999 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3000 		WARN_ON(err == -EMSGSIZE);
3001 		kfree_skb(skb);
3002 		goto errout;
3003 	}
3004 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3005 	return;
3006 errout:
3007 	if (err < 0)
3008 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3009 }
3010 
3011 void neigh_app_ns(struct neighbour *n)
3012 {
3013 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3014 }
3015 EXPORT_SYMBOL(neigh_app_ns);
3016 
3017 #ifdef CONFIG_SYSCTL
3018 static int zero;
3019 static int int_max = INT_MAX;
3020 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3021 
3022 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3023 			   void __user *buffer, size_t *lenp, loff_t *ppos)
3024 {
3025 	int size, ret;
3026 	struct ctl_table tmp = *ctl;
3027 
3028 	tmp.extra1 = &zero;
3029 	tmp.extra2 = &unres_qlen_max;
3030 	tmp.data = &size;
3031 
3032 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3033 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3034 
3035 	if (write && !ret)
3036 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3037 	return ret;
3038 }
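
/* Worked example of the scaling above: the legacy "unres_qlen" knob is
 * measured in SKB_TRUESIZE(ETH_FRAME_LEN) units, so writing 101 stores
 * 101 * SKB_TRUESIZE(1514) in unres_qlen_bytes, and a read divides back
 * down (truncating). The exact per-frame byte cost depends on struct
 * sk_buff layout, so it varies between builds.
 */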
3039 
3040 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3041 						   int family)
3042 {
3043 	switch (family) {
3044 	case AF_INET:
3045 		return __in_dev_arp_parms_get_rcu(dev);
3046 	case AF_INET6:
3047 		return __in6_dev_nd_parms_get_rcu(dev);
3048 	}
3049 	return NULL;
3050 }
3051 
3052 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3053 				  int index)
3054 {
3055 	struct net_device *dev;
3056 	int family = neigh_parms_family(p);
3057 
3058 	rcu_read_lock();
3059 	for_each_netdev_rcu(net, dev) {
3060 		struct neigh_parms *dst_p =
3061 				neigh_get_dev_parms_rcu(dev, family);
3062 
3063 		if (dst_p && !test_bit(index, dst_p->data_state))
3064 			dst_p->data[index] = p->data[index];
3065 	}
3066 	rcu_read_unlock();
3067 }
3068 
3069 static void neigh_proc_update(struct ctl_table *ctl, int write)
3070 {
3071 	struct net_device *dev = ctl->extra1;
3072 	struct neigh_parms *p = ctl->extra2;
3073 	struct net *net = neigh_parms_net(p);
3074 	int index = (int *) ctl->data - p->data;
3075 
3076 	if (!write)
3077 		return;
3078 
3079 	set_bit(index, p->data_state);
3080 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3081 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3082 	if (!dev) /* NULL dev means this is a default value */
3083 		neigh_copy_dflt_parms(net, p, index);
3084 }
3085 
3086 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3087 					   void __user *buffer,
3088 					   size_t *lenp, loff_t *ppos)
3089 {
3090 	struct ctl_table tmp = *ctl;
3091 	int ret;
3092 
3093 	tmp.extra1 = &zero;
3094 	tmp.extra2 = &int_max;
3095 
3096 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3097 	neigh_proc_update(ctl, write);
3098 	return ret;
3099 }
3100 
3101 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3102 			void __user *buffer, size_t *lenp, loff_t *ppos)
3103 {
3104 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3105 
3106 	neigh_proc_update(ctl, write);
3107 	return ret;
3108 }
3109 EXPORT_SYMBOL(neigh_proc_dointvec);
3110 
3111 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3112 				void __user *buffer,
3113 				size_t *lenp, loff_t *ppos)
3114 {
3115 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3116 
3117 	neigh_proc_update(ctl, write);
3118 	return ret;
3119 }
3120 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3121 
3122 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3123 					      void __user *buffer,
3124 					      size_t *lenp, loff_t *ppos)
3125 {
3126 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3127 
3128 	neigh_proc_update(ctl, write);
3129 	return ret;
3130 }
3131 
3132 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3133 				   void __user *buffer,
3134 				   size_t *lenp, loff_t *ppos)
3135 {
3136 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3137 
3138 	neigh_proc_update(ctl, write);
3139 	return ret;
3140 }
3141 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3142 
3143 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3144 					  void __user *buffer,
3145 					  size_t *lenp, loff_t *ppos)
3146 {
3147 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3148 
3149 	neigh_proc_update(ctl, write);
3150 	return ret;
3151 }
3152 
3153 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3154 					  void __user *buffer,
3155 					  size_t *lenp, loff_t *ppos)
3156 {
3157 	struct neigh_parms *p = ctl->extra2;
3158 	int ret;
3159 
3160 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3161 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3162 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3163 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3164 	else
3165 		ret = -1;
3166 
3167 	if (write && ret == 0) {
3168 		/* Update reachable_time as well; otherwise the change
3169 		 * only takes effect the next time neigh_periodic_work
3170 		 * recomputes it.
3171 		 */
3172 		p->reachable_time =
3173 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3174 	}
3175 	return ret;
3176 }
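
/* Either spelling of the knob refreshes reachable_time immediately,
 * e.g. for IPv4 (eth0 stands in for any device):
 *
 *	# echo 30000 > /proc/sys/net/ipv4/neigh/default/base_reachable_time_ms
 *	# sysctl net.ipv4.neigh.eth0.base_reachable_time=30
 */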
3177 
3178 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3179 	(&((struct neigh_parms *) 0)->data[index])
3180 
3181 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3182 	[NEIGH_VAR_ ## attr] = { \
3183 		.procname	= name, \
3184 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3185 		.maxlen		= sizeof(int), \
3186 		.mode		= mval, \
3187 		.proc_handler	= proc, \
3188 	}
3189 
3190 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3191 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3192 
3193 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3194 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3195 
3196 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3197 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3198 
3199 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3200 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3201 
3202 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3203 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3204 
3205 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3206 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
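
/* For reference, NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES,
 * "mcast_solicit") expands via NEIGH_SYSCTL_ENTRY to roughly:
 *
 *	[NEIGH_VAR_MCAST_PROBES] = {
 *		.procname	= "mcast_solicit",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_MCAST_PROBES),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_zero_intmax,
 *	}
 *
 * .data starts out as a bare offset into struct neigh_parms;
 * neigh_sysctl_register() below rebases it onto a real parms instance.
 */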
3207 
3208 static struct neigh_sysctl_table {
3209 	struct ctl_table_header *sysctl_header;
3210 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3211 } neigh_sysctl_template __read_mostly = {
3212 	.neigh_vars = {
3213 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3214 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3215 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3216 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3217 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3218 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3219 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3220 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3221 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3222 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3223 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3224 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3225 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3226 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3227 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3228 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3229 		[NEIGH_VAR_GC_INTERVAL] = {
3230 			.procname	= "gc_interval",
3231 			.maxlen		= sizeof(int),
3232 			.mode		= 0644,
3233 			.proc_handler	= proc_dointvec_jiffies,
3234 		},
3235 		[NEIGH_VAR_GC_THRESH1] = {
3236 			.procname	= "gc_thresh1",
3237 			.maxlen		= sizeof(int),
3238 			.mode		= 0644,
3239 			.extra1 	= &zero,
3240 			.extra2		= &int_max,
3241 			.proc_handler	= proc_dointvec_minmax,
3242 		},
3243 		[NEIGH_VAR_GC_THRESH2] = {
3244 			.procname	= "gc_thresh2",
3245 			.maxlen		= sizeof(int),
3246 			.mode		= 0644,
3247 			.extra1 	= &zero,
3248 			.extra2		= &int_max,
3249 			.proc_handler	= proc_dointvec_minmax,
3250 		},
3251 		[NEIGH_VAR_GC_THRESH3] = {
3252 			.procname	= "gc_thresh3",
3253 			.maxlen		= sizeof(int),
3254 			.mode		= 0644,
3255 			.extra1 	= &zero,
3256 			.extra2		= &int_max,
3257 			.proc_handler	= proc_dointvec_minmax,
3258 		},
3259 		{},
3260 	},
3261 };
3262 
3263 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3264 			  proc_handler *handler)
3265 {
3266 	int i;
3267 	struct neigh_sysctl_table *t;
3268 	const char *dev_name_source;
3269 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3270 	char *p_name;
3271 
3272 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3273 	if (!t)
3274 		goto err;
3275 
3276 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3277 		t->neigh_vars[i].data += (long) p;
3278 		t->neigh_vars[i].extra1 = dev;
3279 		t->neigh_vars[i].extra2 = p;
3280 	}
3281 
3282 	if (dev) {
3283 		dev_name_source = dev->name;
3284 		/* Terminate the table early */
3285 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3286 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3287 	} else {
3288 		struct neigh_table *tbl = p->tbl;
3289 		dev_name_source = "default";
3290 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3291 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3292 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3293 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3294 	}
3295 
3296 	if (handler) {
3297 		/* RetransTime */
3298 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3299 		/* ReachableTime */
3300 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3301 		/* RetransTime (in milliseconds)*/
3302 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3303 		/* ReachableTime (in milliseconds) */
3304 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3305 	} else {
3306 		/* These handlers update p->reachable_time after
3307 		 * base_reachable_time(_ms) is set, so the new interval takes
3308 		 * effect on the next neighbour update instead of waiting for
3309 		 * neigh_periodic_work to recompute it (which can take multiple
3310 		 * minutes). Any handler that replaces them should do the same.
3311 		 */
3312 		/* ReachableTime */
3313 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3314 			neigh_proc_base_reachable_time;
3315 		/* ReachableTime (in milliseconds) */
3316 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3317 			neigh_proc_base_reachable_time;
3318 	}
3319 
3320 	/* Don't export sysctls to unprivileged users */
3321 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3322 		t->neigh_vars[0].procname = NULL;
3323 
3324 	switch (neigh_parms_family(p)) {
3325 	case AF_INET:
3326 		p_name = "ipv4";
3327 		break;
3328 	case AF_INET6:
3329 		p_name = "ipv6";
3330 		break;
3331 	default:
3332 		BUG();
3333 	}
3334 
3335 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3336 		p_name, dev_name_source);
3337 	t->sysctl_header =
3338 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3339 	if (!t->sysctl_header)
3340 		goto free;
3341 
3342 	p->sysctl_table = t;
3343 	return 0;
3344 
3345 free:
3346 	kfree(t);
3347 err:
3348 	return -ENOBUFS;
3349 }
3350 EXPORT_SYMBOL(neigh_sysctl_register);
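
/* A registration sketch (call sites vary by kernel version): IPv4
 * (net/ipv4/devinet.c) passes a NULL handler and picks up the default
 * base_reachable_time handlers patched in above, while IPv6 ndisc supplies
 * its own proc handler:
 *
 *	err = neigh_sysctl_register(dev, idev->arp_parms, NULL);
 *	...
 *	neigh_sysctl_unregister(idev->arp_parms);
 */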
3351 
3352 void neigh_sysctl_unregister(struct neigh_parms *p)
3353 {
3354 	if (p->sysctl_table) {
3355 		struct neigh_sysctl_table *t = p->sysctl_table;
3356 		p->sysctl_table = NULL;
3357 		unregister_net_sysctl_table(t->sysctl_header);
3358 		kfree(t);
3359 	}
3360 }
3361 EXPORT_SYMBOL(neigh_sysctl_unregister);
3362 
3363 #endif	/* CONFIG_SYSCTL */
3364 
3365 static int __init neigh_init(void)
3366 {
3367 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3368 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3369 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
3370 
3371 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3372 		      0);
3373 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3374 
3375 	return 0;
3376 }
3377 
3378 subsys_initcall(neigh_init);
3379