xref: /linux/net/core/neighbour.c (revision 02ff58dcf70ad7d11b01523dc404166ed11021da)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(struct timer_list *t);
55 static void __neigh_notify(struct neighbour *n, int type, int flags,
56 			   u32 pid);
57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 				    struct net_device *dev);
60 
61 #ifdef CONFIG_PROC_FS
62 static const struct seq_operations neigh_stat_seq_ops;
63 #endif
64 
65 /*
66    Neighbour hash table buckets are protected with rwlock tbl->lock.
67 
68    - All scans/updates of hash buckets MUST be made under this lock.
69    - NOTHING clever should be done under this lock: no callbacks
70      into protocol backends, no attempts to send anything to the network.
71      Either will deadlock if the backend/driver wants to use the
72      neighbour cache.
73    - If an entry requires some non-trivial action, increase
74      its reference count and release the table lock.
75 
76    Neighbour entries are protected:
77    - by their reference count.
78    - by the rwlock neigh->lock.
79 
80    The reference count prevents destruction.
81 
82    neigh->lock mainly serializes the ll address data and its validity state.
83    However, the same lock is also used to protect other entry fields:
84     - timer
85     - resolution queue
86 
87    Again, nothing clever should be done under neigh->lock;
88    the most complicated operation we allow there is dev->hard_header.
89    It is assumed that dev->hard_header is simple and does
90    not call back into the neighbour tables.
91  */
92 
93 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
94 {
95 	kfree_skb(skb);
96 	return -ENETDOWN;
97 }
98 
99 static void neigh_cleanup_and_release(struct neighbour *neigh)
100 {
101 	if (neigh->parms->neigh_cleanup)
102 		neigh->parms->neigh_cleanup(neigh);
103 
104 	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
105 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
106 	neigh_release(neigh);
107 }
108 
109 /*
110  * Returns a value randomly distributed in the interval
111  * (1/2)*base ... (3/2)*base. It matches the default IPv6 settings and
112  * is not overridable, because it is a genuinely reasonable choice.
113  */
114 
115 unsigned long neigh_rand_reach_time(unsigned long base)
116 {
117 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
118 }
119 EXPORT_SYMBOL(neigh_rand_reach_time);
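
/* Worked example, assuming HZ=1000 and the default base_reachable_time
 * of 30 seconds: base = 30000, so the result is
 * (prandom_u32() % 30000) + 15000 jiffies, uniformly spread over
 * [15s, 45s) -- the (1/2)*base ... (3/2)*base interval described above.
 * A base of 0 short-circuits to 0.
 */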
120 
121 static void neigh_mark_dead(struct neighbour *n)
122 {
123 	n->dead = 1;
124 	if (!list_empty(&n->gc_list)) {
125 		list_del_init(&n->gc_list);
126 		atomic_dec(&n->tbl->gc_entries);
127 	}
128 }
129 
130 static void neigh_change_state(struct neighbour *n, u8 new)
131 {
132 	bool on_gc_list = !list_empty(&n->gc_list);
133 	bool new_is_perm = new & NUD_PERMANENT;
134 
135 	n->nud_state = new;
136 
137 	/* remove from the gc list if new state is permanent;
138 	 * add to the gc list if new state is not permanent
139 	 */
140 	if (new_is_perm && on_gc_list) {
141 		write_lock_bh(&n->tbl->lock);
142 		list_del_init(&n->gc_list);
143 		write_unlock_bh(&n->tbl->lock);
144 
145 		atomic_dec(&n->tbl->gc_entries);
146 	} else if (!new_is_perm && !on_gc_list) {
147 		/* add entries to the tail; cleaning removes from the front */
148 		write_lock_bh(&n->tbl->lock);
149 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
150 		write_unlock_bh(&n->tbl->lock);
151 
152 		atomic_inc(&n->tbl->gc_entries);
153 	}
154 }
155 
156 static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
157 		      struct neighbour __rcu **np, struct neigh_table *tbl)
158 {
159 	bool retval = false;
160 
161 	write_lock(&n->lock);
162 	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
163 	    !(n->flags & flags)) {
164 		struct neighbour *neigh;
165 
166 		neigh = rcu_dereference_protected(n->next,
167 						  lockdep_is_held(&tbl->lock));
168 		rcu_assign_pointer(*np, neigh);
169 		neigh_mark_dead(n);
170 		retval = true;
171 	}
172 	write_unlock(&n->lock);
173 	if (retval)
174 		neigh_cleanup_and_release(n);
175 	return retval;
176 }
177 
178 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
179 {
180 	struct neigh_hash_table *nht;
181 	void *pkey = ndel->primary_key;
182 	u32 hash_val;
183 	struct neighbour *n;
184 	struct neighbour __rcu **np;
185 
186 	nht = rcu_dereference_protected(tbl->nht,
187 					lockdep_is_held(&tbl->lock));
188 	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
189 	hash_val = hash_val >> (32 - nht->hash_shift);
190 
191 	np = &nht->hash_buckets[hash_val];
192 	while ((n = rcu_dereference_protected(*np,
193 					      lockdep_is_held(&tbl->lock)))) {
194 		if (n == ndel)
195 			return neigh_del(n, 0, 0, np, tbl);
196 		np = &n->next;
197 	}
198 	return false;
199 }
200 
201 static int neigh_forced_gc(struct neigh_table *tbl)
202 {
203 	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
204 	unsigned long tref = jiffies - 5 * HZ;
205 	u8 flags = NTF_EXT_LEARNED;
206 	struct neighbour *n, *tmp;
207 	u8 state = NUD_PERMANENT;
208 	int shrunk = 0;
209 
210 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
211 
212 	write_lock_bh(&tbl->lock);
213 
214 	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
215 		if (refcount_read(&n->refcnt) == 1) {
216 			bool remove = false;
217 
218 			write_lock(&n->lock);
219 			if (!(n->nud_state & state) && !(n->flags & flags) &&
220 			    time_after(tref, n->updated))
221 				remove = true;
222 			write_unlock(&n->lock);
223 
224 			if (remove && neigh_remove_one(n, tbl))
225 				shrunk++;
226 			if (shrunk >= max_clean)
227 				break;
228 		}
229 	}
230 
231 	tbl->last_flush = jiffies;
232 
233 	write_unlock_bh(&tbl->lock);
234 
235 	return shrunk;
236 }
237 
238 static void neigh_add_timer(struct neighbour *n, unsigned long when)
239 {
240 	neigh_hold(n);
241 	if (unlikely(mod_timer(&n->timer, when))) {
242 		printk("NEIGH: BUG, double timer add, state is %x\n",
243 		       n->nud_state);
244 		dump_stack();
245 	}
246 }
247 
248 static int neigh_del_timer(struct neighbour *n)
249 {
250 	if ((n->nud_state & NUD_IN_TIMER) &&
251 	    del_timer(&n->timer)) {
252 		neigh_release(n);
253 		return 1;
254 	}
255 	return 0;
256 }
257 
258 static void pneigh_queue_purge(struct sk_buff_head *list)
259 {
260 	struct sk_buff *skb;
261 
262 	while ((skb = skb_dequeue(list)) != NULL) {
263 		dev_put(skb->dev);
264 		kfree_skb(skb);
265 	}
266 }
267 
268 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
269 			    bool skip_perm)
270 {
271 	int i;
272 	struct neigh_hash_table *nht;
273 
274 	nht = rcu_dereference_protected(tbl->nht,
275 					lockdep_is_held(&tbl->lock));
276 
277 	for (i = 0; i < (1 << nht->hash_shift); i++) {
278 		struct neighbour *n;
279 		struct neighbour __rcu **np = &nht->hash_buckets[i];
280 
281 		while ((n = rcu_dereference_protected(*np,
282 					lockdep_is_held(&tbl->lock))) != NULL) {
283 			if (dev && n->dev != dev) {
284 				np = &n->next;
285 				continue;
286 			}
287 			if (skip_perm && n->nud_state & NUD_PERMANENT) {
288 				np = &n->next;
289 				continue;
290 			}
291 			rcu_assign_pointer(*np,
292 				   rcu_dereference_protected(n->next,
293 						lockdep_is_held(&tbl->lock)));
294 			write_lock(&n->lock);
295 			neigh_del_timer(n);
296 			neigh_mark_dead(n);
297 			if (refcount_read(&n->refcnt) != 1) {
298 				/* This is the most unpleasant situation:
299 				   we must destroy the neighbour entry,
300 				   but someone still holds a reference to it.
301 
302 				   Destruction will be delayed until the
303 				   last user releases us, but we must kill
304 				   the timers etc. and move the entry to a
305 				   safe state.
306 				 */
307 				__skb_queue_purge(&n->arp_queue);
308 				n->arp_queue_len_bytes = 0;
309 				n->output = neigh_blackhole;
310 				if (n->nud_state & NUD_VALID)
311 					n->nud_state = NUD_NOARP;
312 				else
313 					n->nud_state = NUD_NONE;
314 				neigh_dbg(2, "neigh %p is stray\n", n);
315 			}
316 			write_unlock(&n->lock);
317 			neigh_cleanup_and_release(n);
318 		}
319 	}
320 }
321 
322 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
323 {
324 	write_lock_bh(&tbl->lock);
325 	neigh_flush_dev(tbl, dev, false);
326 	write_unlock_bh(&tbl->lock);
327 }
328 EXPORT_SYMBOL(neigh_changeaddr);
329 
330 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
331 			  bool skip_perm)
332 {
333 	write_lock_bh(&tbl->lock);
334 	neigh_flush_dev(tbl, dev, skip_perm);
335 	pneigh_ifdown_and_unlock(tbl, dev);
336 
337 	del_timer_sync(&tbl->proxy_timer);
338 	pneigh_queue_purge(&tbl->proxy_queue);
339 	return 0;
340 }
341 
342 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
343 {
344 	__neigh_ifdown(tbl, dev, true);
345 	return 0;
346 }
347 EXPORT_SYMBOL(neigh_carrier_down);
348 
349 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
350 {
351 	__neigh_ifdown(tbl, dev, false);
352 	return 0;
353 }
354 EXPORT_SYMBOL(neigh_ifdown);
355 
356 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
357 				     struct net_device *dev,
358 				     bool permanent)
359 {
360 	struct neighbour *n = NULL;
361 	unsigned long now = jiffies;
362 	int entries;
363 
364 	if (permanent)
365 		goto do_alloc;
366 
367 	entries = atomic_inc_return(&tbl->gc_entries) - 1;
368 	if (entries >= tbl->gc_thresh3 ||
369 	    (entries >= tbl->gc_thresh2 &&
370 	     time_after(now, tbl->last_flush + 5 * HZ))) {
371 		if (!neigh_forced_gc(tbl) &&
372 		    entries >= tbl->gc_thresh3) {
373 			net_info_ratelimited("%s: neighbor table overflow!\n",
374 					     tbl->id);
375 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
376 			goto out_entries;
377 		}
378 	}
379 
380 do_alloc:
381 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
382 	if (!n)
383 		goto out_entries;
384 
385 	__skb_queue_head_init(&n->arp_queue);
386 	rwlock_init(&n->lock);
387 	seqlock_init(&n->ha_lock);
388 	n->updated	  = n->used = now;
389 	n->nud_state	  = NUD_NONE;
390 	n->output	  = neigh_blackhole;
391 	seqlock_init(&n->hh.hh_lock);
392 	n->parms	  = neigh_parms_clone(&tbl->parms);
393 	timer_setup(&n->timer, neigh_timer_handler, 0);
394 
395 	NEIGH_CACHE_STAT_INC(tbl, allocs);
396 	n->tbl		  = tbl;
397 	refcount_set(&n->refcnt, 1);
398 	n->dead		  = 1;
399 	INIT_LIST_HEAD(&n->gc_list);
400 
401 	atomic_inc(&tbl->entries);
402 out:
403 	return n;
404 
405 out_entries:
406 	if (!permanent)
407 		atomic_dec(&tbl->gc_entries);
408 	goto out;
409 }
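
/* Sizing sketch: how the gc_thresh knobs consulted above interact,
 * using the (assumed) ARP defaults gc_thresh1=128, gc_thresh2=512,
 * gc_thresh3=1024:
 *
 *	entries <  512   allocate freely (gc_thresh1 only gates the
 *	                 periodic worker, see neigh_periodic_work())
 *	entries >= 512   allocate, but run neigh_forced_gc() first if
 *	                 the last flush was more than 5 seconds ago
 *	entries >= 1024  always try neigh_forced_gc(); if it reclaims
 *	                 nothing, fail with "neighbor table overflow!"
 *
 * Entries created as permanent skip this accounting entirely via the
 * "goto do_alloc" path above.
 */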
410 
411 static void neigh_get_hash_rnd(u32 *x)
412 {
413 	*x = get_random_u32() | 1;
414 }
415 
416 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
417 {
418 	size_t size = (1 << shift) * sizeof(struct neighbour *);
419 	struct neigh_hash_table *ret;
420 	struct neighbour __rcu **buckets;
421 	int i;
422 
423 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
424 	if (!ret)
425 		return NULL;
426 	if (size <= PAGE_SIZE)
427 		buckets = kzalloc(size, GFP_ATOMIC);
428 	else
429 		buckets = (struct neighbour __rcu **)
430 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
431 					   get_order(size));
432 	if (!buckets) {
433 		kfree(ret);
434 		return NULL;
435 	}
436 	ret->hash_buckets = buckets;
437 	ret->hash_shift = shift;
438 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
439 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
440 	return ret;
441 }
442 
443 static void neigh_hash_free_rcu(struct rcu_head *head)
444 {
445 	struct neigh_hash_table *nht = container_of(head,
446 						    struct neigh_hash_table,
447 						    rcu);
448 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
449 	struct neighbour __rcu **buckets = nht->hash_buckets;
450 
451 	if (size <= PAGE_SIZE)
452 		kfree(buckets);
453 	else
454 		free_pages((unsigned long)buckets, get_order(size));
455 	kfree(nht);
456 }
457 
458 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
459 						unsigned long new_shift)
460 {
461 	unsigned int i, hash;
462 	struct neigh_hash_table *new_nht, *old_nht;
463 
464 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
465 
466 	old_nht = rcu_dereference_protected(tbl->nht,
467 					    lockdep_is_held(&tbl->lock));
468 	new_nht = neigh_hash_alloc(new_shift);
469 	if (!new_nht)
470 		return old_nht;
471 
472 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
473 		struct neighbour *n, *next;
474 
475 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
476 						   lockdep_is_held(&tbl->lock));
477 		     n != NULL;
478 		     n = next) {
479 			hash = tbl->hash(n->primary_key, n->dev,
480 					 new_nht->hash_rnd);
481 
482 			hash >>= (32 - new_nht->hash_shift);
483 			next = rcu_dereference_protected(n->next,
484 						lockdep_is_held(&tbl->lock));
485 
486 			rcu_assign_pointer(n->next,
487 					   rcu_dereference_protected(
488 						new_nht->hash_buckets[hash],
489 						lockdep_is_held(&tbl->lock)));
490 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
491 		}
492 	}
493 
494 	rcu_assign_pointer(tbl->nht, new_nht);
495 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
496 	return new_nht;
497 }
498 
499 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
500 			       struct net_device *dev)
501 {
502 	struct neighbour *n;
503 
504 	NEIGH_CACHE_STAT_INC(tbl, lookups);
505 
506 	rcu_read_lock_bh();
507 	n = __neigh_lookup_noref(tbl, pkey, dev);
508 	if (n) {
509 		if (!refcount_inc_not_zero(&n->refcnt))
510 			n = NULL;
511 		NEIGH_CACHE_STAT_INC(tbl, hits);
512 	}
513 
514 	rcu_read_unlock_bh();
515 	return n;
516 }
517 EXPORT_SYMBOL(neigh_lookup);
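
/* Usage sketch (hypothetical caller): neigh_lookup() returns the entry
 * with its refcount raised, or NULL, so a hit must be balanced with
 * neigh_release(). 'arp_tbl', 'dst_ip' and 'dev' stand in for a real
 * table, key and device:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &dst_ip, dev);
 *
 *	if (n) {
 *		// read fields under read_lock_bh(&n->lock) as needed
 *		neigh_release(n);
 *	}
 */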
518 
519 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
520 				     const void *pkey)
521 {
522 	struct neighbour *n;
523 	unsigned int key_len = tbl->key_len;
524 	u32 hash_val;
525 	struct neigh_hash_table *nht;
526 
527 	NEIGH_CACHE_STAT_INC(tbl, lookups);
528 
529 	rcu_read_lock_bh();
530 	nht = rcu_dereference_bh(tbl->nht);
531 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
532 
533 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
534 	     n != NULL;
535 	     n = rcu_dereference_bh(n->next)) {
536 		if (!memcmp(n->primary_key, pkey, key_len) &&
537 		    net_eq(dev_net(n->dev), net)) {
538 			if (!refcount_inc_not_zero(&n->refcnt))
539 				n = NULL;
540 			NEIGH_CACHE_STAT_INC(tbl, hits);
541 			break;
542 		}
543 	}
544 
545 	rcu_read_unlock_bh();
546 	return n;
547 }
548 EXPORT_SYMBOL(neigh_lookup_nodev);
549 
550 static struct neighbour *___neigh_create(struct neigh_table *tbl,
551 					 const void *pkey,
552 					 struct net_device *dev,
553 					 bool permanent, bool want_ref)
554 {
555 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, permanent);
556 	u32 hash_val;
557 	unsigned int key_len = tbl->key_len;
558 	int error;
559 	struct neigh_hash_table *nht;
560 
561 	if (!n) {
562 		rc = ERR_PTR(-ENOBUFS);
563 		goto out;
564 	}
565 
566 	memcpy(n->primary_key, pkey, key_len);
567 	n->dev = dev;
568 	dev_hold(dev);
569 
570 	/* Protocol specific setup. */
571 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
572 		rc = ERR_PTR(error);
573 		goto out_neigh_release;
574 	}
575 
576 	if (dev->netdev_ops->ndo_neigh_construct) {
577 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
578 		if (error < 0) {
579 			rc = ERR_PTR(error);
580 			goto out_neigh_release;
581 		}
582 	}
583 
584 	/* Device specific setup. */
585 	if (n->parms->neigh_setup &&
586 	    (error = n->parms->neigh_setup(n)) < 0) {
587 		rc = ERR_PTR(error);
588 		goto out_neigh_release;
589 	}
590 
591 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
592 
593 	write_lock_bh(&tbl->lock);
594 	nht = rcu_dereference_protected(tbl->nht,
595 					lockdep_is_held(&tbl->lock));
596 
597 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
598 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
599 
600 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
601 
602 	if (n->parms->dead) {
603 		rc = ERR_PTR(-EINVAL);
604 		goto out_tbl_unlock;
605 	}
606 
607 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
608 					    lockdep_is_held(&tbl->lock));
609 	     n1 != NULL;
610 	     n1 = rcu_dereference_protected(n1->next,
611 			lockdep_is_held(&tbl->lock))) {
612 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
613 			if (want_ref)
614 				neigh_hold(n1);
615 			rc = n1;
616 			goto out_tbl_unlock;
617 		}
618 	}
619 
620 	n->dead = 0;
621 	if (!permanent)
622 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
623 
624 	if (want_ref)
625 		neigh_hold(n);
626 	rcu_assign_pointer(n->next,
627 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
628 						     lockdep_is_held(&tbl->lock)));
629 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
630 	write_unlock_bh(&tbl->lock);
631 	neigh_dbg(2, "neigh %p is created\n", n);
632 	rc = n;
633 out:
634 	return rc;
635 out_tbl_unlock:
636 	write_unlock_bh(&tbl->lock);
637 out_neigh_release:
638 	neigh_release(n);
639 	goto out;
640 }
641 
642 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
643 				 struct net_device *dev, bool want_ref)
644 {
645 	return ___neigh_create(tbl, pkey, dev, false, want_ref);
646 }
647 EXPORT_SYMBOL(__neigh_create);
648 
649 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
650 {
651 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
652 	hash_val ^= (hash_val >> 16);
653 	hash_val ^= hash_val >> 8;
654 	hash_val ^= hash_val >> 4;
655 	hash_val &= PNEIGH_HASHMASK;
656 	return hash_val;
657 }
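
/* Worked example: the three folds above XOR every nibble of hash_val
 * into bits 0-3 (shifts of 16, 8 and 4 reach nibble offsets
 * {0,4}+{0,2}+{0,1} = 0..7, each exactly once), so the bucket is just
 * the XOR of all eight hex digits of the last four key bytes. E.g. for
 * hash_val 0x12345678: 1^2^3^4^5^6^7^8 = 0x8, i.e. bucket 8 of the
 * PNEIGH_HASHMASK+1 = 16 buckets.
 */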
658 
659 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
660 					      struct net *net,
661 					      const void *pkey,
662 					      unsigned int key_len,
663 					      struct net_device *dev)
664 {
665 	while (n) {
666 		if (!memcmp(n->key, pkey, key_len) &&
667 		    net_eq(pneigh_net(n), net) &&
668 		    (n->dev == dev || !n->dev))
669 			return n;
670 		n = n->next;
671 	}
672 	return NULL;
673 }
674 
675 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
676 		struct net *net, const void *pkey, struct net_device *dev)
677 {
678 	unsigned int key_len = tbl->key_len;
679 	u32 hash_val = pneigh_hash(pkey, key_len);
680 
681 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
682 				 net, pkey, key_len, dev);
683 }
684 EXPORT_SYMBOL_GPL(__pneigh_lookup);
685 
686 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
687 				    struct net *net, const void *pkey,
688 				    struct net_device *dev, int creat)
689 {
690 	struct pneigh_entry *n;
691 	unsigned int key_len = tbl->key_len;
692 	u32 hash_val = pneigh_hash(pkey, key_len);
693 
694 	read_lock_bh(&tbl->lock);
695 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
696 			      net, pkey, key_len, dev);
697 	read_unlock_bh(&tbl->lock);
698 
699 	if (n || !creat)
700 		goto out;
701 
702 	ASSERT_RTNL();
703 
704 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
705 	if (!n)
706 		goto out;
707 
708 	write_pnet(&n->net, net);
709 	memcpy(n->key, pkey, key_len);
710 	n->dev = dev;
711 	if (dev)
712 		dev_hold(dev);
713 
714 	if (tbl->pconstructor && tbl->pconstructor(n)) {
715 		if (dev)
716 			dev_put(dev);
717 		kfree(n);
718 		n = NULL;
719 		goto out;
720 	}
721 
722 	write_lock_bh(&tbl->lock);
723 	n->next = tbl->phash_buckets[hash_val];
724 	tbl->phash_buckets[hash_val] = n;
725 	write_unlock_bh(&tbl->lock);
726 out:
727 	return n;
728 }
729 EXPORT_SYMBOL(pneigh_lookup);
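
/* Usage sketch (hypothetical caller): creating a proxy entry, roughly
 * what the netlink NTF_PROXY path below does. With creat=1 the caller
 * must hold RTNL; 'arp_tbl' and 'proxied_ip' are stand-ins:
 *
 *	ASSERT_RTNL();
 *	pn = pneigh_lookup(&arp_tbl, net, &proxied_ip, dev, 1);
 *	if (!pn)
 *		return -ENOBUFS;
 *	pn->flags = NTF_PROXY;
 */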
730 
731 
732 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
733 		  struct net_device *dev)
734 {
735 	struct pneigh_entry *n, **np;
736 	unsigned int key_len = tbl->key_len;
737 	u32 hash_val = pneigh_hash(pkey, key_len);
738 
739 	write_lock_bh(&tbl->lock);
740 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
741 	     np = &n->next) {
742 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
743 		    net_eq(pneigh_net(n), net)) {
744 			*np = n->next;
745 			write_unlock_bh(&tbl->lock);
746 			if (tbl->pdestructor)
747 				tbl->pdestructor(n);
748 			if (n->dev)
749 				dev_put(n->dev);
750 			kfree(n);
751 			return 0;
752 		}
753 	}
754 	write_unlock_bh(&tbl->lock);
755 	return -ENOENT;
756 }
757 
758 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
759 				    struct net_device *dev)
760 {
761 	struct pneigh_entry *n, **np, *freelist = NULL;
762 	u32 h;
763 
764 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
765 		np = &tbl->phash_buckets[h];
766 		while ((n = *np) != NULL) {
767 			if (!dev || n->dev == dev) {
768 				*np = n->next;
769 				n->next = freelist;
770 				freelist = n;
771 				continue;
772 			}
773 			np = &n->next;
774 		}
775 	}
776 	write_unlock_bh(&tbl->lock);
777 	while ((n = freelist)) {
778 		freelist = n->next;
779 		n->next = NULL;
780 		if (tbl->pdestructor)
781 			tbl->pdestructor(n);
782 		if (n->dev)
783 			dev_put(n->dev);
784 		kfree(n);
785 	}
786 	return -ENOENT;
787 }
788 
789 static void neigh_parms_destroy(struct neigh_parms *parms);
790 
791 static inline void neigh_parms_put(struct neigh_parms *parms)
792 {
793 	if (refcount_dec_and_test(&parms->refcnt))
794 		neigh_parms_destroy(parms);
795 }
796 
797 /*
798  *	The neighbour must already have been unlinked
799  *	from the table.
800  */
801 void neigh_destroy(struct neighbour *neigh)
802 {
803 	struct net_device *dev = neigh->dev;
804 
805 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
806 
807 	if (!neigh->dead) {
808 		pr_warn("Destroying alive neighbour %p\n", neigh);
809 		dump_stack();
810 		return;
811 	}
812 
813 	if (neigh_del_timer(neigh))
814 		pr_warn("Impossible event\n");
815 
816 	write_lock_bh(&neigh->lock);
817 	__skb_queue_purge(&neigh->arp_queue);
818 	write_unlock_bh(&neigh->lock);
819 	neigh->arp_queue_len_bytes = 0;
820 
821 	if (dev->netdev_ops->ndo_neigh_destroy)
822 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
823 
824 	dev_put(dev);
825 	neigh_parms_put(neigh->parms);
826 
827 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
828 
829 	atomic_dec(&neigh->tbl->entries);
830 	kfree_rcu(neigh, rcu);
831 }
832 EXPORT_SYMBOL(neigh_destroy);
833 
834 /* Neighbour state is suspect;
835    disable the fast path.
836 
837    Called with neigh write-locked.
838  */
839 static void neigh_suspect(struct neighbour *neigh)
840 {
841 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
842 
843 	neigh->output = neigh->ops->output;
844 }
845 
846 /* Neighbour state is OK;
847    enable the fast path.
848 
849    Called with neigh write-locked.
850  */
851 static void neigh_connect(struct neighbour *neigh)
852 {
853 	neigh_dbg(2, "neigh %p is connected\n", neigh);
854 
855 	neigh->output = neigh->ops->connected_output;
856 }
857 
858 static void neigh_periodic_work(struct work_struct *work)
859 {
860 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
861 	struct neighbour *n;
862 	struct neighbour __rcu **np;
863 	unsigned int i;
864 	struct neigh_hash_table *nht;
865 
866 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
867 
868 	write_lock_bh(&tbl->lock);
869 	nht = rcu_dereference_protected(tbl->nht,
870 					lockdep_is_held(&tbl->lock));
871 
872 	/*
873 	 *	Periodically recompute ReachableTime from a random function.
874 	 */
875 
876 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
877 		struct neigh_parms *p;
878 		tbl->last_rand = jiffies;
879 		list_for_each_entry(p, &tbl->parms_list, list)
880 			p->reachable_time =
881 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
882 	}
883 
884 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
885 		goto out;
886 
887 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
888 		np = &nht->hash_buckets[i];
889 
890 		while ((n = rcu_dereference_protected(*np,
891 				lockdep_is_held(&tbl->lock))) != NULL) {
892 			unsigned int state;
893 
894 			write_lock(&n->lock);
895 
896 			state = n->nud_state;
897 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
898 			    (n->flags & NTF_EXT_LEARNED)) {
899 				write_unlock(&n->lock);
900 				goto next_elt;
901 			}
902 
903 			if (time_before(n->used, n->confirmed))
904 				n->used = n->confirmed;
905 
906 			if (refcount_read(&n->refcnt) == 1 &&
907 			    (state == NUD_FAILED ||
908 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
909 				*np = n->next;
910 				neigh_mark_dead(n);
911 				write_unlock(&n->lock);
912 				neigh_cleanup_and_release(n);
913 				continue;
914 			}
915 			write_unlock(&n->lock);
916 
917 next_elt:
918 			np = &n->next;
919 		}
920 		/*
921 		 * It's fine to release lock here, even if hash table
922 		 * grows while we are preempted.
923 		 */
924 		write_unlock_bh(&tbl->lock);
925 		cond_resched();
926 		write_lock_bh(&tbl->lock);
927 		nht = rcu_dereference_protected(tbl->nht,
928 						lockdep_is_held(&tbl->lock));
929 	}
930 out:
931 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
932 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
933 	 * BASE_REACHABLE_TIME.
934 	 */
935 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
936 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
937 	write_unlock_bh(&tbl->lock);
938 }
939 
940 static __inline__ int neigh_max_probes(struct neighbour *n)
941 {
942 	struct neigh_parms *p = n->parms;
943 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
944 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
945 	        NEIGH_VAR(p, MCAST_PROBES));
946 }
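
/* Worked example with the (assumed) ARP defaults ucast_solicit=3,
 * app_solicit=0, mcast_solicit=3, mcast_resolicit=0: a resolving entry
 * in NUD_INCOMPLETE is allowed 3 + 0 + 3 = 6 probes, while an entry
 * being re-verified in NUD_PROBE gets only 3 + 0 + 0 = 3, because
 * MCAST_REPROBES replaces MCAST_PROBES there.
 */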
947 
948 static void neigh_invalidate(struct neighbour *neigh)
949 	__releases(neigh->lock)
950 	__acquires(neigh->lock)
951 {
952 	struct sk_buff *skb;
953 
954 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
955 	neigh_dbg(2, "neigh %p is failed\n", neigh);
956 	neigh->updated = jiffies;
957 
958 	/* This is a very delicate spot: the error_report routine is very
959 	   complicated, and in particular it can hit this same neighbour entry!
960 
961 	   Hence, we try to be careful and avoid an endless loop. --ANK
962 	 */
963 	while (neigh->nud_state == NUD_FAILED &&
964 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
965 		write_unlock(&neigh->lock);
966 		neigh->ops->error_report(neigh, skb);
967 		write_lock(&neigh->lock);
968 	}
969 	__skb_queue_purge(&neigh->arp_queue);
970 	neigh->arp_queue_len_bytes = 0;
971 }
972 
973 static void neigh_probe(struct neighbour *neigh)
974 	__releases(neigh->lock)
975 {
976 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
977 	/* keep skb alive even if arp_queue overflows */
978 	if (skb)
979 		skb = skb_clone(skb, GFP_ATOMIC);
980 	write_unlock(&neigh->lock);
981 	if (neigh->ops->solicit)
982 		neigh->ops->solicit(neigh, skb);
983 	atomic_inc(&neigh->probes);
984 	kfree_skb(skb);
985 }
986 
987 /* Called when a timer expires for a neighbour entry. */
988 
989 static void neigh_timer_handler(struct timer_list *t)
990 {
991 	unsigned long now, next;
992 	struct neighbour *neigh = from_timer(neigh, t, timer);
993 	unsigned int state;
994 	int notify = 0;
995 
996 	write_lock(&neigh->lock);
997 
998 	state = neigh->nud_state;
999 	now = jiffies;
1000 	next = now + HZ;
1001 
1002 	if (!(state & NUD_IN_TIMER))
1003 		goto out;
1004 
1005 	if (state & NUD_REACHABLE) {
1006 		if (time_before_eq(now,
1007 				   neigh->confirmed + neigh->parms->reachable_time)) {
1008 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1009 			next = neigh->confirmed + neigh->parms->reachable_time;
1010 		} else if (time_before_eq(now,
1011 					  neigh->used +
1012 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1013 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1014 			neigh->nud_state = NUD_DELAY;
1015 			neigh->updated = jiffies;
1016 			neigh_suspect(neigh);
1017 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1018 		} else {
1019 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1020 			neigh->nud_state = NUD_STALE;
1021 			neigh->updated = jiffies;
1022 			neigh_suspect(neigh);
1023 			notify = 1;
1024 		}
1025 	} else if (state & NUD_DELAY) {
1026 		if (time_before_eq(now,
1027 				   neigh->confirmed +
1028 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1029 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1030 			neigh->nud_state = NUD_REACHABLE;
1031 			neigh->updated = jiffies;
1032 			neigh_connect(neigh);
1033 			notify = 1;
1034 			next = neigh->confirmed + neigh->parms->reachable_time;
1035 		} else {
1036 			neigh_dbg(2, "neigh %p is probed\n", neigh);
1037 			neigh->nud_state = NUD_PROBE;
1038 			neigh->updated = jiffies;
1039 			atomic_set(&neigh->probes, 0);
1040 			notify = 1;
1041 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
1042 		}
1043 	} else {
1044 		/* NUD_PROBE|NUD_INCOMPLETE */
1045 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
1046 	}
1047 
1048 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1049 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1050 		neigh->nud_state = NUD_FAILED;
1051 		notify = 1;
1052 		neigh_invalidate(neigh);
1053 		goto out;
1054 	}
1055 
1056 	if (neigh->nud_state & NUD_IN_TIMER) {
1057 		if (time_before(next, jiffies + HZ/2))
1058 			next = jiffies + HZ/2;
1059 		if (!mod_timer(&neigh->timer, next))
1060 			neigh_hold(neigh);
1061 	}
1062 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1063 		neigh_probe(neigh);
1064 	} else {
1065 out:
1066 		write_unlock(&neigh->lock);
1067 	}
1068 
1069 	if (notify)
1070 		neigh_update_notify(neigh, 0);
1071 
1072 	neigh_release(neigh);
1073 }
1074 
1075 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1076 {
1077 	int rc;
1078 	bool immediate_probe = false;
1079 
1080 	write_lock_bh(&neigh->lock);
1081 
1082 	rc = 0;
1083 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1084 		goto out_unlock_bh;
1085 	if (neigh->dead)
1086 		goto out_dead;
1087 
1088 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1089 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1090 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1091 			unsigned long next, now = jiffies;
1092 
1093 			atomic_set(&neigh->probes,
1094 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1095 			neigh->nud_state     = NUD_INCOMPLETE;
1096 			neigh->updated = now;
1097 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1098 					 HZ/2);
1099 			neigh_add_timer(neigh, next);
1100 			immediate_probe = true;
1101 		} else {
1102 			neigh->nud_state = NUD_FAILED;
1103 			neigh->updated = jiffies;
1104 			write_unlock_bh(&neigh->lock);
1105 
1106 			kfree_skb(skb);
1107 			return 1;
1108 		}
1109 	} else if (neigh->nud_state & NUD_STALE) {
1110 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1111 		neigh->nud_state = NUD_DELAY;
1112 		neigh->updated = jiffies;
1113 		neigh_add_timer(neigh, jiffies +
1114 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1115 	}
1116 
1117 	if (neigh->nud_state == NUD_INCOMPLETE) {
1118 		if (skb) {
1119 			while (neigh->arp_queue_len_bytes + skb->truesize >
1120 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1121 				struct sk_buff *buff;
1122 
1123 				buff = __skb_dequeue(&neigh->arp_queue);
1124 				if (!buff)
1125 					break;
1126 				neigh->arp_queue_len_bytes -= buff->truesize;
1127 				kfree_skb(buff);
1128 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1129 			}
1130 			skb_dst_force(skb);
1131 			__skb_queue_tail(&neigh->arp_queue, skb);
1132 			neigh->arp_queue_len_bytes += skb->truesize;
1133 		}
1134 		rc = 1;
1135 	}
1136 out_unlock_bh:
1137 	if (immediate_probe)
1138 		neigh_probe(neigh);
1139 	else
1140 		write_unlock(&neigh->lock);
1141 	local_bh_enable();
1142 	return rc;
1143 
1144 out_dead:
1145 	if (neigh->nud_state & NUD_STALE)
1146 		goto out_unlock_bh;
1147 	write_unlock_bh(&neigh->lock);
1148 	kfree_skb(skb);
1149 	return 1;
1150 }
1151 EXPORT_SYMBOL(__neigh_event_send);
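
/* Summary sketch of the NUD transitions driven by neigh_timer_handler()
 * and __neigh_event_send() above (confirmation of an address arrives
 * through neigh_update() below):
 *
 *	NONE -traffic-> INCOMPLETE -reply-> REACHABLE
 *	INCOMPLETE/PROBE -probes exhausted-> FAILED
 *	REACHABLE -reachable_time idle-> STALE  (or DELAY if used recently)
 *	STALE -traffic-> DELAY
 *	DELAY -confirmed in delay_probe_time-> REACHABLE, else -> PROBE
 *	PROBE -reply-> REACHABLE
 */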
1152 
1153 static void neigh_update_hhs(struct neighbour *neigh)
1154 {
1155 	struct hh_cache *hh;
1156 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1157 		= NULL;
1158 
1159 	if (neigh->dev->header_ops)
1160 		update = neigh->dev->header_ops->cache_update;
1161 
1162 	if (update) {
1163 		hh = &neigh->hh;
1164 		if (hh->hh_len) {
1165 			write_seqlock_bh(&hh->hh_lock);
1166 			update(hh, neigh->dev, neigh->ha);
1167 			write_sequnlock_bh(&hh->hh_lock);
1168 		}
1169 	}
1170 }
1171 
1172 
1173 
1174 /* Generic update routine.
1175    -- lladdr is the new lladdr, or NULL if none is supplied.
1176    -- new    is the new state.
1177    -- flags
1178 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1179 				if it is different.
1180 	NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
1181 				lladdr as suspect instead of overriding
1182 				it if it is different.
1183 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1184 
1185 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1186 				NTF_ROUTER flag.
1187 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known
1188 				to be a router.
1189 
1190    Caller MUST hold a reference on the entry.
1191  */
1192 
1193 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1194 			  u8 new, u32 flags, u32 nlmsg_pid,
1195 			  struct netlink_ext_ack *extack)
1196 {
1197 	u8 old;
1198 	int err;
1199 	int notify = 0;
1200 	struct net_device *dev;
1201 	int update_isrouter = 0;
1202 
1203 	write_lock_bh(&neigh->lock);
1204 
1205 	dev    = neigh->dev;
1206 	old    = neigh->nud_state;
1207 	err    = -EPERM;
1208 
1209 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1210 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1211 		goto out;
1212 	if (neigh->dead) {
1213 		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1214 		goto out;
1215 	}
1216 
1217 	neigh_update_ext_learned(neigh, flags, &notify);
1218 
1219 	if (!(new & NUD_VALID)) {
1220 		neigh_del_timer(neigh);
1221 		if (old & NUD_CONNECTED)
1222 			neigh_suspect(neigh);
1223 		neigh_change_state(neigh, new);
1224 		err = 0;
1225 		notify = old & NUD_VALID;
1226 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1227 		    (new & NUD_FAILED)) {
1228 			neigh_invalidate(neigh);
1229 			notify = 1;
1230 		}
1231 		goto out;
1232 	}
1233 
1234 	/* Compare new lladdr with cached one */
1235 	if (!dev->addr_len) {
1236 		/* First case: device needs no address. */
1237 		lladdr = neigh->ha;
1238 	} else if (lladdr) {
1239 		/* The second case: if something is already cached
1240 		   and a new address is proposed:
1241 		   - compare new & old
1242 		   - if they are different, check override flag
1243 		 */
1244 		if ((old & NUD_VALID) &&
1245 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1246 			lladdr = neigh->ha;
1247 	} else {
1248 		/* No address is supplied; if we know something,
1249 		   use it, otherwise discard the request.
1250 		 */
1251 		err = -EINVAL;
1252 		if (!(old & NUD_VALID)) {
1253 			NL_SET_ERR_MSG(extack, "No link layer address given");
1254 			goto out;
1255 		}
1256 		lladdr = neigh->ha;
1257 	}
1258 
1259 	/* Update the confirmed timestamp for the neighbour entry after we
1260 	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1261 	 */
1262 	if (new & NUD_CONNECTED)
1263 		neigh->confirmed = jiffies;
1264 
1265 	/* If the entry was valid and the address has not changed,
1266 	   do not change the entry state if the new one is STALE.
1267 	 */
1268 	err = 0;
1269 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1270 	if (old & NUD_VALID) {
1271 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1272 			update_isrouter = 0;
1273 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1274 			    (old & NUD_CONNECTED)) {
1275 				lladdr = neigh->ha;
1276 				new = NUD_STALE;
1277 			} else
1278 				goto out;
1279 		} else {
1280 			if (lladdr == neigh->ha && new == NUD_STALE &&
1281 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1282 				new = old;
1283 		}
1284 	}
1285 
1286 	/* Update the timestamp only once we know we will make a change to the
1287 	 * neighbour entry. Otherwise we risk moving the locktime window with
1288 	 * no-op updates and ignoring relevant ARP updates.
1289 	 */
1290 	if (new != old || lladdr != neigh->ha)
1291 		neigh->updated = jiffies;
1292 
1293 	if (new != old) {
1294 		neigh_del_timer(neigh);
1295 		if (new & NUD_PROBE)
1296 			atomic_set(&neigh->probes, 0);
1297 		if (new & NUD_IN_TIMER)
1298 			neigh_add_timer(neigh, (jiffies +
1299 						((new & NUD_REACHABLE) ?
1300 						 neigh->parms->reachable_time :
1301 						 0)));
1302 		neigh_change_state(neigh, new);
1303 		notify = 1;
1304 	}
1305 
1306 	if (lladdr != neigh->ha) {
1307 		write_seqlock(&neigh->ha_lock);
1308 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1309 		write_sequnlock(&neigh->ha_lock);
1310 		neigh_update_hhs(neigh);
1311 		if (!(new & NUD_CONNECTED))
1312 			neigh->confirmed = jiffies -
1313 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1314 		notify = 1;
1315 	}
1316 	if (new == old)
1317 		goto out;
1318 	if (new & NUD_CONNECTED)
1319 		neigh_connect(neigh);
1320 	else
1321 		neigh_suspect(neigh);
1322 	if (!(old & NUD_VALID)) {
1323 		struct sk_buff *skb;
1324 
1325 		/* Again: avoid an endless loop if something went wrong */
1326 
1327 		while (neigh->nud_state & NUD_VALID &&
1328 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1329 			struct dst_entry *dst = skb_dst(skb);
1330 			struct neighbour *n2, *n1 = neigh;
1331 			write_unlock_bh(&neigh->lock);
1332 
1333 			rcu_read_lock();
1334 
1335 			/* Why not just use 'neigh' as-is?  The problem is that
1336 			 * things such as shaper, eql, and sch_teql can end up
1337 			 * using alternative, different, neigh objects to output
1338 			 * the packet in the output path.  So what we need to do
1339 			 * here is re-lookup the top-level neigh in the path so
1340 			 * we can reinject the packet there.
1341 			 */
1342 			n2 = NULL;
1343 			if (dst) {
1344 				n2 = dst_neigh_lookup_skb(dst, skb);
1345 				if (n2)
1346 					n1 = n2;
1347 			}
1348 			n1->output(n1, skb);
1349 			if (n2)
1350 				neigh_release(n2);
1351 			rcu_read_unlock();
1352 
1353 			write_lock_bh(&neigh->lock);
1354 		}
1355 		__skb_queue_purge(&neigh->arp_queue);
1356 		neigh->arp_queue_len_bytes = 0;
1357 	}
1358 out:
1359 	if (update_isrouter)
1360 		neigh_update_is_router(neigh, flags, &notify);
1361 	write_unlock_bh(&neigh->lock);
1362 
1363 	if (notify)
1364 		neigh_update_notify(neigh, nlmsg_pid);
1365 
1366 	return err;
1367 }
1368 
1369 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1370 		 u32 flags, u32 nlmsg_pid)
1371 {
1372 	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1373 }
1374 EXPORT_SYMBOL(neigh_update);
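
/* Usage sketch (hypothetical receive path): confirming an entry when a
 * reply carrying link-layer address 'sha' arrives, roughly what the ARP
 * input path does:
 *
 *	neigh_update(n, sha, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE, 0);
 *
 * Per the comment above __neigh_update(), the caller must hold its own
 * reference on 'n'.
 */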
1375 
1376 /* Update the neigh to listen temporarily for probe responses, even if it is
1377  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1378  */
1379 void __neigh_set_probe_once(struct neighbour *neigh)
1380 {
1381 	if (neigh->dead)
1382 		return;
1383 	neigh->updated = jiffies;
1384 	if (!(neigh->nud_state & NUD_FAILED))
1385 		return;
1386 	neigh->nud_state = NUD_INCOMPLETE;
1387 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1388 	neigh_add_timer(neigh,
1389 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1390 }
1391 EXPORT_SYMBOL(__neigh_set_probe_once);
1392 
1393 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1394 				 u8 *lladdr, void *saddr,
1395 				 struct net_device *dev)
1396 {
1397 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1398 						 lladdr || !dev->addr_len);
1399 	if (neigh)
1400 		neigh_update(neigh, lladdr, NUD_STALE,
1401 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1402 	return neigh;
1403 }
1404 EXPORT_SYMBOL(neigh_event_ns);
1405 
1406 /* Initializes n->hh itself under write_lock_bh(&n->lock). */
1407 static void neigh_hh_init(struct neighbour *n)
1408 {
1409 	struct net_device *dev = n->dev;
1410 	__be16 prot = n->tbl->protocol;
1411 	struct hh_cache	*hh = &n->hh;
1412 
1413 	write_lock_bh(&n->lock);
1414 
1415 	/* Only one thread can come in here and initialize the
1416 	 * hh_cache entry.
1417 	 */
1418 	if (!hh->hh_len)
1419 		dev->header_ops->cache(n, hh, prot);
1420 
1421 	write_unlock_bh(&n->lock);
1422 }
1423 
1424 /* Slow and careful. */
1425 
1426 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1427 {
1428 	int rc = 0;
1429 
1430 	if (!neigh_event_send(neigh, skb)) {
1431 		int err;
1432 		struct net_device *dev = neigh->dev;
1433 		unsigned int seq;
1434 
1435 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1436 			neigh_hh_init(neigh);
1437 
1438 		do {
1439 			__skb_pull(skb, skb_network_offset(skb));
1440 			seq = read_seqbegin(&neigh->ha_lock);
1441 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1442 					      neigh->ha, NULL, skb->len);
1443 		} while (read_seqretry(&neigh->ha_lock, seq));
1444 
1445 		if (err >= 0)
1446 			rc = dev_queue_xmit(skb);
1447 		else
1448 			goto out_kfree_skb;
1449 	}
1450 out:
1451 	return rc;
1452 out_kfree_skb:
1453 	rc = -EINVAL;
1454 	kfree_skb(skb);
1455 	goto out;
1456 }
1457 EXPORT_SYMBOL(neigh_resolve_output);
1458 
1459 /* As fast as possible without hh cache */
1460 
1461 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1462 {
1463 	struct net_device *dev = neigh->dev;
1464 	unsigned int seq;
1465 	int err;
1466 
1467 	do {
1468 		__skb_pull(skb, skb_network_offset(skb));
1469 		seq = read_seqbegin(&neigh->ha_lock);
1470 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1471 				      neigh->ha, NULL, skb->len);
1472 	} while (read_seqretry(&neigh->ha_lock, seq));
1473 
1474 	if (err >= 0)
1475 		err = dev_queue_xmit(skb);
1476 	else {
1477 		err = -EINVAL;
1478 		kfree_skb(skb);
1479 	}
1480 	return err;
1481 }
1482 EXPORT_SYMBOL(neigh_connected_output);
1483 
1484 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1485 {
1486 	return dev_queue_xmit(skb);
1487 }
1488 EXPORT_SYMBOL(neigh_direct_output);
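
/* How the helpers above are wired: neigh_connect() points neigh->output
 * at ops->connected_output and neigh_suspect() points it back at
 * ops->output, so a protocol's neigh_ops typically uses the generic
 * helpers, roughly as in this sketch ('example_ops' and the solicit/
 * error_report hooks are hypothetical):
 *
 *	static const struct neigh_ops example_ops = {
 *		.family			= AF_INET,
 *		.solicit		= example_solicit,
 *		.error_report		= example_error_report,
 *		.output			= neigh_resolve_output,
 *		.connected_output	= neigh_connected_output,
 *	};
 */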
1489 
1490 static void neigh_proxy_process(struct timer_list *t)
1491 {
1492 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1493 	long sched_next = 0;
1494 	unsigned long now = jiffies;
1495 	struct sk_buff *skb, *n;
1496 
1497 	spin_lock(&tbl->proxy_queue.lock);
1498 
1499 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1500 		long tdif = NEIGH_CB(skb)->sched_next - now;
1501 
1502 		if (tdif <= 0) {
1503 			struct net_device *dev = skb->dev;
1504 
1505 			__skb_unlink(skb, &tbl->proxy_queue);
1506 			if (tbl->proxy_redo && netif_running(dev)) {
1507 				rcu_read_lock();
1508 				tbl->proxy_redo(skb);
1509 				rcu_read_unlock();
1510 			} else {
1511 				kfree_skb(skb);
1512 			}
1513 
1514 			dev_put(dev);
1515 		} else if (!sched_next || tdif < sched_next)
1516 			sched_next = tdif;
1517 	}
1518 	del_timer(&tbl->proxy_timer);
1519 	if (sched_next)
1520 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1521 	spin_unlock(&tbl->proxy_queue.lock);
1522 }
1523 
1524 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1525 		    struct sk_buff *skb)
1526 {
1527 	unsigned long now = jiffies;
1528 
1529 	unsigned long sched_next = now + (prandom_u32() %
1530 					  NEIGH_VAR(p, PROXY_DELAY));
1531 
1532 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1533 		kfree_skb(skb);
1534 		return;
1535 	}
1536 
1537 	NEIGH_CB(skb)->sched_next = sched_next;
1538 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1539 
1540 	spin_lock(&tbl->proxy_queue.lock);
1541 	if (del_timer(&tbl->proxy_timer)) {
1542 		if (time_before(tbl->proxy_timer.expires, sched_next))
1543 			sched_next = tbl->proxy_timer.expires;
1544 	}
1545 	skb_dst_drop(skb);
1546 	dev_hold(skb->dev);
1547 	__skb_queue_tail(&tbl->proxy_queue, skb);
1548 	mod_timer(&tbl->proxy_timer, sched_next);
1549 	spin_unlock(&tbl->proxy_queue.lock);
1550 }
1551 EXPORT_SYMBOL(pneigh_enqueue);
1552 
1553 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1554 						      struct net *net, int ifindex)
1555 {
1556 	struct neigh_parms *p;
1557 
1558 	list_for_each_entry(p, &tbl->parms_list, list) {
1559 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1560 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1561 			return p;
1562 	}
1563 
1564 	return NULL;
1565 }
1566 
1567 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1568 				      struct neigh_table *tbl)
1569 {
1570 	struct neigh_parms *p;
1571 	struct net *net = dev_net(dev);
1572 	const struct net_device_ops *ops = dev->netdev_ops;
1573 
1574 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1575 	if (p) {
1576 		p->tbl		  = tbl;
1577 		refcount_set(&p->refcnt, 1);
1578 		p->reachable_time =
1579 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1580 		dev_hold(dev);
1581 		p->dev = dev;
1582 		write_pnet(&p->net, net);
1583 		p->sysctl_table = NULL;
1584 
1585 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1586 			dev_put(dev);
1587 			kfree(p);
1588 			return NULL;
1589 		}
1590 
1591 		write_lock_bh(&tbl->lock);
1592 		list_add(&p->list, &tbl->parms.list);
1593 		write_unlock_bh(&tbl->lock);
1594 
1595 		neigh_parms_data_state_cleanall(p);
1596 	}
1597 	return p;
1598 }
1599 EXPORT_SYMBOL(neigh_parms_alloc);
1600 
1601 static void neigh_rcu_free_parms(struct rcu_head *head)
1602 {
1603 	struct neigh_parms *parms =
1604 		container_of(head, struct neigh_parms, rcu_head);
1605 
1606 	neigh_parms_put(parms);
1607 }
1608 
1609 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1610 {
1611 	if (!parms || parms == &tbl->parms)
1612 		return;
1613 	write_lock_bh(&tbl->lock);
1614 	list_del(&parms->list);
1615 	parms->dead = 1;
1616 	write_unlock_bh(&tbl->lock);
1617 	if (parms->dev)
1618 		dev_put(parms->dev);
1619 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1620 }
1621 EXPORT_SYMBOL(neigh_parms_release);
1622 
1623 static void neigh_parms_destroy(struct neigh_parms *parms)
1624 {
1625 	kfree(parms);
1626 }
1627 
1628 static struct lock_class_key neigh_table_proxy_queue_class;
1629 
1630 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1631 
1632 void neigh_table_init(int index, struct neigh_table *tbl)
1633 {
1634 	unsigned long now = jiffies;
1635 	unsigned long phsize;
1636 
1637 	INIT_LIST_HEAD(&tbl->parms_list);
1638 	INIT_LIST_HEAD(&tbl->gc_list);
1639 	list_add(&tbl->parms.list, &tbl->parms_list);
1640 	write_pnet(&tbl->parms.net, &init_net);
1641 	refcount_set(&tbl->parms.refcnt, 1);
1642 	tbl->parms.reachable_time =
1643 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1644 
1645 	tbl->stats = alloc_percpu(struct neigh_statistics);
1646 	if (!tbl->stats)
1647 		panic("cannot create neighbour cache statistics");
1648 
1649 #ifdef CONFIG_PROC_FS
1650 	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1651 			      &neigh_stat_seq_ops, tbl))
1652 		panic("cannot create neighbour proc dir entry");
1653 #endif
1654 
1655 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1656 
1657 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1658 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1659 
1660 	if (!tbl->nht || !tbl->phash_buckets)
1661 		panic("cannot allocate neighbour cache hashes");
1662 
1663 	if (!tbl->entry_size)
1664 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1665 					tbl->key_len, NEIGH_PRIV_ALIGN);
1666 	else
1667 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1668 
1669 	rwlock_init(&tbl->lock);
1670 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1671 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1672 			tbl->parms.reachable_time);
1673 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1674 	skb_queue_head_init_class(&tbl->proxy_queue,
1675 			&neigh_table_proxy_queue_class);
1676 
1677 	tbl->last_flush = now;
1678 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1679 
1680 	neigh_tables[index] = tbl;
1681 }
1682 EXPORT_SYMBOL(neigh_table_init);
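
/* Usage sketch: a protocol registers its statically defined table once
 * at init time, the way ARP does for NEIGH_ARP_TABLE:
 *
 *	void __init arp_init(void)
 *	{
 *		neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *		...
 *	}
 *
 * The index must be one of the NEIGH_NR_TABLES slots consulted by
 * neigh_find_table() below.
 */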
1683 
1684 int neigh_table_clear(int index, struct neigh_table *tbl)
1685 {
1686 	neigh_tables[index] = NULL;
1687 	/* This is not clean... fix it so the IPv6 module can unload safely */
1688 	cancel_delayed_work_sync(&tbl->gc_work);
1689 	del_timer_sync(&tbl->proxy_timer);
1690 	pneigh_queue_purge(&tbl->proxy_queue);
1691 	neigh_ifdown(tbl, NULL);
1692 	if (atomic_read(&tbl->entries))
1693 		pr_crit("neighbour leakage\n");
1694 
1695 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1696 		 neigh_hash_free_rcu);
1697 	tbl->nht = NULL;
1698 
1699 	kfree(tbl->phash_buckets);
1700 	tbl->phash_buckets = NULL;
1701 
1702 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1703 
1704 	free_percpu(tbl->stats);
1705 	tbl->stats = NULL;
1706 
1707 	return 0;
1708 }
1709 EXPORT_SYMBOL(neigh_table_clear);
1710 
1711 static struct neigh_table *neigh_find_table(int family)
1712 {
1713 	struct neigh_table *tbl = NULL;
1714 
1715 	switch (family) {
1716 	case AF_INET:
1717 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1718 		break;
1719 	case AF_INET6:
1720 		tbl = neigh_tables[NEIGH_ND_TABLE];
1721 		break;
1722 	case AF_DECnet:
1723 		tbl = neigh_tables[NEIGH_DN_TABLE];
1724 		break;
1725 	}
1726 
1727 	return tbl;
1728 }
1729 
1730 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1731 			struct netlink_ext_ack *extack)
1732 {
1733 	struct net *net = sock_net(skb->sk);
1734 	struct ndmsg *ndm;
1735 	struct nlattr *dst_attr;
1736 	struct neigh_table *tbl;
1737 	struct neighbour *neigh;
1738 	struct net_device *dev = NULL;
1739 	int err = -EINVAL;
1740 
1741 	ASSERT_RTNL();
1742 	if (nlmsg_len(nlh) < sizeof(*ndm))
1743 		goto out;
1744 
1745 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1746 	if (!dst_attr) {
1747 		NL_SET_ERR_MSG(extack, "Network address not specified");
1748 		goto out;
1749 	}
1750 
1751 	ndm = nlmsg_data(nlh);
1752 	if (ndm->ndm_ifindex) {
1753 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1754 		if (dev == NULL) {
1755 			err = -ENODEV;
1756 			goto out;
1757 		}
1758 	}
1759 
1760 	tbl = neigh_find_table(ndm->ndm_family);
1761 	if (tbl == NULL)
1762 		return -EAFNOSUPPORT;
1763 
1764 	if (nla_len(dst_attr) < (int)tbl->key_len) {
1765 		NL_SET_ERR_MSG(extack, "Invalid network address");
1766 		goto out;
1767 	}
1768 
1769 	if (ndm->ndm_flags & NTF_PROXY) {
1770 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1771 		goto out;
1772 	}
1773 
1774 	if (dev == NULL)
1775 		goto out;
1776 
1777 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1778 	if (neigh == NULL) {
1779 		err = -ENOENT;
1780 		goto out;
1781 	}
1782 
1783 	err = __neigh_update(neigh, NULL, NUD_FAILED,
1784 			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1785 			     NETLINK_CB(skb).portid, extack);
1786 	write_lock_bh(&tbl->lock);
1787 	neigh_release(neigh);
1788 	neigh_remove_one(neigh, tbl);
1789 	write_unlock_bh(&tbl->lock);
1790 
1791 out:
1792 	return err;
1793 }
1794 
1795 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1796 		     struct netlink_ext_ack *extack)
1797 {
1798 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1799 		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1800 	struct net *net = sock_net(skb->sk);
1801 	struct ndmsg *ndm;
1802 	struct nlattr *tb[NDA_MAX+1];
1803 	struct neigh_table *tbl;
1804 	struct net_device *dev = NULL;
1805 	struct neighbour *neigh;
1806 	void *dst, *lladdr;
1807 	int err;
1808 
1809 	ASSERT_RTNL();
1810 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
1811 	if (err < 0)
1812 		goto out;
1813 
1814 	err = -EINVAL;
1815 	if (!tb[NDA_DST]) {
1816 		NL_SET_ERR_MSG(extack, "Network address not specified");
1817 		goto out;
1818 	}
1819 
1820 	ndm = nlmsg_data(nlh);
1821 	if (ndm->ndm_ifindex) {
1822 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1823 		if (dev == NULL) {
1824 			err = -ENODEV;
1825 			goto out;
1826 		}
1827 
1828 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1829 			NL_SET_ERR_MSG(extack, "Invalid link address");
1830 			goto out;
1831 		}
1832 	}
1833 
1834 	tbl = neigh_find_table(ndm->ndm_family);
1835 	if (tbl == NULL)
1836 		return -EAFNOSUPPORT;
1837 
1838 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1839 		NL_SET_ERR_MSG(extack, "Invalid network address");
1840 		goto out;
1841 	}
1842 
1843 	dst = nla_data(tb[NDA_DST]);
1844 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1845 
1846 	if (ndm->ndm_flags & NTF_PROXY) {
1847 		struct pneigh_entry *pn;
1848 
1849 		err = -ENOBUFS;
1850 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1851 		if (pn) {
1852 			pn->flags = ndm->ndm_flags;
1853 			err = 0;
1854 		}
1855 		goto out;
1856 	}
1857 
1858 	if (!dev) {
1859 		NL_SET_ERR_MSG(extack, "Device not specified");
1860 		goto out;
1861 	}
1862 
1863 	neigh = neigh_lookup(tbl, dst, dev);
1864 	if (neigh == NULL) {
1865 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1866 			err = -ENOENT;
1867 			goto out;
1868 		}
1869 
1870 		neigh = ___neigh_create(tbl, dst, dev,
1871 					ndm->ndm_state & NUD_PERMANENT,
1872 					true);
1873 		if (IS_ERR(neigh)) {
1874 			err = PTR_ERR(neigh);
1875 			goto out;
1876 		}
1877 	} else {
1878 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1879 			err = -EEXIST;
1880 			neigh_release(neigh);
1881 			goto out;
1882 		}
1883 
1884 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1885 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1886 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1887 	}
1888 
1889 	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1890 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1891 
1892 	if (ndm->ndm_flags & NTF_ROUTER)
1893 		flags |= NEIGH_UPDATE_F_ISROUTER;
1894 
1895 	if (ndm->ndm_flags & NTF_USE) {
1896 		neigh_event_send(neigh, NULL);
1897 		err = 0;
1898 	} else
1899 		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1900 				     NETLINK_CB(skb).portid, extack);
1901 	neigh_release(neigh);
1902 
1903 out:
1904 	return err;
1905 }
1906 
1907 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1908 {
1909 	struct nlattr *nest;
1910 
1911 	nest = nla_nest_start(skb, NDTA_PARMS);
1912 	if (nest == NULL)
1913 		return -ENOBUFS;
1914 
1915 	if ((parms->dev &&
1916 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1917 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1918 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1919 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1920 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1921 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1922 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1923 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1924 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1925 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1926 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1927 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1928 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1929 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1930 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1931 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1932 			  NDTPA_PAD) ||
1933 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1934 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1935 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1936 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1937 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1938 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1939 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1940 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1941 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1942 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1943 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1944 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1945 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1946 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1947 		goto nla_put_failure;
1948 	return nla_nest_end(skb, nest);
1949 
1950 nla_put_failure:
1951 	nla_nest_cancel(skb, nest);
1952 	return -EMSGSIZE;
1953 }
1954 
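/* Fill an RTM_NEWNEIGHTBL message for one table: GC thresholds and
 * interval, an NDTA_CONFIG snapshot of hash/runtime state, per-CPU
 * statistics summed into NDTA_STATS, and the table's default parms.
 */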
1955 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1956 			      u32 pid, u32 seq, int type, int flags)
1957 {
1958 	struct nlmsghdr *nlh;
1959 	struct ndtmsg *ndtmsg;
1960 
1961 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1962 	if (nlh == NULL)
1963 		return -EMSGSIZE;
1964 
1965 	ndtmsg = nlmsg_data(nlh);
1966 
1967 	read_lock_bh(&tbl->lock);
1968 	ndtmsg->ndtm_family = tbl->family;
1969 	ndtmsg->ndtm_pad1   = 0;
1970 	ndtmsg->ndtm_pad2   = 0;
1971 
1972 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1973 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1974 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1975 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1976 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1977 		goto nla_put_failure;
1978 	{
1979 		unsigned long now = jiffies;
1980 		unsigned int flush_delta = now - tbl->last_flush;
1981 		unsigned int rand_delta = now - tbl->last_rand;
1982 		struct neigh_hash_table *nht;
1983 		struct ndt_config ndc = {
1984 			.ndtc_key_len		= tbl->key_len,
1985 			.ndtc_entry_size	= tbl->entry_size,
1986 			.ndtc_entries		= atomic_read(&tbl->entries),
1987 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1988 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1989 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1990 		};
1991 
1992 		rcu_read_lock_bh();
1993 		nht = rcu_dereference_bh(tbl->nht);
1994 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1995 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1996 		rcu_read_unlock_bh();
1997 
1998 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1999 			goto nla_put_failure;
2000 	}
2001 
2002 	{
2003 		int cpu;
2004 		struct ndt_stats ndst;
2005 
2006 		memset(&ndst, 0, sizeof(ndst));
2007 
2008 		for_each_possible_cpu(cpu) {
2009 			struct neigh_statistics	*st;
2010 
2011 			st = per_cpu_ptr(tbl->stats, cpu);
2012 			ndst.ndts_allocs		+= st->allocs;
2013 			ndst.ndts_destroys		+= st->destroys;
2014 			ndst.ndts_hash_grows		+= st->hash_grows;
2015 			ndst.ndts_res_failed		+= st->res_failed;
2016 			ndst.ndts_lookups		+= st->lookups;
2017 			ndst.ndts_hits			+= st->hits;
2018 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
2019 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
2020 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
2021 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
2022 			ndst.ndts_table_fulls		+= st->table_fulls;
2023 		}
2024 
2025 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2026 				  NDTA_PAD))
2027 			goto nla_put_failure;
2028 	}
2029 
2030 	BUG_ON(tbl->parms.dev);
2031 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2032 		goto nla_put_failure;
2033 
2034 	read_unlock_bh(&tbl->lock);
2035 	nlmsg_end(skb, nlh);
2036 	return 0;
2037 
2038 nla_put_failure:
2039 	read_unlock_bh(&tbl->lock);
2040 	nlmsg_cancel(skb, nlh);
2041 	return -EMSGSIZE;
2042 }
2043 
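/* Like neightbl_fill_info(), but dumps only the given parms instance;
 * used for the per-device parameter sets of a table.
 */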
2044 static int neightbl_fill_param_info(struct sk_buff *skb,
2045 				    struct neigh_table *tbl,
2046 				    struct neigh_parms *parms,
2047 				    u32 pid, u32 seq, int type,
2048 				    unsigned int flags)
2049 {
2050 	struct ndtmsg *ndtmsg;
2051 	struct nlmsghdr *nlh;
2052 
2053 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2054 	if (nlh == NULL)
2055 		return -EMSGSIZE;
2056 
2057 	ndtmsg = nlmsg_data(nlh);
2058 
2059 	read_lock_bh(&tbl->lock);
2060 	ndtmsg->ndtm_family = tbl->family;
2061 	ndtmsg->ndtm_pad1   = 0;
2062 	ndtmsg->ndtm_pad2   = 0;
2063 
2064 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2065 	    neightbl_fill_parms(skb, parms) < 0)
2066 		goto errout;
2067 
2068 	read_unlock_bh(&tbl->lock);
2069 	nlmsg_end(skb, nlh);
2070 	return 0;
2071 errout:
2072 	read_unlock_bh(&tbl->lock);
2073 	nlmsg_cancel(skb, nlh);
2074 	return -EMSGSIZE;
2075 }
2076 
2077 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2078 	[NDTA_NAME]		= { .type = NLA_STRING },
2079 	[NDTA_THRESH1]		= { .type = NLA_U32 },
2080 	[NDTA_THRESH2]		= { .type = NLA_U32 },
2081 	[NDTA_THRESH3]		= { .type = NLA_U32 },
2082 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2083 	[NDTA_PARMS]		= { .type = NLA_NESTED },
2084 };
2085 
2086 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2087 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2088 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2089 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2090 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2091 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2092 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2093 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2094 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2095 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2096 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2097 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2098 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2099 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2100 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2101 };
2102 
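/* RTM_SETNEIGHTBL handler: look the table up by NDTA_NAME (optionally
 * qualified by family), then apply NDTA_PARMS and/or the gc_thresh1..3
 * and gc_interval settings under tbl->lock. The global settings may
 * only be changed from the initial network namespace.
 */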
2103 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2104 			struct netlink_ext_ack *extack)
2105 {
2106 	struct net *net = sock_net(skb->sk);
2107 	struct neigh_table *tbl;
2108 	struct ndtmsg *ndtmsg;
2109 	struct nlattr *tb[NDTA_MAX+1];
2110 	bool found = false;
2111 	int err, tidx;
2112 
2113 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2114 			  nl_neightbl_policy, extack);
2115 	if (err < 0)
2116 		goto errout;
2117 
2118 	if (tb[NDTA_NAME] == NULL) {
2119 		err = -EINVAL;
2120 		goto errout;
2121 	}
2122 
2123 	ndtmsg = nlmsg_data(nlh);
2124 
2125 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2126 		tbl = neigh_tables[tidx];
2127 		if (!tbl)
2128 			continue;
2129 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2130 			continue;
2131 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2132 			found = true;
2133 			break;
2134 		}
2135 	}
2136 
2137 	if (!found)
2138 		return -ENOENT;
2139 
2140 	/*
2141 	 * We acquire tbl->lock to be nice to the periodic timers and
2142 	 * make sure they always see a consistent set of values.
2143 	 */
2144 	write_lock_bh(&tbl->lock);
2145 
2146 	if (tb[NDTA_PARMS]) {
2147 		struct nlattr *tbp[NDTPA_MAX+1];
2148 		struct neigh_parms *p;
2149 		int i, ifindex = 0;
2150 
2151 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2152 				       nl_ntbl_parm_policy, extack);
2153 		if (err < 0)
2154 			goto errout_tbl_lock;
2155 
2156 		if (tbp[NDTPA_IFINDEX])
2157 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2158 
2159 		p = lookup_neigh_parms(tbl, net, ifindex);
2160 		if (p == NULL) {
2161 			err = -ENOENT;
2162 			goto errout_tbl_lock;
2163 		}
2164 
2165 		for (i = 1; i <= NDTPA_MAX; i++) {
2166 			if (tbp[i] == NULL)
2167 				continue;
2168 
2169 			switch (i) {
2170 			case NDTPA_QUEUE_LEN:
2171 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2172 					      nla_get_u32(tbp[i]) *
2173 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2174 				break;
2175 			case NDTPA_QUEUE_LENBYTES:
2176 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2177 					      nla_get_u32(tbp[i]));
2178 				break;
2179 			case NDTPA_PROXY_QLEN:
2180 				NEIGH_VAR_SET(p, PROXY_QLEN,
2181 					      nla_get_u32(tbp[i]));
2182 				break;
2183 			case NDTPA_APP_PROBES:
2184 				NEIGH_VAR_SET(p, APP_PROBES,
2185 					      nla_get_u32(tbp[i]));
2186 				break;
2187 			case NDTPA_UCAST_PROBES:
2188 				NEIGH_VAR_SET(p, UCAST_PROBES,
2189 					      nla_get_u32(tbp[i]));
2190 				break;
2191 			case NDTPA_MCAST_PROBES:
2192 				NEIGH_VAR_SET(p, MCAST_PROBES,
2193 					      nla_get_u32(tbp[i]));
2194 				break;
2195 			case NDTPA_MCAST_REPROBES:
2196 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2197 					      nla_get_u32(tbp[i]));
2198 				break;
2199 			case NDTPA_BASE_REACHABLE_TIME:
2200 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2201 					      nla_get_msecs(tbp[i]));
2202 				/* update reachable_time as well; otherwise the change
2203 				 * only takes effect the next time neigh_periodic_work
2204 				 * decides to recompute it (which can be multiple minutes)
2205 				 */
2206 				p->reachable_time =
2207 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2208 				break;
2209 			case NDTPA_GC_STALETIME:
2210 				NEIGH_VAR_SET(p, GC_STALETIME,
2211 					      nla_get_msecs(tbp[i]));
2212 				break;
2213 			case NDTPA_DELAY_PROBE_TIME:
2214 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2215 					      nla_get_msecs(tbp[i]));
2216 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2217 				break;
2218 			case NDTPA_RETRANS_TIME:
2219 				NEIGH_VAR_SET(p, RETRANS_TIME,
2220 					      nla_get_msecs(tbp[i]));
2221 				break;
2222 			case NDTPA_ANYCAST_DELAY:
2223 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2224 					      nla_get_msecs(tbp[i]));
2225 				break;
2226 			case NDTPA_PROXY_DELAY:
2227 				NEIGH_VAR_SET(p, PROXY_DELAY,
2228 					      nla_get_msecs(tbp[i]));
2229 				break;
2230 			case NDTPA_LOCKTIME:
2231 				NEIGH_VAR_SET(p, LOCKTIME,
2232 					      nla_get_msecs(tbp[i]));
2233 				break;
2234 			}
2235 		}
2236 	}
2237 
2238 	err = -ENOENT;
2239 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2240 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2241 	    !net_eq(net, &init_net))
2242 		goto errout_tbl_lock;
2243 
2244 	if (tb[NDTA_THRESH1])
2245 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2246 
2247 	if (tb[NDTA_THRESH2])
2248 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2249 
2250 	if (tb[NDTA_THRESH3])
2251 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2252 
2253 	if (tb[NDTA_GC_INTERVAL])
2254 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2255 
2256 	err = 0;
2257 
2258 errout_tbl_lock:
2259 	write_unlock_bh(&tbl->lock);
2260 errout:
2261 	return err;
2262 }
2263 
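/* Strict-mode validation of an RTM_GETNEIGHTBL dump request: exactly
 * one ndtmsg header with zeroed padding and no trailing attributes.
 */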
2264 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2265 				    struct netlink_ext_ack *extack)
2266 {
2267 	struct ndtmsg *ndtm;
2268 
2269 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2270 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2271 		return -EINVAL;
2272 	}
2273 
2274 	ndtm = nlmsg_data(nlh);
2275 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2276 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2277 		return -EINVAL;
2278 	}
2279 
2280 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2281 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2282 		return -EINVAL;
2283 	}
2284 
2285 	return 0;
2286 }
2287 
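/* Dump every table and its per-device parms; cb->args[0] and
 * cb->args[1] carry the table and parms positions across resumed
 * dumps.
 */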
2288 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2289 {
2290 	const struct nlmsghdr *nlh = cb->nlh;
2291 	struct net *net = sock_net(skb->sk);
2292 	int family, tidx, nidx = 0;
2293 	int tbl_skip = cb->args[0];
2294 	int neigh_skip = cb->args[1];
2295 	struct neigh_table *tbl;
2296 
2297 	if (cb->strict_check) {
2298 		int err = neightbl_valid_dump_info(nlh, cb->extack);
2299 
2300 		if (err < 0)
2301 			return err;
2302 	}
2303 
2304 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2305 
2306 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2307 		struct neigh_parms *p;
2308 
2309 		tbl = neigh_tables[tidx];
2310 		if (!tbl)
2311 			continue;
2312 
2313 		if (tidx < tbl_skip || (family && tbl->family != family))
2314 			continue;
2315 
2316 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2317 				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2318 				       NLM_F_MULTI) < 0)
2319 			break;
2320 
2321 		nidx = 0;
2322 		p = list_next_entry(&tbl->parms, list);
2323 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2324 			if (!net_eq(neigh_parms_net(p), net))
2325 				continue;
2326 
2327 			if (nidx < neigh_skip)
2328 				goto next;
2329 
2330 			if (neightbl_fill_param_info(skb, tbl, p,
2331 						     NETLINK_CB(cb->skb).portid,
2332 						     nlh->nlmsg_seq,
2333 						     RTM_NEWNEIGHTBL,
2334 						     NLM_F_MULTI) < 0)
2335 				goto out;
2336 		next:
2337 			nidx++;
2338 		}
2339 
2340 		neigh_skip = 0;
2341 	}
2342 out:
2343 	cb->args[0] = tidx;
2344 	cb->args[1] = nidx;
2345 
2346 	return skb->len;
2347 }
2348 
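/* Fill one RTM_NEWNEIGH message for @neigh. The link-layer address
 * and cache info are sampled under neigh->lock so that they are
 * consistent with the reported nud_state.
 */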
2349 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2350 			   u32 pid, u32 seq, int type, unsigned int flags)
2351 {
2352 	unsigned long now = jiffies;
2353 	struct nda_cacheinfo ci;
2354 	struct nlmsghdr *nlh;
2355 	struct ndmsg *ndm;
2356 
2357 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2358 	if (nlh == NULL)
2359 		return -EMSGSIZE;
2360 
2361 	ndm = nlmsg_data(nlh);
2362 	ndm->ndm_family	 = neigh->ops->family;
2363 	ndm->ndm_pad1    = 0;
2364 	ndm->ndm_pad2    = 0;
2365 	ndm->ndm_flags	 = neigh->flags;
2366 	ndm->ndm_type	 = neigh->type;
2367 	ndm->ndm_ifindex = neigh->dev->ifindex;
2368 
2369 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2370 		goto nla_put_failure;
2371 
2372 	read_lock_bh(&neigh->lock);
2373 	ndm->ndm_state	 = neigh->nud_state;
2374 	if (neigh->nud_state & NUD_VALID) {
2375 		char haddr[MAX_ADDR_LEN];
2376 
2377 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2378 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2379 			read_unlock_bh(&neigh->lock);
2380 			goto nla_put_failure;
2381 		}
2382 	}
2383 
2384 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2385 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2386 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2387 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2388 	read_unlock_bh(&neigh->lock);
2389 
2390 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2391 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2392 		goto nla_put_failure;
2393 
2394 	nlmsg_end(skb, nlh);
2395 	return 0;
2396 
2397 nla_put_failure:
2398 	nlmsg_cancel(skb, nlh);
2399 	return -EMSGSIZE;
2400 }
2401 
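/* pneigh_fill_info() is the proxy-entry counterpart of
 * neigh_fill_info(): no link-layer address or cache info, the state is
 * always NUD_NONE and NTF_PROXY is set in the flags.
 */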
2402 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2403 			    u32 pid, u32 seq, int type, unsigned int flags,
2404 			    struct neigh_table *tbl)
2405 {
2406 	struct nlmsghdr *nlh;
2407 	struct ndmsg *ndm;
2408 
2409 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2410 	if (nlh == NULL)
2411 		return -EMSGSIZE;
2412 
2413 	ndm = nlmsg_data(nlh);
2414 	ndm->ndm_family	 = tbl->family;
2415 	ndm->ndm_pad1    = 0;
2416 	ndm->ndm_pad2    = 0;
2417 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2418 	ndm->ndm_type	 = RTN_UNICAST;
2419 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2420 	ndm->ndm_state	 = NUD_NONE;
2421 
2422 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2423 		goto nla_put_failure;
2424 
2425 	nlmsg_end(skb, nlh);
2426 	return 0;
2427 
2428 nla_put_failure:
2429 	nlmsg_cancel(skb, nlh);
2430 	return -EMSGSIZE;
2431 }
2432 
2433 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2434 {
2435 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2436 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2437 }
2438 
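/* Return true when @dev should be filtered out of a dump because it is
 * not enslaved to the master device with ifindex @master_idx; an index
 * of zero means "no filter" (likewise for neigh_ifindex_filtered()
 * below).
 */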
2439 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2440 {
2441 	struct net_device *master;
2442 
2443 	if (!master_idx)
2444 		return false;
2445 
2446 	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2447 	if (!master || master->ifindex != master_idx)
2448 		return true;
2449 
2450 	return false;
2451 }
2452 
2453 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2454 {
2455 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2456 		return true;
2457 
2458 	return false;
2459 }
2460 
2461 struct neigh_dump_filter {
2462 	int master_idx;
2463 	int dev_idx;
2464 };
2465 
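/* Walk one table's hash under RCU and emit an RTM_NEWNEIGH message per
 * matching entry. cb->args[1] and cb->args[2] record the bucket and
 * in-bucket index so an interrupted dump can resume where it stopped.
 */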
2466 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2467 			    struct netlink_callback *cb,
2468 			    struct neigh_dump_filter *filter)
2469 {
2470 	struct net *net = sock_net(skb->sk);
2471 	struct neighbour *n;
2472 	int rc, h, s_h = cb->args[1];
2473 	int idx, s_idx = idx = cb->args[2];
2474 	struct neigh_hash_table *nht;
2475 	unsigned int flags = NLM_F_MULTI;
2476 
2477 	if (filter->dev_idx || filter->master_idx)
2478 		flags |= NLM_F_DUMP_FILTERED;
2479 
2480 	rcu_read_lock_bh();
2481 	nht = rcu_dereference_bh(tbl->nht);
2482 
2483 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2484 		if (h > s_h)
2485 			s_idx = 0;
2486 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2487 		     n != NULL;
2488 		     n = rcu_dereference_bh(n->next)) {
2489 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2490 				goto next;
2491 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2492 			    neigh_master_filtered(n->dev, filter->master_idx))
2493 				goto next;
2494 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2495 					    cb->nlh->nlmsg_seq,
2496 					    RTM_NEWNEIGH,
2497 					    flags) < 0) {
2498 				rc = -1;
2499 				goto out;
2500 			}
2501 next:
2502 			idx++;
2503 		}
2504 	}
2505 	rc = skb->len;
2506 out:
2507 	rcu_read_unlock_bh();
2508 	cb->args[1] = h;
2509 	cb->args[2] = idx;
2510 	return rc;
2511 }
2512 
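/* As neigh_dump_table(), but for proxy entries: walks phash_buckets
 * under tbl->lock and resumes via cb->args[3] and cb->args[4].
 */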
2513 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2514 			     struct netlink_callback *cb,
2515 			     struct neigh_dump_filter *filter)
2516 {
2517 	struct pneigh_entry *n;
2518 	struct net *net = sock_net(skb->sk);
2519 	int rc, h, s_h = cb->args[3];
2520 	int idx, s_idx = idx = cb->args[4];
2521 	unsigned int flags = NLM_F_MULTI;
2522 
2523 	if (filter->dev_idx || filter->master_idx)
2524 		flags |= NLM_F_DUMP_FILTERED;
2525 
2526 	read_lock_bh(&tbl->lock);
2527 
2528 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2529 		if (h > s_h)
2530 			s_idx = 0;
2531 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2532 			if (idx < s_idx || pneigh_net(n) != net)
2533 				goto next;
2534 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2535 			    neigh_master_filtered(n->dev, filter->master_idx))
2536 				goto next;
2537 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2538 					    cb->nlh->nlmsg_seq,
2539 					    RTM_NEWNEIGH, flags, tbl) < 0) {
2540 				read_unlock_bh(&tbl->lock);
2541 				rc = -1;
2542 				goto out;
2543 			}
2544 		next:
2545 			idx++;
2546 		}
2547 	}
2548 
2549 	read_unlock_bh(&tbl->lock);
2550 	rc = skb->len;
2551 out:
2552 	cb->args[3] = h;
2553 	cb->args[4] = idx;
2554 	return rc;
2556 }
2557 
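/* Validate an RTM_GETNEIGH dump request and extract the NDA_IFINDEX
 * and NDA_MASTER filters. In strict mode the ndmsg header must be
 * clean and unknown attributes are rejected.
 */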
2558 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2559 				bool strict_check,
2560 				struct neigh_dump_filter *filter,
2561 				struct netlink_ext_ack *extack)
2562 {
2563 	struct nlattr *tb[NDA_MAX + 1];
2564 	int err, i;
2565 
2566 	if (strict_check) {
2567 		struct ndmsg *ndm;
2568 
2569 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2570 			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2571 			return -EINVAL;
2572 		}
2573 
2574 		ndm = nlmsg_data(nlh);
2575 		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2576 		    ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
2577 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2578 			return -EINVAL;
2579 		}
2580 
2581 		err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
2582 					 NULL, extack);
2583 	} else {
2584 		err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
2585 				  NULL, extack);
2586 	}
2587 	if (err < 0)
2588 		return err;
2589 
2590 	for (i = 0; i <= NDA_MAX; ++i) {
2591 		if (!tb[i])
2592 			continue;
2593 
2594 		/* all new attributes should require strict_check */
2595 		switch (i) {
2596 		case NDA_IFINDEX:
2597 			if (nla_len(tb[i]) != sizeof(u32)) {
2598 				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request");
2599 				return -EINVAL;
2600 			}
2601 			filter->dev_idx = nla_get_u32(tb[i]);
2602 			break;
2603 		case NDA_MASTER:
2604 			if (nla_len(tb[i]) != sizeof(u32)) {
2605 				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request");
2606 				return -EINVAL;
2607 			}
2608 			filter->master_idx = nla_get_u32(tb[i]);
2609 			break;
2610 		default:
2611 			if (strict_check) {
2612 				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2613 				return -EINVAL;
2614 			}
2615 		}
2616 	}
2617 
2618 	return 0;
2619 }
2620 
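/* Top-level RTM_GETNEIGH dump handler: selects the proxy or normal
 * dump based on NTF_PROXY in a full ndmsg header and iterates over all
 * tables matching the requested family.
 */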
2621 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2622 {
2623 	const struct nlmsghdr *nlh = cb->nlh;
2624 	struct neigh_dump_filter filter = {};
2625 	struct neigh_table *tbl;
2626 	int t, family, s_t;
2627 	int proxy = 0;
2628 	int err;
2629 
2630 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2631 
2632 	/* check whether a full ndmsg structure is present; the family
2633 	 * member is at the same offset in both ndmsg and rtgenmsg
2634 	 */
2635 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2636 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2637 		proxy = 1;
2638 
2639 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2640 	if (err < 0 && cb->strict_check)
2641 		return err;
2642 
2643 	s_t = cb->args[0];
2644 
2645 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2646 		tbl = neigh_tables[t];
2647 
2648 		if (!tbl)
2649 			continue;
2650 		if (t < s_t || (family && tbl->family != family))
2651 			continue;
2652 		if (t > s_t)
2653 			memset(&cb->args[1], 0, sizeof(cb->args) -
2654 						sizeof(cb->args[0]));
2655 		if (proxy)
2656 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2657 		else
2658 			err = neigh_dump_table(tbl, skb, cb, &filter);
2659 		if (err < 0)
2660 			break;
2661 	}
2662 
2663 	cb->args[0] = t;
2664 	return skb->len;
2665 }
2666 
2667 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2668 {
2669 	int chain;
2670 	struct neigh_hash_table *nht;
2671 
2672 	rcu_read_lock_bh();
2673 	nht = rcu_dereference_bh(tbl->nht);
2674 
2675 	read_lock(&tbl->lock); /* avoid resizes */
2676 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2677 		struct neighbour *n;
2678 
2679 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2680 		     n != NULL;
2681 		     n = rcu_dereference_bh(n->next))
2682 			cb(n, cookie);
2683 	}
2684 	read_unlock(&tbl->lock);
2685 	rcu_read_unlock_bh();
2686 }
2687 EXPORT_SYMBOL(neigh_for_each);
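
/* A minimal usage sketch (count_entry is hypothetical, not part of
 * this file), counting the entries of the IPv4 ARP table:
 *
 *	static void count_entry(struct neighbour *n, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int count = 0;
 *	neigh_for_each(&arp_tbl, count_entry, &count);
 *
 * The callback runs with tbl->lock read-held and BHs disabled, so it
 * must not sleep and must not take tbl->lock itself.
 */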
2688 
2689 /* The tbl->lock must be held as a writer and BH disabled. */
2690 void __neigh_for_each_release(struct neigh_table *tbl,
2691 			      int (*cb)(struct neighbour *))
2692 {
2693 	int chain;
2694 	struct neigh_hash_table *nht;
2695 
2696 	nht = rcu_dereference_protected(tbl->nht,
2697 					lockdep_is_held(&tbl->lock));
2698 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2699 		struct neighbour *n;
2700 		struct neighbour __rcu **np;
2701 
2702 		np = &nht->hash_buckets[chain];
2703 		while ((n = rcu_dereference_protected(*np,
2704 					lockdep_is_held(&tbl->lock))) != NULL) {
2705 			int release;
2706 
2707 			write_lock(&n->lock);
2708 			release = cb(n);
2709 			if (release) {
2710 				rcu_assign_pointer(*np,
2711 					rcu_dereference_protected(n->next,
2712 						lockdep_is_held(&tbl->lock)));
2713 				neigh_mark_dead(n);
2714 			} else
2715 				np = &n->next;
2716 			write_unlock(&n->lock);
2717 			if (release)
2718 				neigh_cleanup_and_release(n);
2719 		}
2720 	}
2721 }
2722 EXPORT_SYMBOL(__neigh_for_each_release);
2723 
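/* Transmit @skb to @addr through the table selected by @index
 * (NEIGH_ARP_TABLE, NEIGH_ND_TABLE, ...), creating a neighbour entry
 * on demand; NEIGH_LINK_TABLE skips resolution and only builds the
 * hard header. The skb is freed on resolution or header errors.
 */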
2724 int neigh_xmit(int index, struct net_device *dev,
2725 	       const void *addr, struct sk_buff *skb)
2726 {
2727 	int err = -EAFNOSUPPORT;
2728 	if (likely(index < NEIGH_NR_TABLES)) {
2729 		struct neigh_table *tbl;
2730 		struct neighbour *neigh;
2731 
2732 		tbl = neigh_tables[index];
2733 		if (!tbl)
2734 			goto out;
2735 		rcu_read_lock_bh();
2736 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2737 		if (!neigh)
2738 			neigh = __neigh_create(tbl, addr, dev, false);
2739 		err = PTR_ERR(neigh);
2740 		if (IS_ERR(neigh)) {
2741 			rcu_read_unlock_bh();
2742 			goto out_kfree_skb;
2743 		}
2744 		err = neigh->output(neigh, skb);
2745 		rcu_read_unlock_bh();
2746 	} else if (index == NEIGH_LINK_TABLE) {
2748 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2749 				      addr, NULL, skb->len);
2750 		if (err < 0)
2751 			goto out_kfree_skb;
2752 		err = dev_queue_xmit(skb);
2753 	}
2754 out:
2755 	return err;
2756 out_kfree_skb:
2757 	kfree_skb(skb);
2758 	goto out;
2759 }
2760 EXPORT_SYMBOL(neigh_xmit);
2761 
2762 #ifdef CONFIG_PROC_FS
2763 
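/* seq_file helpers for the per-table /proc files: iterate the hash
 * table first and then, unless NEIGH_SEQ_NEIGH_ONLY is set, the proxy
 * entries, all under the rcu_read_lock_bh() taken in neigh_seq_start().
 */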
2764 static struct neighbour *neigh_get_first(struct seq_file *seq)
2765 {
2766 	struct neigh_seq_state *state = seq->private;
2767 	struct net *net = seq_file_net(seq);
2768 	struct neigh_hash_table *nht = state->nht;
2769 	struct neighbour *n = NULL;
2770 	int bucket = state->bucket;
2771 
2772 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2773 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2774 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2775 
2776 		while (n) {
2777 			if (!net_eq(dev_net(n->dev), net))
2778 				goto next;
2779 			if (state->neigh_sub_iter) {
2780 				loff_t fakep = 0;
2781 				void *v;
2782 
2783 				v = state->neigh_sub_iter(state, n, &fakep);
2784 				if (!v)
2785 					goto next;
2786 			}
2787 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2788 				break;
2789 			if (n->nud_state & ~NUD_NOARP)
2790 				break;
2791 next:
2792 			n = rcu_dereference_bh(n->next);
2793 		}
2794 
2795 		if (n)
2796 			break;
2797 	}
2798 	state->bucket = bucket;
2799 
2800 	return n;
2801 }
2802 
2803 static struct neighbour *neigh_get_next(struct seq_file *seq,
2804 					struct neighbour *n,
2805 					loff_t *pos)
2806 {
2807 	struct neigh_seq_state *state = seq->private;
2808 	struct net *net = seq_file_net(seq);
2809 	struct neigh_hash_table *nht = state->nht;
2810 
2811 	if (state->neigh_sub_iter) {
2812 		void *v = state->neigh_sub_iter(state, n, pos);
2813 		if (v)
2814 			return n;
2815 	}
2816 	n = rcu_dereference_bh(n->next);
2817 
2818 	while (1) {
2819 		while (n) {
2820 			if (!net_eq(dev_net(n->dev), net))
2821 				goto next;
2822 			if (state->neigh_sub_iter) {
2823 				void *v = state->neigh_sub_iter(state, n, pos);
2824 				if (v)
2825 					return n;
2826 				goto next;
2827 			}
2828 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2829 				break;
2830 
2831 			if (n->nud_state & ~NUD_NOARP)
2832 				break;
2833 next:
2834 			n = rcu_dereference_bh(n->next);
2835 		}
2836 
2837 		if (n)
2838 			break;
2839 
2840 		if (++state->bucket >= (1 << nht->hash_shift))
2841 			break;
2842 
2843 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2844 	}
2845 
2846 	if (n && pos)
2847 		--(*pos);
2848 	return n;
2849 }
2850 
2851 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2852 {
2853 	struct neighbour *n = neigh_get_first(seq);
2854 
2855 	if (n) {
2856 		--(*pos);
2857 		while (*pos) {
2858 			n = neigh_get_next(seq, n, pos);
2859 			if (!n)
2860 				break;
2861 		}
2862 	}
2863 	return *pos ? NULL : n;
2864 }
2865 
2866 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2867 {
2868 	struct neigh_seq_state *state = seq->private;
2869 	struct net *net = seq_file_net(seq);
2870 	struct neigh_table *tbl = state->tbl;
2871 	struct pneigh_entry *pn = NULL;
2872 	int bucket = state->bucket;
2873 
2874 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2875 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2876 		pn = tbl->phash_buckets[bucket];
2877 		while (pn && !net_eq(pneigh_net(pn), net))
2878 			pn = pn->next;
2879 		if (pn)
2880 			break;
2881 	}
2882 	state->bucket = bucket;
2883 
2884 	return pn;
2885 }
2886 
2887 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2888 					    struct pneigh_entry *pn,
2889 					    loff_t *pos)
2890 {
2891 	struct neigh_seq_state *state = seq->private;
2892 	struct net *net = seq_file_net(seq);
2893 	struct neigh_table *tbl = state->tbl;
2894 
2895 	do {
2896 		pn = pn->next;
2897 	} while (pn && !net_eq(pneigh_net(pn), net));
2898 
2899 	while (!pn) {
2900 		if (++state->bucket > PNEIGH_HASHMASK)
2901 			break;
2902 		pn = tbl->phash_buckets[state->bucket];
2903 		while (pn && !net_eq(pneigh_net(pn), net))
2904 			pn = pn->next;
2905 		if (pn)
2906 			break;
2907 	}
2908 
2909 	if (pn && pos)
2910 		--(*pos);
2911 
2912 	return pn;
2913 }
2914 
2915 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2916 {
2917 	struct pneigh_entry *pn = pneigh_get_first(seq);
2918 
2919 	if (pn) {
2920 		--(*pos);
2921 		while (*pos) {
2922 			pn = pneigh_get_next(seq, pn, pos);
2923 			if (!pn)
2924 				break;
2925 		}
2926 	}
2927 	return *pos ? NULL : pn;
2928 }
2929 
2930 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2931 {
2932 	struct neigh_seq_state *state = seq->private;
2933 	void *rc;
2934 	loff_t idxpos = *pos;
2935 
2936 	rc = neigh_get_idx(seq, &idxpos);
2937 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2938 		rc = pneigh_get_idx(seq, &idxpos);
2939 
2940 	return rc;
2941 }
2942 
2943 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2944 	__acquires(rcu_bh)
2945 {
2946 	struct neigh_seq_state *state = seq->private;
2947 
2948 	state->tbl = tbl;
2949 	state->bucket = 0;
2950 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2951 
2952 	rcu_read_lock_bh();
2953 	state->nht = rcu_dereference_bh(tbl->nht);
2954 
2955 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2956 }
2957 EXPORT_SYMBOL(neigh_seq_start);
2958 
2959 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2960 {
2961 	struct neigh_seq_state *state;
2962 	void *rc;
2963 
2964 	if (v == SEQ_START_TOKEN) {
2965 		rc = neigh_get_first(seq);
2966 		goto out;
2967 	}
2968 
2969 	state = seq->private;
2970 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2971 		rc = neigh_get_next(seq, v, NULL);
2972 		if (rc)
2973 			goto out;
2974 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2975 			rc = pneigh_get_first(seq);
2976 	} else {
2977 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2978 		rc = pneigh_get_next(seq, v, NULL);
2979 	}
2980 out:
2981 	++(*pos);
2982 	return rc;
2983 }
2984 EXPORT_SYMBOL(neigh_seq_next);
2985 
2986 void neigh_seq_stop(struct seq_file *seq, void *v)
2987 	__releases(rcu_bh)
2988 {
2989 	rcu_read_unlock_bh();
2990 }
2991 EXPORT_SYMBOL(neigh_seq_stop);
2992 
2993 /* statistics via seq_file */
2994 
2995 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2996 {
2997 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2998 	int cpu;
2999 
3000 	if (*pos == 0)
3001 		return SEQ_START_TOKEN;
3002 
3003 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3004 		if (!cpu_possible(cpu))
3005 			continue;
3006 		*pos = cpu+1;
3007 		return per_cpu_ptr(tbl->stats, cpu);
3008 	}
3009 	return NULL;
3010 }
3011 
3012 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3013 {
3014 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3015 	int cpu;
3016 
3017 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3018 		if (!cpu_possible(cpu))
3019 			continue;
3020 		*pos = cpu+1;
3021 		return per_cpu_ptr(tbl->stats, cpu);
3022 	}
3023 	return NULL;
3024 }
3025 
3026 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3027 {
3028 
3029 }
3030 
3031 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3032 {
3033 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3034 	struct neigh_statistics *st = v;
3035 
3036 	if (v == SEQ_START_TOKEN) {
3037 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3038 		return 0;
3039 	}
3040 
3041 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
3042 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
3043 		   atomic_read(&tbl->entries),
3044 
3045 		   st->allocs,
3046 		   st->destroys,
3047 		   st->hash_grows,
3048 
3049 		   st->lookups,
3050 		   st->hits,
3051 
3052 		   st->res_failed,
3053 
3054 		   st->rcv_probes_mcast,
3055 		   st->rcv_probes_ucast,
3056 
3057 		   st->periodic_gc_runs,
3058 		   st->forced_gc_runs,
3059 		   st->unres_discards,
3060 		   st->table_fulls
3061 		   );
3062 
3063 	return 0;
3064 }
3065 
3066 static const struct seq_operations neigh_stat_seq_ops = {
3067 	.start	= neigh_stat_seq_start,
3068 	.next	= neigh_stat_seq_next,
3069 	.stop	= neigh_stat_seq_stop,
3070 	.show	= neigh_stat_seq_show,
3071 };
3072 #endif /* CONFIG_PROC_FS */
3073 
3074 static inline size_t neigh_nlmsg_size(void)
3075 {
3076 	return NLMSG_ALIGN(sizeof(struct ndmsg))
3077 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
3078 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
3079 	       + nla_total_size(sizeof(struct nda_cacheinfo))
3080 	       + nla_total_size(4); /* NDA_PROBES */
3081 }
3082 
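/* Broadcast an RTM_NEWNEIGH/RTM_DELNEIGH message for @n to the
 * RTNLGRP_NEIGH group, recording a socket error on failure.
 */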
3083 static void __neigh_notify(struct neighbour *n, int type, int flags,
3084 			   u32 pid)
3085 {
3086 	struct net *net = dev_net(n->dev);
3087 	struct sk_buff *skb;
3088 	int err = -ENOBUFS;
3089 
3090 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3091 	if (skb == NULL)
3092 		goto errout;
3093 
3094 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3095 	if (err < 0) {
3096 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3097 		WARN_ON(err == -EMSGSIZE);
3098 		kfree_skb(skb);
3099 		goto errout;
3100 	}
3101 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3102 	return;
3103 errout:
3104 	if (err < 0)
3105 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3106 }
3107 
3108 void neigh_app_ns(struct neighbour *n)
3109 {
3110 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3111 }
3112 EXPORT_SYMBOL(neigh_app_ns);
3113 
3114 #ifdef CONFIG_SYSCTL
3115 static int zero;
3116 static int int_max = INT_MAX;
3117 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3118 
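/* unres_qlen is exposed in packets but stored in bytes, with
 * SKB_TRUESIZE(ETH_FRAME_LEN) as the assumed per-packet cost: writing
 * e.g. 3 stores 3 * SKB_TRUESIZE(ETH_FRAME_LEN) in unres_qlen_bytes.
 */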
3119 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3120 			   void __user *buffer, size_t *lenp, loff_t *ppos)
3121 {
3122 	int size, ret;
3123 	struct ctl_table tmp = *ctl;
3124 
3125 	tmp.extra1 = &zero;
3126 	tmp.extra2 = &unres_qlen_max;
3127 	tmp.data = &size;
3128 
3129 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3130 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3131 
3132 	if (write && !ret)
3133 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3134 	return ret;
3135 }
3136 
3137 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3138 						   int family)
3139 {
3140 	switch (family) {
3141 	case AF_INET:
3142 		return __in_dev_arp_parms_get_rcu(dev);
3143 	case AF_INET6:
3144 		return __in6_dev_nd_parms_get_rcu(dev);
3145 	}
3146 	return NULL;
3147 }
3148 
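/* Propagate a changed default value at @index to every device whose
 * own parms still carries the default (data_state bit clear).
 */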
3149 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3150 				  int index)
3151 {
3152 	struct net_device *dev;
3153 	int family = neigh_parms_family(p);
3154 
3155 	rcu_read_lock();
3156 	for_each_netdev_rcu(net, dev) {
3157 		struct neigh_parms *dst_p =
3158 				neigh_get_dev_parms_rcu(dev, family);
3159 
3160 		if (dst_p && !test_bit(index, dst_p->data_state))
3161 			dst_p->data[index] = p->data[index];
3162 	}
3163 	rcu_read_unlock();
3164 }
3165 
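/* Common post-write hook for the sysctl handlers below: mark the value
 * as explicitly set, fire the DELAY_PROBE_TIME netevent when that
 * variable changed, and push writes to the default table down to all
 * devices.
 */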
3166 static void neigh_proc_update(struct ctl_table *ctl, int write)
3167 {
3168 	struct net_device *dev = ctl->extra1;
3169 	struct neigh_parms *p = ctl->extra2;
3170 	struct net *net = neigh_parms_net(p);
3171 	int index = (int *) ctl->data - p->data;
3172 
3173 	if (!write)
3174 		return;
3175 
3176 	set_bit(index, p->data_state);
3177 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3178 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3179 	if (!dev) /* NULL dev means this is default value */
3180 		neigh_copy_dflt_parms(net, p, index);
3181 }
3182 
3183 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3184 					   void __user *buffer,
3185 					   size_t *lenp, loff_t *ppos)
3186 {
3187 	struct ctl_table tmp = *ctl;
3188 	int ret;
3189 
3190 	tmp.extra1 = &zero;
3191 	tmp.extra2 = &int_max;
3192 
3193 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3194 	neigh_proc_update(ctl, write);
3195 	return ret;
3196 }
3197 
3198 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3199 			void __user *buffer, size_t *lenp, loff_t *ppos)
3200 {
3201 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3202 
3203 	neigh_proc_update(ctl, write);
3204 	return ret;
3205 }
3206 EXPORT_SYMBOL(neigh_proc_dointvec);
3207 
3208 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3209 				void __user *buffer,
3210 				size_t *lenp, loff_t *ppos)
3211 {
3212 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3213 
3214 	neigh_proc_update(ctl, write);
3215 	return ret;
3216 }
3217 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3218 
3219 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3220 					      void __user *buffer,
3221 					      size_t *lenp, loff_t *ppos)
3222 {
3223 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3224 
3225 	neigh_proc_update(ctl, write);
3226 	return ret;
3227 }
3228 
3229 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3230 				   void __user *buffer,
3231 				   size_t *lenp, loff_t *ppos)
3232 {
3233 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3234 
3235 	neigh_proc_update(ctl, write);
3236 	return ret;
3237 }
3238 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3239 
3240 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3241 					  void __user *buffer,
3242 					  size_t *lenp, loff_t *ppos)
3243 {
3244 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3245 
3246 	neigh_proc_update(ctl, write);
3247 	return ret;
3248 }
3249 
3250 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3251 					  void __user *buffer,
3252 					  size_t *lenp, loff_t *ppos)
3253 {
3254 	struct neigh_parms *p = ctl->extra2;
3255 	int ret;
3256 
3257 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3258 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3259 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3260 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3261 	else
3262 		ret = -1;
3263 
3264 	if (write && ret == 0) {
3265 		/* update reachable_time as well, otherwise, the change will
3266 		 * only be effective after the next time neigh_periodic_work
3267 		 * decides to recompute it
3268 		 */
3269 		p->reachable_time =
3270 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3271 	}
3272 	return ret;
3273 }
3274 
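/* Store the offset of data[index] within struct neigh_parms as a fake
 * pointer; neigh_sysctl_register() relocates it by adding the real
 * parms address (t->neigh_vars[i].data += (long) p).
 */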
3275 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3276 	(&((struct neigh_parms *) 0)->data[index])
3277 
3278 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3279 	[NEIGH_VAR_ ## attr] = { \
3280 		.procname	= name, \
3281 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3282 		.maxlen		= sizeof(int), \
3283 		.mode		= mval, \
3284 		.proc_handler	= proc, \
3285 	}
3286 
3287 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3288 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3289 
3290 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3291 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3292 
3293 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3294 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3295 
3296 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3297 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3298 
3299 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3300 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3301 
3302 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3303 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3304 
3305 static struct neigh_sysctl_table {
3306 	struct ctl_table_header *sysctl_header;
3307 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3308 } neigh_sysctl_template __read_mostly = {
3309 	.neigh_vars = {
3310 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3311 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3312 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3313 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3314 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3315 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3316 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3317 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3318 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3319 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3320 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3321 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3322 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3323 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3324 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3325 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3326 		[NEIGH_VAR_GC_INTERVAL] = {
3327 			.procname	= "gc_interval",
3328 			.maxlen		= sizeof(int),
3329 			.mode		= 0644,
3330 			.proc_handler	= proc_dointvec_jiffies,
3331 		},
3332 		[NEIGH_VAR_GC_THRESH1] = {
3333 			.procname	= "gc_thresh1",
3334 			.maxlen		= sizeof(int),
3335 			.mode		= 0644,
3336 			.extra1 	= &zero,
3337 			.extra2		= &int_max,
3338 			.proc_handler	= proc_dointvec_minmax,
3339 		},
3340 		[NEIGH_VAR_GC_THRESH2] = {
3341 			.procname	= "gc_thresh2",
3342 			.maxlen		= sizeof(int),
3343 			.mode		= 0644,
3344 			.extra1 	= &zero,
3345 			.extra2		= &int_max,
3346 			.proc_handler	= proc_dointvec_minmax,
3347 		},
3348 		[NEIGH_VAR_GC_THRESH3] = {
3349 			.procname	= "gc_thresh3",
3350 			.maxlen		= sizeof(int),
3351 			.mode		= 0644,
3352 			.extra1 	= &zero,
3353 			.extra2		= &int_max,
3354 			.proc_handler	= proc_dointvec_minmax,
3355 		},
3356 		{},
3357 	},
3358 };
3359 
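/* Register the net/<ipv4|ipv6>/neigh/<dev|default>/ sysctls for @p.
 * For real devices the table is terminated before the GC entries,
 * which exist only under "default"; a non-NULL @handler overrides the
 * retrans/base_reachable time handlers.
 */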
3360 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3361 			  proc_handler *handler)
3362 {
3363 	int i;
3364 	struct neigh_sysctl_table *t;
3365 	const char *dev_name_source;
3366 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3367 	char *p_name;
3368 
3369 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3370 	if (!t)
3371 		goto err;
3372 
3373 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3374 		t->neigh_vars[i].data += (long) p;
3375 		t->neigh_vars[i].extra1 = dev;
3376 		t->neigh_vars[i].extra2 = p;
3377 	}
3378 
3379 	if (dev) {
3380 		dev_name_source = dev->name;
3381 		/* Terminate the table early */
3382 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3383 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3384 	} else {
3385 		struct neigh_table *tbl = p->tbl;
3386 		dev_name_source = "default";
3387 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3388 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3389 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3390 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3391 	}
3392 
3393 	if (handler) {
3394 		/* RetransTime */
3395 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3396 		/* ReachableTime */
3397 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3398 		/* RetransTime (in milliseconds)*/
3399 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3400 		/* ReachableTime (in milliseconds) */
3401 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3402 	} else {
3403 		/* These handlers update p->reachable_time after
3404 		 * base_reachable_time(_ms) is set, so the new value takes
3405 		 * effect on the next neighbour update instead of waiting for
3406 		 * neigh_periodic_work to recompute it (which can take multiple
3407 		 * minutes). Any handler that replaces them should do the same.
3408 		 */
3409 		/* ReachableTime */
3410 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3411 			neigh_proc_base_reachable_time;
3412 		/* ReachableTime (in milliseconds) */
3413 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3414 			neigh_proc_base_reachable_time;
3415 	}
3416 
3417 	/* Don't export sysctls to unprivileged users */
3418 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3419 		t->neigh_vars[0].procname = NULL;
3420 
3421 	switch (neigh_parms_family(p)) {
3422 	case AF_INET:
3423 	      p_name = "ipv4";
3424 	      break;
3425 	case AF_INET6:
3426 	      p_name = "ipv6";
3427 	      break;
3428 	default:
3429 	      BUG();
3430 	}
3431 
3432 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3433 		p_name, dev_name_source);
3434 	t->sysctl_header =
3435 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3436 	if (!t->sysctl_header)
3437 		goto free;
3438 
3439 	p->sysctl_table = t;
3440 	return 0;
3441 
3442 free:
3443 	kfree(t);
3444 err:
3445 	return -ENOBUFS;
3446 }
3447 EXPORT_SYMBOL(neigh_sysctl_register);
3448 
3449 void neigh_sysctl_unregister(struct neigh_parms *p)
3450 {
3451 	if (p->sysctl_table) {
3452 		struct neigh_sysctl_table *t = p->sysctl_table;
3453 		p->sysctl_table = NULL;
3454 		unregister_net_sysctl_table(t->sysctl_header);
3455 		kfree(t);
3456 	}
3457 }
3458 EXPORT_SYMBOL(neigh_sysctl_unregister);
3459 
3460 #endif	/* CONFIG_SYSCTL */
3461 
3462 static int __init neigh_init(void)
3463 {
3464 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3465 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3466 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
3467 
3468 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3469 		      0);
3470 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3471 
3472 	return 0;
3473 }
3474 
3475 subsys_initcall(neigh_init);
3476