xref: /linux/net/core/neighbour.c (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Generic address resolution entity
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	Fixes:
10  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
11  *	Harald Welte		Add neighbour cache statistics like rtstat
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/slab.h>
17 #include <linux/kmemleak.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/arp.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
41 
42 #include <trace/events/neigh.h>
43 
44 #define NEIGH_DEBUG 1
45 #define neigh_dbg(level, fmt, ...)		\
46 do {						\
47 	if (level <= NEIGH_DEBUG)		\
48 		pr_debug(fmt, ##__VA_ARGS__);	\
49 } while (0)
50 
51 #define PNEIGH_HASHMASK		0xF
52 
53 static void neigh_timer_handler(struct timer_list *t);
54 static void __neigh_notify(struct neighbour *n, int type, int flags,
55 			   u32 pid);
56 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 				    struct net_device *dev);
59 
60 #ifdef CONFIG_PROC_FS
61 static const struct seq_operations neigh_stat_seq_ops;
62 #endif
63 
64 /*
65    Neighbour hash table buckets are protected with rwlock tbl->lock.
66 
67    - All the scans/updates to hash buckets MUST be made under this lock.
68    - NOTHING clever should be made under this lock: no callbacks
69      to protocol backends, no attempts to send something to network.
70      It will result in deadlocks, if backend/driver wants to use neighbour
71      cache.
72    - If the entry requires some non-trivial actions, increase
73      its reference count and release table lock.
74 
75    Neighbour entries are protected:
76    - with reference count.
77    - with rwlock neigh->lock
78 
79    Reference count prevents destruction.
80 
81    neigh->lock mainly serializes ll address data and its validity state.
82    However, the same lock is used to protect another entry fields:
83     - timer
84     - resolution queue
85 
86    Again, nothing clever shall be made under neigh->lock,
87    the most complicated procedure, which we allow is dev->hard_header.
88    It is supposed, that dev->hard_header is simplistic and does
89    not make callbacks to neighbour tables.
90  */
91 
/* Output handler installed on dead/unresolvable entries: drop the packet
 * and report the path as down so callers stop using this neighbour.
 */
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
97 
/* Notify interested parties that an entry has been removed from the table
 * (rtnetlink RTM_DELNEIGH message and netevent notifier chain), then drop
 * the table's reference; the entry is freed once the last ref goes away.
 */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
105 
106 /*
107  * It is random distribution in the interval (1/2)*base...(3/2)*base.
108  * It corresponds to default IPv6 settings and is not overridable,
109  * because it is really reasonable choice.
110  */
111 
unsigned long neigh_rand_reach_time(unsigned long base)
{
	/* Uniformly random value in [base/2, 3*base/2); 0 stays 0. */
	if (!base)
		return 0;
	return (prandom_u32() % base) + (base >> 1);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
117 
/* Mark an entry dead and remove it from the per-table bookkeeping lists.
 * Caller holds tbl->lock and n->lock.  A dead entry is ignored by the gc
 * and managed-probe machinery and just waits for its refcount to drop.
 */
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		/* no longer counted against the gc_thresh* limits */
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}
128 
/* Re-evaluate whether @n belongs on the table's gc candidate list after a
 * state/flags change.  Takes tbl->lock before n->lock (the locking order
 * used throughout this file); dead entries are left alone, since
 * neigh_mark_dead() already detached them.
 */
static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
157 
/* Keep tbl->managed_list in sync with the NTF_MANAGED flag on @n.
 * Same lock ordering and dead-entry handling as neigh_update_gc_list().
 */
static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
178 
/* Apply admin-requested flag changes (EXT_LEARNED / MANAGED) to @neigh.
 * Only honoured for NEIGH_UPDATE_F_ADMIN updates.  Reports back which
 * follow-up work the caller must do: *notify for a netlink notification,
 * *gc_update / *managed_update for list re-evaluation (done later,
 * outside the caller's locks).
 */
static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	/* translate update flags into their NTF_* counterparts */
	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}
}
207 
/* Try to unlink @n from its hash chain (@np points at the link to it).
 * Only succeeds when the table holds the sole reference (refcnt == 1),
 * i.e. no other CPU is using the entry.  Caller holds tbl->lock.
 * Returns true if the entry was unlinked and released.
 */
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		/* splice n out of the chain; RCU readers may still see it */
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}
228 
/* Locate @ndel in the hash table and attempt to delete it via neigh_del().
 * Caller holds tbl->lock.  Returns false when the entry is not found or
 * still referenced elsewhere.
 */
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	/* top hash_shift bits select the bucket */
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
251 
/* Synchronous garbage collection, run from the allocation path when the
 * table is over its thresholds.  Walks the gc candidate list (oldest
 * first, see list_add_tail in neigh_update_gc_list) and removes entries
 * that are unreferenced and failed/noarp/multicast/stale for >5s.
 * Returns the number of entries reclaimed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	/* how far we are over gc_thresh2 — stop once that many are gone */
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
289 
/* Arm the per-entry state machine timer at @when, taking a reference that
 * the timer handler releases.  mod_timer() returning nonzero means the
 * timer was already pending — a bug, since each entry may only have one
 * outstanding timer (and thus one timer reference).
 */
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
299 
300 static int neigh_del_timer(struct neighbour *n)
301 {
302 	if ((n->nud_state & NUD_IN_TIMER) &&
303 	    del_timer(&n->timer)) {
304 		neigh_release(n);
305 		return 1;
306 	}
307 	return 0;
308 }
309 
310 static void pneigh_queue_purge(struct sk_buff_head *list)
311 {
312 	struct sk_buff *skb;
313 
314 	while ((skb = skb_dequeue(list)) != NULL) {
315 		dev_put(skb->dev);
316 		kfree_skb(skb);
317 	}
318 }
319 
/* Remove every entry for @dev (or all devices when @dev is NULL) from the
 * hash table.  With @skip_perm, NUD_PERMANENT entries are left in place.
 * Caller holds tbl->lock.  Entries still referenced elsewhere cannot be
 * freed yet; they are neutered (queue purged, blackhole output, NOARP/
 * NONE state) so they are harmless until the last user releases them.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			/* unlink n; np intentionally stays put since the
			 * next chain element now occupies this slot
			 */
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
373 
/* Flush all (including permanent) entries for @dev, e.g. after the
 * device's hardware address changed and cached link-layer data is stale.
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
381 
/* Common teardown for device down/unregister: flush hash entries (perm
 * entries kept when @skip_perm), drop proxy entries for the device, stop
 * the proxy timer and drain its queue.  Note pneigh_ifdown_and_unlock()
 * releases tbl->lock taken here.
 */
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
393 
/* Carrier loss: flush learned entries but keep NUD_PERMANENT ones, since
 * static configuration survives a link flap.  Always returns 0.
 */
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);
400 
/* Device going away: flush every entry, permanent ones included.
 * Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
407 
/* Allocate and minimally initialize a neighbour entry.  Unless
 * @exempt_from_gc, the entry counts against the gc thresholds and a
 * synchronous forced gc may run first; allocation is refused when the
 * table stays over gc_thresh3.  Returns NULL on failure.  The entry is
 * born dead (n->dead = 1) until ___neigh_create() links it in.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	/* reserve our slot in the gc accounting up front; undone in
	 * out_entries on any failure path
	 */
	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	/* entry_size covers the protocol-specific key tail;
	 * neigh_priv_len is extra per-device private space
	 */
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
464 
/* Pick a random hash multiplier; the | 1 keeps it odd (and nonzero). */
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}
469 
/* Allocate a hash table with 2^shift zeroed buckets and fresh random hash
 * seeds.  Small bucket arrays come from kzalloc; larger ones straight
 * from the page allocator (with a kmemleak annotation, since kmemleak
 * does not track __get_free_pages).  Returns NULL on allocation failure.
 */
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
498 
/* RCU callback freeing a retired hash table; mirrors the two allocation
 * strategies of neigh_hash_alloc() (kzalloc vs. page allocator).
 */
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
515 
/* Replace the hash table with one of 2^new_shift buckets, rehashing every
 * entry with the new table's random seeds.  Caller holds tbl->lock.  The
 * old table is published for replacement via rcu_assign_pointer and freed
 * after a grace period, so lockless readers are never left dangling.
 * Returns the table now in use (the old one if allocation failed).
 */
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			/* push n onto the head of its new bucket */
			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
556 
/* Look up the entry for (@pkey, @dev) and return it with a reference
 * held, or NULL.  Lockless: the hash walk runs under rcu_read_lock_bh,
 * and refcount_inc_not_zero guards against racing with the final put.
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
576 
/* Device-independent lookup: find an entry matching @pkey in namespace
 * @net regardless of device (hashing with dev == NULL).  Returns the
 * entry with a reference held, or NULL.  Same RCU scheme as
 * neigh_lookup().
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
607 
/* Create and insert an entry for (@pkey, @dev).  Runs table/device/parms
 * constructors, grows the hash table if needed, and handles the race
 * where another CPU inserted the same key first (that winner is returned
 * instead).  With @want_ref the returned entry carries an extra
 * reference.  Returns an ERR_PTR on failure.
 */
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* backdate confirmed so the entry is not treated as reachable */
	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	/* parms being torn down — refuse to create against them */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* lost a race with a concurrent creator? return their entry */
	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	/* publish at the head of the bucket */
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	/* undo neigh_alloc()'s gc accounting before releasing */
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}
703 
/* Public entry creation: no extra flags, gc-managed entry. */
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
710 
711 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
712 {
713 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
714 	hash_val ^= (hash_val >> 16);
715 	hash_val ^= hash_val >> 8;
716 	hash_val ^= hash_val >> 4;
717 	hash_val &= PNEIGH_HASHMASK;
718 	return hash_val;
719 }
720 
721 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
722 					      struct net *net,
723 					      const void *pkey,
724 					      unsigned int key_len,
725 					      struct net_device *dev)
726 {
727 	while (n) {
728 		if (!memcmp(n->key, pkey, key_len) &&
729 		    net_eq(pneigh_net(n), net) &&
730 		    (n->dev == dev || !n->dev))
731 			return n;
732 		n = n->next;
733 	}
734 	return NULL;
735 }
736 
/* Lockless proxy lookup helper — caller is responsible for holding
 * tbl->lock (or otherwise serializing against chain modification).
 */
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
747 
/* Find a proxy entry for (@pkey, @dev), creating one when @creat is set.
 * Creation requires the RTNL (kzalloc with GFP_KERNEL, pconstructor may
 * sleep) and is serialized by it — hence the unlocked miss-then-insert is
 * race-free.  Returns the entry or NULL.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	/* pneigh_entry has a flexible key tail of key_len bytes */
	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		dev_put_track(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
790 
791 
/* Remove the proxy entry exactly matching (@pkey, @dev, @net).  The
 * destructor and frees run after dropping tbl->lock, since pdestructor
 * may not be safe to call under it.  Returns 0 or -ENOENT.
 */
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			dev_put_track(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
816 
/* Remove all proxy entries for @dev (all devices when @dev is NULL).
 * Called with tbl->lock held and — note — releases it: matching entries
 * are first moved to a private freelist under the lock, then destroyed
 * after unlocking, where pdestructor may safely run.
 */
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		dev_put_track(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}
846 
847 static void neigh_parms_destroy(struct neigh_parms *parms);
848 
/* Drop a reference on a parms block; destroy it on the last put. */
static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
854 
855 /*
856  *	neighbour must already be out of the table;
857  *
858  */
/*
 *	neighbour must already be out of the table;
 *
 */
/* Final destruction once the last reference is gone: cancel any timer,
 * purge the pending-packet queue, let the device clean up its per-entry
 * state, drop device/parms references and free via RCU.  Refuses (with a
 * loud warning) to destroy an entry not marked dead.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	/* dead entries must have had their timer stopped already */
	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put_track(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
891 
892 /* Neighbour state is suspicious;
893    disable fast path.
894 
895    Called with write_locked neigh.
896  */
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	/* slow path: ops->output revalidates before transmitting */
	neigh->output = neigh->ops->output;
}
903 
904 /* Neighbour state is OK;
905    enable fast path.
906 
907    Called with write_locked neigh.
908  */
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	/* fast path: transmit using the cached link-layer header */
	neigh->output = neigh->ops->connected_output;
}
915 
/* Periodic (delayed-work) garbage collector: refreshes reachable_time
 * every 300s and sweeps the hash table for unreferenced entries that are
 * failed or idle longer than GC_STALETIME.  Drops and re-takes tbl->lock
 * between buckets so it never hogs the lock; re-reads nht afterwards in
 * case the table was grown meanwhile.
 */
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	/* below gc_thresh1 the table is small enough to leave alone */
	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			/* permanent, timer-driven and externally learned
			 * entries are never reclaimed here
			 */
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
997 
998 static __inline__ int neigh_max_probes(struct neighbour *n)
999 {
1000 	struct neigh_parms *p = n->parms;
1001 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1002 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1003 	        NEIGH_VAR(p, MCAST_PROBES));
1004 }
1005 
/* Resolution failed: report unreachability for every queued packet and
 * purge the queue.  Called and returns with neigh->lock write-held, but
 * drops it around each error_report call — hence the re-check of
 * nud_state each iteration, in case the entry changed meanwhile.
 */
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* It is very thin place. report_unreachable is very complicated
	   routine. Particularly, it can hit the same neighbour entry!

	   So that, we try to be accurate and avoid dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
1030 
/* Send one solicitation for @neigh and bump the probe counter.  Called
 * with neigh->lock write-held and drops it (solicit may sleep-free code
 * paths / must not run under the lock).
 */
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}
1044 
/* Called when a timer expires for a neighbour entry. */

/* NUD state machine tick.  Transitions REACHABLE -> DELAY -> STALE/PROBE
 * based on confirmation and usage timestamps, fails the entry when the
 * probe budget is exhausted, re-arms the timer for states that need one,
 * and sends the next probe for INCOMPLETE/PROBE.  Releases the reference
 * taken by neigh_add_timer().
 */
static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	/* timer raced with a state change that no longer needs it */
	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			/* confirmation arrived during the delay window */
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* clamp to at least 10ms in the future */
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		/* neigh_probe() drops neigh->lock */
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	/* drop the reference taken when this timer was armed */
	neigh_release(neigh);
}
1135 
/* Kick off (or continue) resolution for an entry on a transmit attempt.
 *
 * Returns 0 when the entry is already usable (CONNECTED/DELAY/PROBE) and
 * @skb may be sent right away; returns 1 when the packet was queued on
 * arp_queue (or freed) pending resolution/failure.  With @immediate_ok
 * the first probe is issued from this context instead of from the timer.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		/* NUD_NONE/NUD_FAILED: start from scratch, but only if
		 * probing is configured at all.
		 */
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			if (!immediate_ok) {
				next = now + 1;
			} else {
				immediate_probe = true;
				next = now + max(NEIGH_VAR(neigh->parms,
							   RETRANS_TIME),
						 HZ / 100);
			}
			neigh_add_timer(neigh, next);
		} else {
			/* No probing configured: fail immediately. */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		/* Stale entry being used again: verify it via NUD_DELAY. */
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* Queue the packet, evicting oldest entries if the
			 * byte budget would be exceeded.
			 */
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	/* neigh_probe() drops neigh->lock itself (without the BH part),
	 * hence the split unlock/local_bh_enable below.
	 */
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
1223 
1224 static void neigh_update_hhs(struct neighbour *neigh)
1225 {
1226 	struct hh_cache *hh;
1227 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1228 		= NULL;
1229 
1230 	if (neigh->dev->header_ops)
1231 		update = neigh->dev->header_ops->cache_update;
1232 
1233 	if (update) {
1234 		hh = &neigh->hh;
1235 		if (READ_ONCE(hh->hh_len)) {
1236 			write_seqlock_bh(&hh->hh_lock);
1237 			update(hh, neigh->dev, neigh->ha);
1238 			write_sequnlock_bh(&hh->hh_lock);
1239 		}
1240 	}
1241 }
1242 
1243 /* Generic update routine.
1244    -- lladdr is new lladdr or NULL, if it is not supplied.
1245    -- new    is new state.
1246    -- flags
1247 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1248 				if it is different.
1249 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1250 				lladdr instead of overriding it
1251 				if it is different.
1252 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1253 	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
1254 	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
1255 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1256 				NTF_ROUTER flag.
1257 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1258 				a router.
1259 
1260    Caller MUST hold reference count on the entry.
1261  */
/* Core neighbour-update routine; see the flag description above.
 * Callers come from both protocol input paths and netlink.  Returns 0 on
 * success or a negative errno; notifications/GC-list maintenance happen
 * after the lock is dropped.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool gc_update = false, managed_update = false;
	int update_isrouter = 0;
	struct net_device *dev;
	int err, notify = 0;
	u8 old;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	/* Only administrative updates may touch NOARP/PERMANENT entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		/* Invalid target state: stop any timer, suspect the entry
		 * and (on INCOMPLETE/PROBE -> FAILED) flush queued packets.
		 */
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the old address but drop to STALE so
				 * the conflicting address gets re-verified.
				 */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		/* New link-layer address: publish it under ha_lock and
		 * refresh the cached hardware headers.
		 */
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			/* Drop the lock while transmitting the queued skb;
			 * state is re-checked after re-acquiring it.
			 */
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);
	if (((new ^ old) & NUD_PERMANENT) || gc_update)
		neigh_update_gc_list(neigh);
	if (managed_update)
		neigh_update_managed_list(neigh);
	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);
	trace_neigh_update_done(neigh, err);
	return err;
}
1449 
/* Public wrapper around __neigh_update() without extack error reporting. */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
1456 
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 *
 * The probe counter is preloaded to the maximum so that the timer fires
 * once and then fails the entry again unless a reply confirms it.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
1474 
1475 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1476 				 u8 *lladdr, void *saddr,
1477 				 struct net_device *dev)
1478 {
1479 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1480 						 lladdr || !dev->addr_len);
1481 	if (neigh)
1482 		neigh_update(neigh, lladdr, NUD_STALE,
1483 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1484 	return neigh;
1485 }
1486 EXPORT_SYMBOL(neigh_event_ns);
1487 
/* Initialize the cached hardware header for @n exactly once.
 * NOTE(review): the old comment claimed the caller holds
 * read_lock_bh(&n->lock), but the function itself takes
 * write_lock_bh(&n->lock) below, so the caller must NOT hold it —
 * confirm against callers (e.g. neigh_resolve_output()).
 */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
1505 
/* Slow and careful. */

/* Output path used while the neighbour may still need resolution: queue
 * (or trigger probes) via neigh_event_send(); once resolved, build the
 * link-layer header under the ha_lock seqlock and transmit.
 */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	/* neigh_event_send() returns 0 when the entry is usable now;
	 * otherwise the skb was queued/dropped and we return 0 upward.
	 */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		/* Retry header construction if neigh->ha changed mid-copy. */
		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
1540 
1541 /* As fast as possible without hh cache */
1542 
1543 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1544 {
1545 	struct net_device *dev = neigh->dev;
1546 	unsigned int seq;
1547 	int err;
1548 
1549 	do {
1550 		__skb_pull(skb, skb_network_offset(skb));
1551 		seq = read_seqbegin(&neigh->ha_lock);
1552 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1553 				      neigh->ha, NULL, skb->len);
1554 	} while (read_seqretry(&neigh->ha_lock, seq));
1555 
1556 	if (err >= 0)
1557 		err = dev_queue_xmit(skb);
1558 	else {
1559 		err = -EINVAL;
1560 		kfree_skb(skb);
1561 	}
1562 	return err;
1563 }
1564 EXPORT_SYMBOL(neigh_connected_output);
1565 
/* Output helper for devices that need no link-layer header resolution:
 * pass the skb straight to the device queue.
 */
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
1571 
/* Periodic worker that keeps NTF_MANAGED entries resolved by issuing a
 * probe event for each, then reschedules itself.  The managed list is
 * walked under tbl->lock.
 */
static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send_probe(neigh, NULL, false);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   max(NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME), HZ));
	write_unlock_bh(&tbl->lock);
}
1585 
/* Timer handler for delayed proxy replies: flush every queued skb whose
 * scheduled time has arrived through tbl->proxy_redo(), and rearm the
 * timer for the earliest still-pending entry.
 */
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		/* tdif <= 0 means this skb's scheduled time has passed. */
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			/* Drop the reference taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1619 
/* Queue @skb for a delayed proxy reply, randomly spread within
 * PROXY_DELAY to avoid synchronized answer bursts.  Drops the skb if the
 * proxy queue is already over PROXY_QLEN.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* If the timer was pending earlier than our slot, keep its expiry. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	/* Hold the device until neigh_proxy_process() releases it. */
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
1646 
1647 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1648 						      struct net *net, int ifindex)
1649 {
1650 	struct neigh_parms *p;
1651 
1652 	list_for_each_entry(p, &tbl->parms_list, list) {
1653 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1654 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1655 			return p;
1656 	}
1657 
1658 	return NULL;
1659 }
1660 
/* Allocate per-device neighbour parameters for @dev, cloned from the
 * table defaults, and link them into the table's parms list.  Returns
 * NULL on allocation failure or if the driver's ndo_neigh_setup()
 * rejects the device.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		/* Give the driver a chance to veto/adjust; undo on error. */
		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put_track(dev, &p->dev_tracker);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
1694 
/* RCU callback: drop the final reference on a released neigh_parms. */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1702 
/* Unlink @parms from the table and schedule its refcount drop after a
 * grace period (readers may still hold RCU references).  The table's
 * built-in default parms are never released.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put_track(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
1715 
/* Final destructor, called once the parms refcount reaches zero. */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1720 
1721 static struct lock_class_key neigh_table_proxy_queue_class;
1722 
1723 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1724 
/* Initialize a protocol's neighbour table and register it in the global
 * neigh_tables[] array under @index.  Allocation failures here are fatal
 * (panic), as this runs during protocol initialization.
 */
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	/* Initial hash table of 2^3 buckets; grows on demand. */
	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	/* Periodic GC and managed-entry refresh workers. */
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
1782 
/* Tear down a neighbour table on protocol unload: stop all async work,
 * purge proxy and neighbour entries, and free the hash structures.
 * Always returns 0.
 */
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->managed_work);
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
1810 
1811 static struct neigh_table *neigh_find_table(int family)
1812 {
1813 	struct neigh_table *tbl = NULL;
1814 
1815 	switch (family) {
1816 	case AF_INET:
1817 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1818 		break;
1819 	case AF_INET6:
1820 		tbl = neigh_tables[NEIGH_ND_TABLE];
1821 		break;
1822 	case AF_DECnet:
1823 		tbl = neigh_tables[NEIGH_DN_TABLE];
1824 		break;
1825 	}
1826 
1827 	return tbl;
1828 }
1829 
/* Netlink attribute policy for RTM_NEWNEIGH/RTM_DELNEIGH/RTM_GETNEIGH;
 * strict validation starts at NDA_NH_ID.
 */
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
1846 
/* RTM_DELNEIGH handler: delete a (proxy) neighbour entry described by the
 * netlink request.  Runs under RTNL.
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	/* Proxy entries live in a separate table and are handled apart. */
	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Mark the entry failed, then drop our lookup reference and unlink
	 * it from the hash under tbl->lock.
	 */
	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
1911 
/* RTM_NEWNEIGH handler: create or update a (proxy) neighbour entry from a
 * netlink request.  Runs under RTNL.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	u32 ndm_flags;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	ndm_flags = ndm->ndm_flags;
	/* Merge extended flags (NDA_FLAGS_EXT) above the legacy u8 flags. */
	if (tb[NDA_FLAGS_EXT]) {
		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
			      hweight32(NTF_EXT_MASK)));
		ndm_flags |= (ext << NTF_EXT_SHIFT);
	}
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
	/* Proxy entries are a simple flag/protocol update on pneigh. */
	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		if (ndm_flags & NTF_MANAGED) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
			goto out;
		}

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
		/* Permanent and externally-learned entries are not subject
		 * to garbage collection.
		 */
		bool exempt_from_gc = ndm_permanent ||
				      ndm_flags & NTF_EXT_LEARNED;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}
		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
			err = -EINVAL;
			goto out;
		}

		neigh = ___neigh_create(tbl, dst, dev,
					ndm_flags &
					(NTF_EXT_LEARNED | NTF_MANAGED),
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		/* Without NLM_F_REPLACE, existing lladdr/router state wins. */
		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;
	if (ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
	if (ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm_flags & NTF_MANAGED)
		flags |= NEIGH_UPDATE_F_MANAGED;
	if (ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);
	/* NTF_USE/NTF_MANAGED entries get an immediate resolution kick. */
	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);
out:
	return err;
}
2063 
/* Dump one neigh_parms set as a nested NDTA_PARMS attribute for an
 * RTM_GETNEIGHTBL reply.  Returns the message length on success or a
 * negative errno (the nest is cancelled on overflow).
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
2111 
/* Build one RTM_NEWNEIGHTBL message describing neighbour table @tbl:
 * table-wide GC thresholds/interval, a snapshot of the hash-table
 * configuration, per-CPU statistics summed into a single struct, and
 * the table's default (device-less) parameter set.
 *
 * Returns 0 on success or -EMSGSIZE if @skb ran out of room (the
 * partially built message is cancelled).
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Hold tbl->lock so everything dumped below is a consistent set. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		/* The hash table itself is RCU-managed and may be resized
		 * concurrently; take a stable peek at its parameters.
		 */
		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Statistics are kept per CPU; sum them for the dump. */
		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	/* tbl->parms is the table-wide default set and must never be bound
	 * to a device; per-device parms are dumped separately via
	 * neightbl_fill_param_info().
	 */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2200 
2201 static int neightbl_fill_param_info(struct sk_buff *skb,
2202 				    struct neigh_table *tbl,
2203 				    struct neigh_parms *parms,
2204 				    u32 pid, u32 seq, int type,
2205 				    unsigned int flags)
2206 {
2207 	struct ndtmsg *ndtmsg;
2208 	struct nlmsghdr *nlh;
2209 
2210 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2211 	if (nlh == NULL)
2212 		return -EMSGSIZE;
2213 
2214 	ndtmsg = nlmsg_data(nlh);
2215 
2216 	read_lock_bh(&tbl->lock);
2217 	ndtmsg->ndtm_family = tbl->family;
2218 	ndtmsg->ndtm_pad1   = 0;
2219 	ndtmsg->ndtm_pad2   = 0;
2220 
2221 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2222 	    neightbl_fill_parms(skb, parms) < 0)
2223 		goto errout;
2224 
2225 	read_unlock_bh(&tbl->lock);
2226 	nlmsg_end(skb, nlh);
2227 	return 0;
2228 errout:
2229 	read_unlock_bh(&tbl->lock);
2230 	nlmsg_cancel(skb, nlh);
2231 	return -EMSGSIZE;
2232 }
2233 
/* Netlink attribute policy for RTM_SETNEIGHTBL table-level attributes
 * (NDTA_PARMS contents are validated separately by nl_ntbl_parm_policy).
 */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
2242 
/* Netlink attribute policy for the NDTA_PARMS nest of RTM_SETNEIGHTBL.
 * Times are u64 milliseconds; counters and ifindex are u32.
 */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
2259 
/* RTM_SETNEIGHTBL handler: update a neighbour table's tunables.
 *
 * The table is selected by NDTA_NAME (mandatory) plus an optional
 * family filter in the ndtmsg header.  Per-parms values arrive in an
 * NDTA_PARMS nest keyed by NDTPA_IFINDEX (0 == the table default set);
 * table-wide GC knobs (thresholds, interval) may only be changed from
 * the initial network namespace.
 *
 * Returns 0 on success, -EINVAL on malformed requests, -ENOENT when the
 * table or parameter set does not exist.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	/* Find the table by name, optionally narrowed by family. */
	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every attribute present in the nest. */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				/* Legacy packet-count knob: converted to the
				 * byte-based limit using an ETH_FRAME_LEN
				 * truesize per packet.
				 */
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				/* Let interested parties (e.g. switchdev
				 * drivers) know the timer changed.
				 */
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	/* Table-wide GC settings are global state: restrict to init_net. */
	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
2421 
2422 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2423 				    struct netlink_ext_ack *extack)
2424 {
2425 	struct ndtmsg *ndtm;
2426 
2427 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2428 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2429 		return -EINVAL;
2430 	}
2431 
2432 	ndtm = nlmsg_data(nlh);
2433 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2434 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2435 		return -EINVAL;
2436 	}
2437 
2438 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2439 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2440 		return -EINVAL;
2441 	}
2442 
2443 	return 0;
2444 }
2445 
/* RTM_GETNEIGHTBL dump handler: for each neighbour table (optionally
 * filtered by family) emit one table message followed by one message per
 * device-bound parameter set visible in the requesting netns.
 *
 * Resume state across skb fills: cb->args[0] = table index,
 * cb->args[1] = parms index within that table.
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		/* Skip tbl->parms itself (the default set, already dumped by
		 * neightbl_fill_info above); walk only the per-device parms.
		 */
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		/* Later tables start from their first parms entry. */
		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
2506 
/* Build one RTM_NEWNEIGH message for neighbour @neigh.
 *
 * The 8-bit ndm_flags field carries only the legacy flags; anything
 * above NTF_EXT_SHIFT is exported separately via NDA_FLAGS_EXT.
 * neigh->lock is held while snapshotting state, hardware address and
 * the cacheinfo timestamps so they are mutually consistent.
 *
 * Returns 0 on success or -EMSGSIZE, cancelling the partial message.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	u32 neigh_flags, neigh_flags_ext;
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
	neigh_flags     = neigh->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh_flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		/* Copy the link-layer address under the lock; it may be
		 * rewritten concurrently by neigh_update().
		 */
		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	/* NOTE(review): the -1 presumably excludes the table's own
	 * reference so userspace sees external users only — confirm.
	 */
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2568 
2569 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2570 			    u32 pid, u32 seq, int type, unsigned int flags,
2571 			    struct neigh_table *tbl)
2572 {
2573 	u32 neigh_flags, neigh_flags_ext;
2574 	struct nlmsghdr *nlh;
2575 	struct ndmsg *ndm;
2576 
2577 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2578 	if (nlh == NULL)
2579 		return -EMSGSIZE;
2580 
2581 	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2582 	neigh_flags     = pn->flags & NTF_OLD_MASK;
2583 
2584 	ndm = nlmsg_data(nlh);
2585 	ndm->ndm_family	 = tbl->family;
2586 	ndm->ndm_pad1    = 0;
2587 	ndm->ndm_pad2    = 0;
2588 	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
2589 	ndm->ndm_type	 = RTN_UNICAST;
2590 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2591 	ndm->ndm_state	 = NUD_NONE;
2592 
2593 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2594 		goto nla_put_failure;
2595 
2596 	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2597 		goto nla_put_failure;
2598 	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2599 		goto nla_put_failure;
2600 
2601 	nlmsg_end(skb, nlh);
2602 	return 0;
2603 
2604 nla_put_failure:
2605 	nlmsg_cancel(skb, nlh);
2606 	return -EMSGSIZE;
2607 }
2608 
/* Announce a neighbour change: fire the in-kernel netevent chain, then
 * send an RTM_NEWNEIGH notification to userspace listeners.
 */
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
2614 
2615 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2616 {
2617 	struct net_device *master;
2618 
2619 	if (!master_idx)
2620 		return false;
2621 
2622 	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2623 
2624 	/* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
2625 	 * invalid value for ifindex to denote "no master".
2626 	 */
2627 	if (master_idx == -1)
2628 		return !!master;
2629 
2630 	if (!master || master->ifindex != master_idx)
2631 		return true;
2632 
2633 	return false;
2634 }
2635 
2636 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2637 {
2638 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2639 		return true;
2640 
2641 	return false;
2642 }
2643 
/* Filter criteria parsed from an RTM_GETNEIGH dump request; 0 means
 * "attribute not given" for both fields.
 */
struct neigh_dump_filter {
	int master_idx;	/* NDA_MASTER: required master ifindex (-1 == no master) */
	int dev_idx;	/* NDA_IFINDEX: required device ifindex */
};
2648 
/* Dump the regular (non-proxy) neighbour entries of @tbl into @skb,
 * applying the dev/master filters and skipping entries from other
 * network namespaces.  The hash table is walked under RCU.
 *
 * Resume state across skb fills: cb->args[1] = hash bucket,
 * cb->args[2] = entry index within the bucket.
 * Returns skb->len on completion or -1 when the skb filled up.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		/* Only the resumed bucket keeps its saved entry offset. */
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
2695 
/* Dump the proxy neighbour entries of @tbl into @skb, applying the
 * dev/master filters and skipping entries from other namespaces.
 * The proxy hash is protected by tbl->lock (reader side) rather than
 * RCU, unlike neigh_dump_table().
 *
 * Resume state across skb fills: cb->args[3] = hash bucket,
 * cb->args[4] = entry index within the bucket.
 * Returns skb->len on completion or -1 when the skb filled up.
 */
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		/* Only the resumed bucket keeps its saved entry offset. */
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH, flags, tbl) < 0) {
				/* Drop the lock here: the 'out' label below
				 * is shared with the already-unlocked path.
				 */
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;

}
2740 
/* Validate an RTM_GETNEIGH dump request and extract the NDA_IFINDEX /
 * NDA_MASTER filters into @filter.
 *
 * In strict mode the header must be a full ndmsg with all fields other
 * than family and NTF_PROXY zeroed, and only the two filter attributes
 * are accepted; legacy mode parses leniently for old userspace.
 * Returns 0 on success or a negative errno with extack set.
 */
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	int err, i;

	if (strict_check) {
		struct ndmsg *ndm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}
2801 
/* RTM_GETNEIGH dump handler: walk every neighbour table matching the
 * requested family and dump either its proxy entries (when the header
 * flags are exactly NTF_PROXY) or its regular entries.
 *
 * Resume state: cb->args[0] = table index; args[1..4] belong to the
 * per-table dump helpers and are cleared when moving to a new table.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	/* Validation errors only abort the dump in strict mode; legacy
	 * requesters keep the historical lenient behaviour.
	 */
	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
2847 
/* Validate an RTM_GETNEIGH (single-entry get) request and extract its
 * parameters: the owning table (from ndm_family), the destination key
 * (NDA_DST, length-checked against the table's key length), the target
 * ifindex and the header flags (only NTF_PROXY is allowed).
 * Returns 0 on success or a negative errno with extack set.
 */
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			/* *dst points into the request message; it stays
			 * valid only for the lifetime of the skb.
			 */
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
2907 
/* Worst-case payload size of one RTM_NEWNEIGH message; addresses are
 * sized for MAX_ADDR_LEN regardless of the actual family/device.
 */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2918 
2919 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2920 			   u32 pid, u32 seq)
2921 {
2922 	struct sk_buff *skb;
2923 	int err = 0;
2924 
2925 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2926 	if (!skb)
2927 		return -ENOBUFS;
2928 
2929 	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2930 	if (err) {
2931 		kfree_skb(skb);
2932 		goto errout;
2933 	}
2934 
2935 	err = rtnl_unicast(skb, net, pid);
2936 errout:
2937 	return err;
2938 }
2939 
/* Worst-case payload size of one RTM_NEWNEIGH message for a proxy
 * entry (no NDA_LLADDR/NDA_CACHEINFO/NDA_PROBES — proxy entries carry
 * no resolved address or NUD state).
 */
static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2947 
2948 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2949 			    u32 pid, u32 seq, struct neigh_table *tbl)
2950 {
2951 	struct sk_buff *skb;
2952 	int err = 0;
2953 
2954 	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2955 	if (!skb)
2956 		return -ENOBUFS;
2957 
2958 	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2959 	if (err) {
2960 		kfree_skb(skb);
2961 		goto errout;
2962 	}
2963 
2964 	err = rtnl_unicast(skb, net, pid);
2965 errout:
2966 	return err;
2967 }
2968 
/* RTM_GETNEIGH handler for a single entry: look up a neighbour (or,
 * with NTF_PROXY set, a proxy entry) by table, destination and device,
 * and unicast the reply back to the requester.
 *
 * A device is mandatory for regular entries but optional for proxy
 * entries (which may be device-less).  Returns 0 or a negative errno.
 */
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		/* creat == 0: lookup only, never create the entry. */
		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	/* neigh_lookup() takes a reference; drop it after the reply. */
	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
3029 
/* Invoke @cb(@n, @cookie) for every neighbour in @tbl.
 *
 * Entries are reached via RCU, while tbl->lock (reader side) is held to
 * keep the hash table from being resized during the walk; @cb therefore
 * runs with BH disabled and must not sleep or modify the table.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
3051 
/* The tbl->lock must be held as a writer and BH disabled.
 *
 * Walk every neighbour in @tbl and call @cb(n) under n->lock; when @cb
 * returns non-zero the entry is unlinked from its bucket, marked dead
 * and released.  Unlinking happens in-place while walking, so surviving
 * entries keep their relative order.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* Splice the entry out of the chain; np stays
				 * pointing at the same slot, now holding the
				 * successor.
				 */
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			/* Release outside n->lock; this may free the entry. */
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
3086 
/* Transmit @skb to link-layer destination @addr on @dev, resolving the
 * neighbour through table @index (NEIGH_ARP_TABLE, NEIGH_ND_TABLE, ...)
 * and creating the entry on demand.  NEIGH_LINK_TABLE bypasses
 * resolution and builds the hard header directly from @addr.
 *
 * Returns the neighbour output / xmit result, or a negative errno; on
 * error the skb is consumed (freed) here.
 */
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			/* IPv4 keys are plain u32s; use the fast lookup. */
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
3130 
3131 #ifdef CONFIG_PROC_FS
3132 
/* /proc seq helper: return the first neighbour of the hash table that
 * belongs to this netns and passes the state/sub-iterator filters, and
 * record its bucket in state->bucket.
 *
 * NOTE(review): callers presumably hold rcu_read_lock_bh() around the
 * whole seq traversal (taken by the seq start op, not in view) —
 * required for the rcu_dereference_bh() calls here.
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			/* Protocol-specific sub-iterator (e.g. per-neighbour
			 * sub-entries) may reject an entry entirely.
			 */
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* With SKIP_NOARP, accept only entries that have
			 * some state beyond NUD_NOARP.
			 */
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
3171 
/* /proc seq helper: advance from neighbour @n to the next acceptable
 * entry, crossing bucket boundaries as needed.  When @pos is non-NULL
 * it is decremented for each entry returned, supporting positional
 * seeks from neigh_get_idx().
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	/* Exhaust the current entry's sub-iterator before moving on. */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
3219 
3220 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3221 {
3222 	struct neighbour *n = neigh_get_first(seq);
3223 
3224 	if (n) {
3225 		--(*pos);
3226 		while (*pos) {
3227 			n = neigh_get_next(seq, n, pos);
3228 			if (!n)
3229 				break;
3230 		}
3231 	}
3232 	return *pos ? NULL : n;
3233 }
3234 
3235 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3236 {
3237 	struct neigh_seq_state *state = seq->private;
3238 	struct net *net = seq_file_net(seq);
3239 	struct neigh_table *tbl = state->tbl;
3240 	struct pneigh_entry *pn = NULL;
3241 	int bucket;
3242 
3243 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3244 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3245 		pn = tbl->phash_buckets[bucket];
3246 		while (pn && !net_eq(pneigh_net(pn), net))
3247 			pn = pn->next;
3248 		if (pn)
3249 			break;
3250 	}
3251 	state->bucket = bucket;
3252 
3253 	return pn;
3254 }
3255 
3256 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3257 					    struct pneigh_entry *pn,
3258 					    loff_t *pos)
3259 {
3260 	struct neigh_seq_state *state = seq->private;
3261 	struct net *net = seq_file_net(seq);
3262 	struct neigh_table *tbl = state->tbl;
3263 
3264 	do {
3265 		pn = pn->next;
3266 	} while (pn && !net_eq(pneigh_net(pn), net));
3267 
3268 	while (!pn) {
3269 		if (++state->bucket > PNEIGH_HASHMASK)
3270 			break;
3271 		pn = tbl->phash_buckets[state->bucket];
3272 		while (pn && !net_eq(pneigh_net(pn), net))
3273 			pn = pn->next;
3274 		if (pn)
3275 			break;
3276 	}
3277 
3278 	if (pn && pos)
3279 		--(*pos);
3280 
3281 	return pn;
3282 }
3283 
3284 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3285 {
3286 	struct pneigh_entry *pn = pneigh_get_first(seq);
3287 
3288 	if (pn) {
3289 		--(*pos);
3290 		while (*pos) {
3291 			pn = pneigh_get_next(seq, pn, pos);
3292 			if (!pn)
3293 				break;
3294 		}
3295 	}
3296 	return *pos ? NULL : pn;
3297 }
3298 
3299 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3300 {
3301 	struct neigh_seq_state *state = seq->private;
3302 	void *rc;
3303 	loff_t idxpos = *pos;
3304 
3305 	rc = neigh_get_idx(seq, &idxpos);
3306 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3307 		rc = pneigh_get_idx(seq, &idxpos);
3308 
3309 	return rc;
3310 }
3311 
/* Common ->start() for /proc neighbour seq_files (used by ARP/NDISC
 * code).  Pins the current hash table with RCU-bh and takes tbl->lock
 * for the duration of the dump; both are released in neigh_seq_stop().
 * Returns SEQ_START_TOKEN for a fresh read, otherwise the entry at
 * offset *pos.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	/* NEIGH_SEQ_IS_PNEIGH is internal walk state, never a caller flag. */
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
3329 
/* Common ->next() for /proc neighbour seq_files.  After SEQ_START_TOKEN
 * the walk begins with the first neighbour; once the neighbour table is
 * exhausted it falls through to the proxy (pneigh) table unless
 * NEIGH_SEQ_NEIGH_ONLY was requested.  NEIGH_SEQ_IS_PNEIGH, set by
 * pneigh_get_first(), records which table the cursor @v belongs to.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* Neighbour table done: switch over to the proxy table. */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		/* A pneigh cursor is impossible if the caller asked for
		 * neighbours only.
		 */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
3356 
3357 void neigh_seq_stop(struct seq_file *seq, void *v)
3358 	__releases(tbl->lock)
3359 	__releases(rcu_bh)
3360 {
3361 	struct neigh_seq_state *state = seq->private;
3362 	struct neigh_table *tbl = state->tbl;
3363 
3364 	read_unlock(&tbl->lock);
3365 	rcu_read_unlock_bh();
3366 }
3367 EXPORT_SYMBOL(neigh_seq_stop);
3368 
3369 /* statistics via seq_file */
3370 
3371 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3372 {
3373 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3374 	int cpu;
3375 
3376 	if (*pos == 0)
3377 		return SEQ_START_TOKEN;
3378 
3379 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3380 		if (!cpu_possible(cpu))
3381 			continue;
3382 		*pos = cpu+1;
3383 		return per_cpu_ptr(tbl->stats, cpu);
3384 	}
3385 	return NULL;
3386 }
3387 
3388 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3389 {
3390 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3391 	int cpu;
3392 
3393 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3394 		if (!cpu_possible(cpu))
3395 			continue;
3396 		*pos = cpu+1;
3397 		return per_cpu_ptr(tbl->stats, cpu);
3398 	}
3399 	(*pos)++;
3400 	return NULL;
3401 }
3402 
/* ->stop() for the stats file: the ->start() above takes no locks, so
 * there is nothing to release.
 */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
3407 
/* ->show() for the stats file: print the column header for the start
 * token, otherwise one line of counters for the per-CPU stats record
 * @v handed out by ->start()/->next().  Field widths in the format
 * string line the numbers up under the header.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
			"%08lx         %08lx         %08lx         "
			"%08lx       %08lx            %08lx\n",
		   /* "entries" is table-wide, not per-CPU. */
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
3443 
/* seq_file operations backing the per-table neighbour statistics file. */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
3450 #endif /* CONFIG_PROC_FS */
3451 
/* Broadcast a netlink message of @type about neighbour @n to the
 * RTNLGRP_NEIGH group.  Uses GFP_ATOMIC so it is safe from the contexts
 * neighbour state changes occur in.  On failure the error is recorded
 * on the group socket so listeners can detect the lost event.
 */
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	/* err is negative on both paths that jump here. */
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
3476 
/* Send an RTM_GETNEIGH request for @n to RTNLGRP_NEIGH listeners,
 * allowing a user-space helper to take part in resolving the entry.
 */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
3482 
3483 #ifdef CONFIG_SYSCTL
/* Upper bound for the packet-count view so the bytes conversion below
 * cannot overflow an int.
 */
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

/* Handler for the legacy "unres_qlen" sysctl.  The backing storage is
 * the QUEUE_LEN_BYTES knob (see the template table below), so the byte
 * count is converted to/from a packet count using the true size of one
 * maximal Ethernet frame.  A shadow ctl_table pointing at a local copy
 * ensures the stored value is only rewritten on a successful write.
 */
static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	/* Present the byte count to userspace as a packet count. */
	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
3503 
3504 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3505 						   int family)
3506 {
3507 	switch (family) {
3508 	case AF_INET:
3509 		return __in_dev_arp_parms_get_rcu(dev);
3510 	case AF_INET6:
3511 		return __in6_dev_nd_parms_get_rcu(dev);
3512 	}
3513 	return NULL;
3514 }
3515 
3516 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3517 				  int index)
3518 {
3519 	struct net_device *dev;
3520 	int family = neigh_parms_family(p);
3521 
3522 	rcu_read_lock();
3523 	for_each_netdev_rcu(net, dev) {
3524 		struct neigh_parms *dst_p =
3525 				neigh_get_dev_parms_rcu(dev, family);
3526 
3527 		if (dst_p && !test_bit(index, dst_p->data_state))
3528 			dst_p->data[index] = p->data[index];
3529 	}
3530 	rcu_read_unlock();
3531 }
3532 
3533 static void neigh_proc_update(struct ctl_table *ctl, int write)
3534 {
3535 	struct net_device *dev = ctl->extra1;
3536 	struct neigh_parms *p = ctl->extra2;
3537 	struct net *net = neigh_parms_net(p);
3538 	int index = (int *) ctl->data - p->data;
3539 
3540 	if (!write)
3541 		return;
3542 
3543 	set_bit(index, p->data_state);
3544 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3545 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3546 	if (!dev) /* NULL dev means this is default value */
3547 		neigh_copy_dflt_parms(net, p, index);
3548 }
3549 
3550 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3551 					   void *buffer, size_t *lenp,
3552 					   loff_t *ppos)
3553 {
3554 	struct ctl_table tmp = *ctl;
3555 	int ret;
3556 
3557 	tmp.extra1 = SYSCTL_ZERO;
3558 	tmp.extra2 = SYSCTL_INT_MAX;
3559 
3560 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3561 	neigh_proc_update(ctl, write);
3562 	return ret;
3563 }
3564 
3565 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3566 			size_t *lenp, loff_t *ppos)
3567 {
3568 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3569 
3570 	neigh_proc_update(ctl, write);
3571 	return ret;
3572 }
3573 EXPORT_SYMBOL(neigh_proc_dointvec);
3574 
3575 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3576 				size_t *lenp, loff_t *ppos)
3577 {
3578 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3579 
3580 	neigh_proc_update(ctl, write);
3581 	return ret;
3582 }
3583 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3584 
3585 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3586 					      void *buffer, size_t *lenp,
3587 					      loff_t *ppos)
3588 {
3589 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3590 
3591 	neigh_proc_update(ctl, write);
3592 	return ret;
3593 }
3594 
3595 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3596 				   void *buffer, size_t *lenp, loff_t *ppos)
3597 {
3598 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3599 
3600 	neigh_proc_update(ctl, write);
3601 	return ret;
3602 }
3603 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3604 
3605 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3606 					  void *buffer, size_t *lenp,
3607 					  loff_t *ppos)
3608 {
3609 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3610 
3611 	neigh_proc_update(ctl, write);
3612 	return ret;
3613 }
3614 
3615 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3616 					  void *buffer, size_t *lenp,
3617 					  loff_t *ppos)
3618 {
3619 	struct neigh_parms *p = ctl->extra2;
3620 	int ret;
3621 
3622 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3623 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3624 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3625 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3626 	else
3627 		ret = -1;
3628 
3629 	if (write && ret == 0) {
3630 		/* update reachable_time as well, otherwise, the change will
3631 		 * only be effective after the next time neigh_periodic_work
3632 		 * decides to recompute it
3633 		 */
3634 		p->reachable_time =
3635 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3636 	}
3637 	return ret;
3638 }
3639 
/* Placeholder .data value for the template table below: the offset of
 * data[index] inside struct neigh_parms encoded as a pointer from a
 * NULL base.  neigh_sysctl_register() rebases it onto a real parms
 * instance with "t->neigh_vars[i].data += (long) p".
 */
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

/* Build one ctl_table slot.  @attr names the slot, @data_attr the
 * neigh_parms field it reads/writes — they differ only for legacy
 * aliases (e.g. unres_qlen backed by QUEUE_LEN_BYTES).
 */
#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

/* Convenience wrappers pairing each knob with its proc handler. */
#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3666 
/* Template sysctl table, one slot per NEIGH_VAR_* knob.  Each
 * registration duplicates this template so its .data pointers can be
 * rebased onto a specific neigh_parms (see neigh_sysctl_register()).
 * The trailing GC_* entries apply to the table as a whole and are
 * stripped from per-device registrations.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		/* Legacy aliases sharing storage with the entries above. */
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		/* Table-wide GC knobs; .data is filled in at registration. */
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};
3721 
/* Register the neighbour sysctls for one parms instance under
 * net/<ipv4|ipv6>/neigh/<ifname|"default">.  Duplicates
 * neigh_sysctl_template so this registration's .data pointers can be
 * rebased onto @p; @handler, when non-NULL, overrides the four
 * time-related knobs.  @dev is NULL for a table's default parms.
 * Returns 0 on success, -ENOBUFS on allocation/registration failure.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
	if (!t)
		goto err;

	/* Rebase the template's offset-encoded .data onto @p, and stash
	 * dev/parms for neigh_proc_update() (see ctl->extra1/extra2 use).
	 */
	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		/* Default parms also expose the table-wide GC knobs. */
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
	      p_name = "ipv4";
	      break;
	case AF_INET6:
	      p_name = "ipv6";
	      break;
	default:
	      BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
3806 
3807 void neigh_sysctl_unregister(struct neigh_parms *p)
3808 {
3809 	if (p->sysctl_table) {
3810 		struct neigh_sysctl_table *t = p->sysctl_table;
3811 		p->sysctl_table = NULL;
3812 		unregister_net_sysctl_table(t->sysctl_header);
3813 		kfree(t);
3814 	}
3815 }
3816 EXPORT_SYMBOL(neigh_sysctl_unregister);
3817 
3818 #endif	/* CONFIG_SYSCTL */
3819 
/* Register the PF_UNSPEC rtnetlink handlers for neighbour entries
 * (new/del/get+dump) and neighbour tables (dump/set).
 */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}
3834