xref: /linux/net/core/neighbour.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Generic address resolution entity
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	Fixes:
10  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
11  *	Harald Welte		Add neighbour cache statistics like rtstat
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/slab.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/socket.h>
21 #include <linux/netdevice.h>
22 #include <linux/proc_fs.h>
23 #ifdef CONFIG_SYSCTL
24 #include <linux/sysctl.h>
25 #endif
26 #include <linux/times.h>
27 #include <net/net_namespace.h>
28 #include <net/neighbour.h>
29 #include <net/arp.h>
30 #include <net/dst.h>
31 #include <net/ip.h>
32 #include <net/sock.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
41 
42 #include <trace/events/neigh.h>
43 
44 #define NEIGH_DEBUG 1
45 #define neigh_dbg(level, fmt, ...)		\
46 do {						\
47 	if (level <= NEIGH_DEBUG)		\
48 		pr_debug(fmt, ##__VA_ARGS__);	\
49 } while (0)
50 
51 #define PNEIGH_HASHMASK		0xF
52 
53 static void neigh_timer_handler(struct timer_list *t);
54 static void neigh_notify(struct neighbour *n, int type, int flags, u32 pid);
55 static void __neigh_notify(struct neighbour *n, int type, int flags, u32 pid);
56 static void pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
57 			  bool skip_perm);
58 
59 #ifdef CONFIG_PROC_FS
60 static const struct seq_operations neigh_stat_seq_ops;
61 #endif
62 
63 static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family)
64 {
65 	int i;
66 
67 	switch (family) {
68 	default:
69 		DEBUG_NET_WARN_ON_ONCE(1);
70 		fallthrough; /* to avoid panic by null-ptr-deref */
71 	case AF_INET:
72 		i = NEIGH_ARP_TABLE;
73 		break;
74 	case AF_INET6:
75 		i = NEIGH_ND_TABLE;
76 		break;
77 	}
78 
79 	return &dev->neighbours[i];
80 }
81 
82 /*
83    Neighbour hash table buckets are protected with tbl->lock.
84 
85    - All the scans/updates to hash buckets MUST be made under this lock.
86    - NOTHING clever should be done under this lock: no callbacks
87      to protocol backends, no attempts to send anything to the network.
88      Doing so will result in deadlocks if the backend/driver wants to use
89      the neighbour cache.
90    - If the entry requires some non-trivial actions, increase
91      its reference count and release table lock.
92 
93    Neighbour entries are protected:
94    - with reference count.
95    - with rwlock neigh->lock
96 
97    Reference count prevents destruction.
98 
99    neigh->lock mainly serializes ll address data and its validity state.
100    However, the same lock is used to protect other entry fields:
101     - timer
102     - resolution queue
103 
104    Again, nothing clever should be done under neigh->lock;
105    the most complicated procedure we allow is dev->hard_header.
106    dev->hard_header is assumed to be simple and not to make
107    callbacks into the neighbour tables.
108  */
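/* Editor's note: a minimal sketch (hypothetical caller) of the pattern the
 * comment above describes - take a reference under tbl->lock, drop the lock,
 * and only then do the non-trivial work:
 *
 *	spin_lock_bh(&tbl->lock);
 *	... locate struct neighbour *n in a hash bucket ...
 *	neigh_hold(n);
 *	spin_unlock_bh(&tbl->lock);
 *
 *	... call into the driver, transmit, etc. ...
 *	neigh_release(n);
 */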
109 
110 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
111 {
112 	kfree_skb(skb);
113 	return -ENETDOWN;
114 }
115 
116 static void neigh_cleanup_and_release(struct neighbour *neigh)
117 {
118 	trace_neigh_cleanup_and_release(neigh, 0);
119 	neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
120 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
121 	neigh_release(neigh);
122 }
123 
124 /*
125  * The result is uniformly distributed in the interval (1/2)*base...(3/2)*base.
126  * It corresponds to the default IPv6 behaviour and is not overridable,
127  * because it is a really reasonable choice.
128  */
129 
130 unsigned long neigh_rand_reach_time(unsigned long base)
131 {
132 	return base ? get_random_u32_below(base) + (base >> 1) : 0;
133 }
134 EXPORT_SYMBOL(neigh_rand_reach_time);
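/* Editor's note: get_random_u32_below(base) returns a value in [0, base), so
 * the result above lies in [base/2, base/2 + base), i.e. the
 * (1/2)*base...(3/2)*base interval from the comment. For example, with
 * base = 30 * HZ the randomized reachable time falls between 15*HZ and
 * 45*HZ jiffies.
 */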
135 
136 static void neigh_mark_dead(struct neighbour *n)
137 {
138 	n->dead = 1;
139 	if (!list_empty(&n->gc_list)) {
140 		list_del_init(&n->gc_list);
141 		atomic_dec(&n->tbl->gc_entries);
142 	}
143 	if (!list_empty(&n->managed_list))
144 		list_del_init(&n->managed_list);
145 }
146 
147 static void neigh_update_gc_list(struct neighbour *n)
148 {
149 	bool on_gc_list, exempt_from_gc;
150 
151 	spin_lock_bh(&n->tbl->lock);
152 	write_lock(&n->lock);
153 	if (n->dead)
154 		goto out;
155 
156 	/* remove from the gc list if new state is permanent or if neighbor is
157 	 * externally learned / validated; otherwise entry should be on the gc
158 	 * list
159 	 */
160 	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
161 			 n->flags & (NTF_EXT_LEARNED | NTF_EXT_VALIDATED);
162 	on_gc_list = !list_empty(&n->gc_list);
163 
164 	if (exempt_from_gc && on_gc_list) {
165 		list_del_init(&n->gc_list);
166 		atomic_dec(&n->tbl->gc_entries);
167 	} else if (!exempt_from_gc && !on_gc_list) {
168 		/* add entries to the tail; cleaning removes from the front */
169 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
170 		atomic_inc(&n->tbl->gc_entries);
171 	}
172 out:
173 	write_unlock(&n->lock);
174 	spin_unlock_bh(&n->tbl->lock);
175 }
176 
177 static void neigh_update_managed_list(struct neighbour *n)
178 {
179 	bool on_managed_list, add_to_managed;
180 
181 	spin_lock_bh(&n->tbl->lock);
182 	write_lock(&n->lock);
183 	if (n->dead)
184 		goto out;
185 
186 	add_to_managed = n->flags & NTF_MANAGED;
187 	on_managed_list = !list_empty(&n->managed_list);
188 
189 	if (!add_to_managed && on_managed_list)
190 		list_del_init(&n->managed_list);
191 	else if (add_to_managed && !on_managed_list)
192 		list_add_tail(&n->managed_list, &n->tbl->managed_list);
193 out:
194 	write_unlock(&n->lock);
195 	spin_unlock_bh(&n->tbl->lock);
196 }
197 
198 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
199 			       bool *gc_update, bool *managed_update)
200 {
201 	u32 ndm_flags, old_flags = neigh->flags;
202 
203 	if (!(flags & NEIGH_UPDATE_F_ADMIN))
204 		return;
205 
206 	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
207 	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
208 	ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_VALIDATED) ? NTF_EXT_VALIDATED : 0;
209 
210 	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
211 		if (ndm_flags & NTF_EXT_LEARNED)
212 			neigh->flags |= NTF_EXT_LEARNED;
213 		else
214 			neigh->flags &= ~NTF_EXT_LEARNED;
215 		*notify = 1;
216 		*gc_update = true;
217 	}
218 	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
219 		if (ndm_flags & NTF_MANAGED)
220 			neigh->flags |= NTF_MANAGED;
221 		else
222 			neigh->flags &= ~NTF_MANAGED;
223 		*notify = 1;
224 		*managed_update = true;
225 	}
226 	if ((old_flags ^ ndm_flags) & NTF_EXT_VALIDATED) {
227 		if (ndm_flags & NTF_EXT_VALIDATED)
228 			neigh->flags |= NTF_EXT_VALIDATED;
229 		else
230 			neigh->flags &= ~NTF_EXT_VALIDATED;
231 		*notify = 1;
232 		*gc_update = true;
233 	}
234 }
235 
236 bool neigh_remove_one(struct neighbour *n)
237 {
238 	bool retval = false;
239 
240 	write_lock(&n->lock);
241 	if (refcount_read(&n->refcnt) == 1) {
242 		hlist_del_rcu(&n->hash);
243 		hlist_del_rcu(&n->dev_list);
244 		neigh_mark_dead(n);
245 		retval = true;
246 	}
247 	write_unlock(&n->lock);
248 	if (retval)
249 		neigh_cleanup_and_release(n);
250 	return retval;
251 }
252 
253 static int neigh_forced_gc(struct neigh_table *tbl)
254 {
255 	int max_clean = atomic_read(&tbl->gc_entries) -
256 			READ_ONCE(tbl->gc_thresh2);
257 	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
258 	unsigned long tref = jiffies - 5 * HZ;
259 	struct neighbour *n, *tmp;
260 	int shrunk = 0;
261 	int loop = 0;
262 
263 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
264 
265 	spin_lock_bh(&tbl->lock);
266 
267 	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
268 		if (refcount_read(&n->refcnt) == 1) {
269 			bool remove = false;
270 
271 			write_lock(&n->lock);
272 			if ((n->nud_state == NUD_FAILED) ||
273 			    (n->nud_state == NUD_NOARP) ||
274 			    (tbl->is_multicast &&
275 			     tbl->is_multicast(n->primary_key)) ||
276 			    !time_in_range(n->updated, tref, jiffies))
277 				remove = true;
278 			write_unlock(&n->lock);
279 
280 			if (remove && neigh_remove_one(n))
281 				shrunk++;
282 			if (shrunk >= max_clean)
283 				break;
284 			if (++loop == 16) {
285 				if (ktime_get_ns() > tmax)
286 					goto unlock;
287 				loop = 0;
288 			}
289 		}
290 	}
291 
292 	WRITE_ONCE(tbl->last_flush, jiffies);
293 unlock:
294 	spin_unlock_bh(&tbl->lock);
295 
296 	return shrunk;
297 }
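/* Editor's note, summarizing the thresholds as used in this file:
 * - gc_thresh1: neigh_periodic_work() skips ageing entries while the table
 *   holds fewer entries than this.
 * - gc_thresh2: soft maximum; neigh_forced_gc() tries to shrink the GC-able
 *   entries back down to it, and neigh_alloc() only triggers a forced GC
 *   above it when the last flush is more than 5 seconds old.
 * - gc_thresh3: hard maximum; allocations above it fail with
 *   "neighbor table overflow!" if forced GC cannot make room.
 */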
298 
299 static void neigh_add_timer(struct neighbour *n, unsigned long when)
300 {
301 	/* Keep a safe distance from the jiffies - LONG_MAX point while the
302 	 * timer is running in DELAY/PROBE state, but still show large times
303 	 * in the past to user space.
304 	 */
305 	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
306 
307 	neigh_hold(n);
308 	if (!time_in_range(n->confirmed, mint, jiffies))
309 		n->confirmed = mint;
310 	if (time_before(n->used, n->confirmed))
311 		n->used = n->confirmed;
312 	if (unlikely(mod_timer(&n->timer, when))) {
313 		printk("NEIGH: BUG, double timer add, state is %x\n",
314 		       n->nud_state);
315 		dump_stack();
316 	}
317 }
318 
319 static int neigh_del_timer(struct neighbour *n)
320 {
321 	if ((n->nud_state & NUD_IN_TIMER) &&
322 	    timer_delete(&n->timer)) {
323 		neigh_release(n);
324 		return 1;
325 	}
326 	return 0;
327 }
328 
329 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
330 						   int family)
331 {
332 	switch (family) {
333 	case AF_INET:
334 		return __in_dev_arp_parms_get_rcu(dev);
335 	case AF_INET6:
336 		return __in6_dev_nd_parms_get_rcu(dev);
337 	}
338 	return NULL;
339 }
340 
341 static void neigh_parms_qlen_dec(struct net_device *dev, int family)
342 {
343 	struct neigh_parms *p;
344 
345 	rcu_read_lock();
346 	p = neigh_get_dev_parms_rcu(dev, family);
347 	if (p)
348 		p->qlen--;
349 	rcu_read_unlock();
350 }
351 
352 static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
353 			       int family)
354 {
355 	struct sk_buff_head tmp;
356 	unsigned long flags;
357 	struct sk_buff *skb;
358 
359 	skb_queue_head_init(&tmp);
360 	spin_lock_irqsave(&list->lock, flags);
361 	skb = skb_peek(list);
362 	while (skb != NULL) {
363 		struct sk_buff *skb_next = skb_peek_next(skb, list);
364 		struct net_device *dev = skb->dev;
365 
366 		if (net == NULL || net_eq(dev_net(dev), net)) {
367 			neigh_parms_qlen_dec(dev, family);
368 			__skb_unlink(skb, list);
369 			__skb_queue_tail(&tmp, skb);
370 		}
371 		skb = skb_next;
372 	}
373 	spin_unlock_irqrestore(&list->lock, flags);
374 
375 	while ((skb = __skb_dequeue(&tmp))) {
376 		dev_put(skb->dev);
377 		kfree_skb(skb);
378 	}
379 }
380 
381 static void neigh_flush_one(struct neighbour *n)
382 {
383 	hlist_del_rcu(&n->hash);
384 	hlist_del_rcu(&n->dev_list);
385 
386 	write_lock(&n->lock);
387 
388 	neigh_del_timer(n);
389 	neigh_mark_dead(n);
390 
391 	if (refcount_read(&n->refcnt) != 1) {
392 		/* The most unpleasant situation:
393 		 * we must destroy the neighbour entry,
394 		 * but someone still uses it.
395 		 *
396 		 * The destruction will be delayed until
397 		 * the last user releases us, but
398 		 * we must kill timers etc. and move
399 		 * it to a safe state.
400 		 */
401 		__skb_queue_purge(&n->arp_queue);
402 		n->arp_queue_len_bytes = 0;
403 		WRITE_ONCE(n->output, neigh_blackhole);
404 
405 		if (n->nud_state & NUD_VALID)
406 			n->nud_state = NUD_NOARP;
407 		else
408 			n->nud_state = NUD_NONE;
409 
410 		neigh_dbg(2, "neigh %p is stray\n", n);
411 	}
412 
413 	write_unlock(&n->lock);
414 
415 	neigh_cleanup_and_release(n);
416 }
417 
418 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
419 			    bool skip_perm)
420 {
421 	struct hlist_head *dev_head;
422 	struct hlist_node *tmp;
423 	struct neighbour *n;
424 
425 	dev_head = neigh_get_dev_table(dev, tbl->family);
426 
427 	hlist_for_each_entry_safe(n, tmp, dev_head, dev_list) {
428 		if (skip_perm &&
429 		    (n->nud_state & NUD_PERMANENT ||
430 		     n->flags & NTF_EXT_VALIDATED))
431 			continue;
432 
433 		neigh_flush_one(n);
434 	}
435 }
436 
437 static void neigh_flush_table(struct neigh_table *tbl)
438 {
439 	struct neigh_hash_table *nht;
440 	int i;
441 
442 	nht = rcu_dereference_protected(tbl->nht,
443 					lockdep_is_held(&tbl->lock));
444 
445 	for (i = 0; i < (1 << nht->hash_shift); i++) {
446 		struct hlist_node *tmp;
447 		struct neighbour *n;
448 
449 		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i])
450 			neigh_flush_one(n);
451 	}
452 }
453 
454 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
455 {
456 	spin_lock_bh(&tbl->lock);
457 	neigh_flush_dev(tbl, dev, false);
458 	spin_unlock_bh(&tbl->lock);
459 }
460 EXPORT_SYMBOL(neigh_changeaddr);
461 
462 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
463 			  bool skip_perm)
464 {
465 	spin_lock_bh(&tbl->lock);
466 	if (likely(dev)) {
467 		neigh_flush_dev(tbl, dev, skip_perm);
468 	} else {
469 		DEBUG_NET_WARN_ON_ONCE(skip_perm);
470 		neigh_flush_table(tbl);
471 	}
472 	spin_unlock_bh(&tbl->lock);
473 
474 	pneigh_ifdown(tbl, dev, skip_perm);
475 	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
476 			   tbl->family);
477 	if (skb_queue_empty_lockless(&tbl->proxy_queue))
478 		timer_delete_sync(&tbl->proxy_timer);
479 	return 0;
480 }
481 
482 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
483 {
484 	__neigh_ifdown(tbl, dev, true);
485 	return 0;
486 }
487 EXPORT_SYMBOL(neigh_carrier_down);
488 
489 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
490 {
491 	__neigh_ifdown(tbl, dev, false);
492 	return 0;
493 }
494 EXPORT_SYMBOL(neigh_ifdown);
495 
496 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
497 				     struct net_device *dev,
498 				     u32 flags, bool exempt_from_gc)
499 {
500 	struct neighbour *n = NULL;
501 	unsigned long now = jiffies;
502 	int entries, gc_thresh3;
503 
504 	if (exempt_from_gc)
505 		goto do_alloc;
506 
507 	entries = atomic_inc_return(&tbl->gc_entries) - 1;
508 	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
509 	if (entries >= gc_thresh3 ||
510 	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
511 	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
512 		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
513 			net_info_ratelimited("%s: neighbor table overflow!\n",
514 					     tbl->id);
515 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
516 			goto out_entries;
517 		}
518 	}
519 
520 do_alloc:
521 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
522 	if (!n)
523 		goto out_entries;
524 
525 	__skb_queue_head_init(&n->arp_queue);
526 	rwlock_init(&n->lock);
527 	seqlock_init(&n->ha_lock);
528 	n->updated	  = n->used = now;
529 	n->nud_state	  = NUD_NONE;
530 	n->output	  = neigh_blackhole;
531 	n->flags	  = flags;
532 	seqlock_init(&n->hh.hh_lock);
533 	n->parms	  = neigh_parms_clone(&tbl->parms);
534 	timer_setup(&n->timer, neigh_timer_handler, 0);
535 
536 	NEIGH_CACHE_STAT_INC(tbl, allocs);
537 	n->tbl		  = tbl;
538 	refcount_set(&n->refcnt, 1);
539 	n->dead		  = 1;
540 	INIT_LIST_HEAD(&n->gc_list);
541 	INIT_LIST_HEAD(&n->managed_list);
542 
543 	atomic_inc(&tbl->entries);
544 out:
545 	return n;
546 
547 out_entries:
548 	if (!exempt_from_gc)
549 		atomic_dec(&tbl->gc_entries);
550 	goto out;
551 }
552 
553 static void neigh_get_hash_rnd(u32 *x)
554 {
555 	*x = get_random_u32() | 1;
556 }
557 
558 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
559 {
560 	size_t size = (1 << shift) * sizeof(struct hlist_head);
561 	struct hlist_head *hash_heads;
562 	struct neigh_hash_table *ret;
563 	int i;
564 
565 	ret = kmalloc_obj(*ret, GFP_ATOMIC);
566 	if (!ret)
567 		return NULL;
568 
569 	hash_heads = kzalloc(size, GFP_ATOMIC);
570 	if (!hash_heads) {
571 		kfree(ret);
572 		return NULL;
573 	}
574 	ret->hash_heads = hash_heads;
575 	ret->hash_shift = shift;
576 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
577 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
578 	return ret;
579 }
580 
581 static void neigh_hash_free_rcu(struct rcu_head *head)
582 {
583 	struct neigh_hash_table *nht = container_of(head,
584 						    struct neigh_hash_table,
585 						    rcu);
586 
587 	kfree(nht->hash_heads);
588 	kfree(nht);
589 }
590 
591 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
592 						unsigned long new_shift)
593 {
594 	unsigned int i, hash;
595 	struct neigh_hash_table *new_nht, *old_nht;
596 
597 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
598 
599 	old_nht = rcu_dereference_protected(tbl->nht,
600 					    lockdep_is_held(&tbl->lock));
601 	new_nht = neigh_hash_alloc(new_shift);
602 	if (!new_nht)
603 		return old_nht;
604 
605 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
606 		struct hlist_node *tmp;
607 		struct neighbour *n;
608 
609 		neigh_for_each_in_bucket_safe(n, tmp, &old_nht->hash_heads[i]) {
610 			hash = tbl->hash(n->primary_key, n->dev,
611 					 new_nht->hash_rnd);
612 
613 			hash >>= (32 - new_nht->hash_shift);
614 
615 			hlist_del_rcu(&n->hash);
616 			hlist_add_head_rcu(&n->hash, &new_nht->hash_heads[hash]);
617 		}
618 	}
619 
620 	rcu_assign_pointer(tbl->nht, new_nht);
621 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
622 	return new_nht;
623 }
624 
625 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
626 			       struct net_device *dev)
627 {
628 	struct neighbour *n;
629 
630 	NEIGH_CACHE_STAT_INC(tbl, lookups);
631 
632 	rcu_read_lock();
633 	n = __neigh_lookup_noref(tbl, pkey, dev);
634 	if (n) {
635 		if (!refcount_inc_not_zero(&n->refcnt))
636 			n = NULL;
637 		NEIGH_CACHE_STAT_INC(tbl, hits);
638 	}
639 
640 	rcu_read_unlock();
641 	return n;
642 }
643 EXPORT_SYMBOL(neigh_lookup);
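/* Editor's note: a minimal usage sketch (hypothetical caller) - look up an
 * IPv4 next hop in the ARP table and drop the reference when done. Assumes
 * "dev" and "nexthop" (a __be32) are already known to the caller:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &nexthop, dev);
 *
 *	if (n) {
 *		... read n->ha under read_lock_bh(&n->lock) ...
 *		neigh_release(n);
 *	}
 */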
644 
645 static struct neighbour *
646 ___neigh_create(struct neigh_table *tbl, const void *pkey,
647 		struct net_device *dev, u32 flags,
648 		bool exempt_from_gc, bool want_ref)
649 {
650 	u32 hash_val, key_len = tbl->key_len;
651 	struct neighbour *n1, *rc, *n;
652 	struct neigh_hash_table *nht;
653 	int error;
654 
655 	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
656 	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
657 	if (!n) {
658 		rc = ERR_PTR(-ENOBUFS);
659 		goto out;
660 	}
661 
662 	memcpy(n->primary_key, pkey, key_len);
663 	n->dev = dev;
664 	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);
665 
666 	/* Protocol specific setup. */
667 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
668 		rc = ERR_PTR(error);
669 		goto out_neigh_release;
670 	}
671 
672 	if (dev->netdev_ops->ndo_neigh_construct) {
673 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
674 		if (error < 0) {
675 			rc = ERR_PTR(error);
676 			goto out_neigh_release;
677 		}
678 	}
679 
680 	/* Device specific setup. */
681 	if (n->parms->neigh_setup &&
682 	    (error = n->parms->neigh_setup(n)) < 0) {
683 		rc = ERR_PTR(error);
684 		goto out_neigh_release;
685 	}
686 
687 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
688 
689 	spin_lock_bh(&tbl->lock);
690 	nht = rcu_dereference_protected(tbl->nht,
691 					lockdep_is_held(&tbl->lock));
692 
693 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
694 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
695 
696 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
697 
698 	if (n->parms->dead) {
699 		rc = ERR_PTR(-EINVAL);
700 		goto out_tbl_unlock;
701 	}
702 
703 	neigh_for_each_in_bucket(n1, &nht->hash_heads[hash_val]) {
704 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
705 			if (want_ref)
706 				neigh_hold(n1);
707 			rc = n1;
708 			goto out_tbl_unlock;
709 		}
710 	}
711 
712 	n->dead = 0;
713 	if (!exempt_from_gc)
714 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
715 	if (n->flags & NTF_MANAGED)
716 		list_add_tail(&n->managed_list, &n->tbl->managed_list);
717 	if (want_ref)
718 		neigh_hold(n);
719 	hlist_add_head_rcu(&n->hash, &nht->hash_heads[hash_val]);
720 
721 	hlist_add_head_rcu(&n->dev_list,
722 			   neigh_get_dev_table(dev, tbl->family));
723 
724 	spin_unlock_bh(&tbl->lock);
725 	neigh_dbg(2, "neigh %p is created\n", n);
726 	rc = n;
727 out:
728 	return rc;
729 out_tbl_unlock:
730 	spin_unlock_bh(&tbl->lock);
731 out_neigh_release:
732 	if (!exempt_from_gc)
733 		atomic_dec(&tbl->gc_entries);
734 	neigh_release(n);
735 	goto out;
736 }
737 
738 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
739 				 struct net_device *dev, bool want_ref)
740 {
741 	bool exempt_from_gc = !!(dev->flags & IFF_LOOPBACK);
742 
743 	return ___neigh_create(tbl, pkey, dev, 0, exempt_from_gc, want_ref);
744 }
745 EXPORT_SYMBOL(__neigh_create);
746 
747 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
748 {
749 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
750 	hash_val ^= (hash_val >> 16);
751 	hash_val ^= hash_val >> 8;
752 	hash_val ^= hash_val >> 4;
753 	hash_val &= PNEIGH_HASHMASK;
754 	return hash_val;
755 }
756 
757 struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
758 				   struct net *net, const void *pkey,
759 				   struct net_device *dev)
760 {
761 	struct pneigh_entry *n;
762 	unsigned int key_len;
763 	u32 hash_val;
764 
765 	key_len = tbl->key_len;
766 	hash_val = pneigh_hash(pkey, key_len);
767 	n = rcu_dereference_check(tbl->phash_buckets[hash_val],
768 				  lockdep_is_held(&tbl->phash_lock));
769 
770 	while (n) {
771 		if (!memcmp(n->key, pkey, key_len) &&
772 		    net_eq(pneigh_net(n), net) &&
773 		    (n->dev == dev || !n->dev))
774 			return n;
775 
776 		n = rcu_dereference_check(n->next, lockdep_is_held(&tbl->phash_lock));
777 	}
778 
779 	return NULL;
780 }
781 EXPORT_IPV6_MOD(pneigh_lookup);
782 
783 int pneigh_create(struct neigh_table *tbl, struct net *net,
784 		  const void *pkey, struct net_device *dev,
785 		  u32 flags, u8 protocol, bool permanent)
786 {
787 	struct pneigh_entry *n;
788 	unsigned int key_len;
789 	u32 hash_val;
790 	int err = 0;
791 
792 	mutex_lock(&tbl->phash_lock);
793 
794 	n = pneigh_lookup(tbl, net, pkey, dev);
795 	if (n)
796 		goto update;
797 
798 	key_len = tbl->key_len;
799 	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
800 	if (!n) {
801 		err = -ENOBUFS;
802 		goto out;
803 	}
804 
805 	write_pnet(&n->net, net);
806 	memcpy(n->key, pkey, key_len);
807 	n->dev = dev;
808 	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);
809 
810 	if (tbl->pconstructor && tbl->pconstructor(n)) {
811 		netdev_put(dev, &n->dev_tracker);
812 		kfree(n);
813 		err = -ENOBUFS;
814 		goto out;
815 	}
816 
817 	hash_val = pneigh_hash(pkey, key_len);
818 	n->next = tbl->phash_buckets[hash_val];
819 	rcu_assign_pointer(tbl->phash_buckets[hash_val], n);
820 update:
821 	WRITE_ONCE(n->flags, flags);
822 	n->permanent = permanent;
823 	if (protocol)
824 		WRITE_ONCE(n->protocol, protocol);
825 out:
826 	mutex_unlock(&tbl->phash_lock);
827 	return err;
828 }
829 
830 static void pneigh_destroy(struct rcu_head *rcu)
831 {
832 	struct pneigh_entry *n = container_of(rcu, struct pneigh_entry, rcu);
833 
834 	netdev_put(n->dev, &n->dev_tracker);
835 	kfree(n);
836 }
837 
838 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
839 		  struct net_device *dev)
840 {
841 	struct pneigh_entry *n, __rcu **np;
842 	unsigned int key_len;
843 	u32 hash_val;
844 
845 	key_len = tbl->key_len;
846 	hash_val = pneigh_hash(pkey, key_len);
847 
848 	mutex_lock(&tbl->phash_lock);
849 
850 	for (np = &tbl->phash_buckets[hash_val];
851 	     (n = rcu_dereference_protected(*np, 1)) != NULL;
852 	     np = &n->next) {
853 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
854 		    net_eq(pneigh_net(n), net)) {
855 			rcu_assign_pointer(*np, n->next);
856 
857 			mutex_unlock(&tbl->phash_lock);
858 
859 			if (tbl->pdestructor)
860 				tbl->pdestructor(n);
861 
862 			call_rcu(&n->rcu, pneigh_destroy);
863 			return 0;
864 		}
865 	}
866 
867 	mutex_unlock(&tbl->phash_lock);
868 	return -ENOENT;
869 }
870 
871 static void pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
872 			  bool skip_perm)
873 {
874 	struct pneigh_entry *n, __rcu **np;
875 	LIST_HEAD(head);
876 	u32 h;
877 
878 	mutex_lock(&tbl->phash_lock);
879 
880 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
881 		np = &tbl->phash_buckets[h];
882 		while ((n = rcu_dereference_protected(*np, 1)) != NULL) {
883 			if (skip_perm && n->permanent)
884 				goto skip;
885 			if (!dev || n->dev == dev) {
886 				rcu_assign_pointer(*np, n->next);
887 				list_add(&n->free_node, &head);
888 				continue;
889 			}
890 skip:
891 			np = &n->next;
892 		}
893 	}
894 
895 	mutex_unlock(&tbl->phash_lock);
896 
897 	while (!list_empty(&head)) {
898 		n = list_first_entry(&head, typeof(*n), free_node);
899 		list_del(&n->free_node);
900 
901 		if (tbl->pdestructor)
902 			tbl->pdestructor(n);
903 
904 		call_rcu(&n->rcu, pneigh_destroy);
905 	}
906 }
907 
908 static inline void neigh_parms_put(struct neigh_parms *parms)
909 {
910 	if (refcount_dec_and_test(&parms->refcnt))
911 		kfree(parms);
912 }
913 
914 /*
915  *	neighbour must already be out of the table;
916  *
917  */
918 void neigh_destroy(struct neighbour *neigh)
919 {
920 	struct net_device *dev = neigh->dev;
921 
922 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
923 
924 	if (!neigh->dead) {
925 		pr_warn("Destroying alive neighbour %p\n", neigh);
926 		dump_stack();
927 		return;
928 	}
929 
930 	if (neigh_del_timer(neigh))
931 		pr_warn("Impossible event\n");
932 
933 	write_lock_bh(&neigh->lock);
934 	__skb_queue_purge(&neigh->arp_queue);
935 	write_unlock_bh(&neigh->lock);
936 	neigh->arp_queue_len_bytes = 0;
937 
938 	if (dev->netdev_ops->ndo_neigh_destroy)
939 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
940 
941 	netdev_put(dev, &neigh->dev_tracker);
942 	neigh_parms_put(neigh->parms);
943 
944 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
945 
946 	atomic_dec(&neigh->tbl->entries);
947 	kfree_rcu(neigh, rcu);
948 }
949 EXPORT_SYMBOL(neigh_destroy);
950 
951 /* Neighbour state is suspicious;
952    disable fast path.
953 
954    Called with write_locked neigh.
955  */
956 static void neigh_suspect(struct neighbour *neigh)
957 {
958 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
959 
960 	WRITE_ONCE(neigh->output, neigh->ops->output);
961 }
962 
963 /* Neighbour state is OK;
964    enable fast path.
965 
966    Called with write_locked neigh.
967  */
968 static void neigh_connect(struct neighbour *neigh)
969 {
970 	neigh_dbg(2, "neigh %p is connected\n", neigh);
971 
972 	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
973 }
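/* Editor's note: "fast path" above means ops->connected_output (the header is
 * filled directly from the cached link-layer address), while the suspect path
 * uses ops->output (e.g. neigh_resolve_output()), which re-validates the
 * entry via neigh_event_send() before transmitting.
 */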
974 
975 static void neigh_periodic_work(struct work_struct *work)
976 {
977 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
978 	struct neigh_hash_table *nht;
979 	struct hlist_node *tmp;
980 	struct neighbour *n;
981 	unsigned int i;
982 
983 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
984 
985 	spin_lock_bh(&tbl->lock);
986 	nht = rcu_dereference_protected(tbl->nht,
987 					lockdep_is_held(&tbl->lock));
988 
989 	/*
990 	 *	periodically recompute ReachableTime from random function
991 	 */
992 
993 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
994 		struct neigh_parms *p;
995 
996 		WRITE_ONCE(tbl->last_rand, jiffies);
997 		list_for_each_entry(p, &tbl->parms_list, list)
998 			neigh_set_reach_time(p);
999 	}
1000 
1001 	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
1002 		goto out;
1003 
1004 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
1005 		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i]) {
1006 			unsigned int state;
1007 
1008 			write_lock(&n->lock);
1009 
1010 			state = n->nud_state;
1011 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
1012 			    (n->flags &
1013 			     (NTF_EXT_LEARNED | NTF_EXT_VALIDATED))) {
1014 				write_unlock(&n->lock);
1015 				continue;
1016 			}
1017 
1018 			if (time_before(n->used, n->confirmed) &&
1019 			    time_is_before_eq_jiffies(n->confirmed))
1020 				n->used = n->confirmed;
1021 
1022 			if (refcount_read(&n->refcnt) == 1 &&
1023 			    (state == NUD_FAILED ||
1024 			     !time_in_range_open(jiffies, n->used,
1025 						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
1026 				hlist_del_rcu(&n->hash);
1027 				hlist_del_rcu(&n->dev_list);
1028 				neigh_mark_dead(n);
1029 				write_unlock(&n->lock);
1030 				neigh_cleanup_and_release(n);
1031 				continue;
1032 			}
1033 			write_unlock(&n->lock);
1034 		}
1035 		/*
1036 		 * It's fine to release lock here, even if hash table
1037 		 * grows while we are preempted.
1038 		 */
1039 		spin_unlock_bh(&tbl->lock);
1040 		cond_resched();
1041 		spin_lock_bh(&tbl->lock);
1042 		nht = rcu_dereference_protected(tbl->nht,
1043 						lockdep_is_held(&tbl->lock));
1044 	}
1045 out:
1046 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
1047 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
1048 	 * BASE_REACHABLE_TIME.
1049 	 */
1050 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1051 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
1052 	spin_unlock_bh(&tbl->lock);
1053 }
1054 
1055 static __inline__ int neigh_max_probes(struct neighbour *n)
1056 {
1057 	struct neigh_parms *p = n->parms;
1058 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1059 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1060 	        NEIGH_VAR(p, MCAST_PROBES));
1061 }
1062 
1063 static void neigh_invalidate(struct neighbour *neigh)
1064 	__releases(neigh->lock)
1065 	__acquires(neigh->lock)
1066 {
1067 	struct sk_buff *skb;
1068 
1069 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
1070 	neigh_dbg(2, "neigh %p is failed\n", neigh);
1071 	neigh->updated = jiffies;
1072 
1073 	/* This is a very delicate place. report_unreachable is a very
1074 	   complicated routine. In particular, it can hit the same neighbour entry!
1075 
1076 	   So we try to be careful and avoid a dead loop. --ANK
1077 	 */
1078 	while (neigh->nud_state == NUD_FAILED &&
1079 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1080 		write_unlock(&neigh->lock);
1081 		neigh->ops->error_report(neigh, skb);
1082 		write_lock(&neigh->lock);
1083 	}
1084 	__skb_queue_purge(&neigh->arp_queue);
1085 	neigh->arp_queue_len_bytes = 0;
1086 }
1087 
1088 static void neigh_probe(struct neighbour *neigh)
1089 	__releases(neigh->lock)
1090 {
1091 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1092 	/* keep skb alive even if arp_queue overflows */
1093 	if (skb)
1094 		skb = skb_clone(skb, GFP_ATOMIC);
1095 	write_unlock(&neigh->lock);
1096 	if (neigh->ops->solicit)
1097 		neigh->ops->solicit(neigh, skb);
1098 	atomic_inc(&neigh->probes);
1099 	consume_skb(skb);
1100 }
1101 
1102 /* Called when a timer expires for a neighbour entry. */
1103 
1104 static void neigh_timer_handler(struct timer_list *t)
1105 {
1106 	unsigned long now, next;
1107 	struct neighbour *neigh = timer_container_of(neigh, t, timer);
1108 	bool skip_probe = false;
1109 	unsigned int state;
1110 	int notify = 0;
1111 
1112 	write_lock(&neigh->lock);
1113 
1114 	state = neigh->nud_state;
1115 	now = jiffies;
1116 	next = now + HZ;
1117 
1118 	if (!(state & NUD_IN_TIMER))
1119 		goto out;
1120 
1121 	if (state & NUD_REACHABLE) {
1122 		if (time_before_eq(now,
1123 				   neigh->confirmed + neigh->parms->reachable_time)) {
1124 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1125 			next = neigh->confirmed + neigh->parms->reachable_time;
1126 		} else if (time_before_eq(now,
1127 					  neigh->used +
1128 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1129 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1130 			WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1131 			neigh->updated = jiffies;
1132 			neigh_suspect(neigh);
1133 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1134 		} else {
1135 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1136 			WRITE_ONCE(neigh->nud_state, NUD_STALE);
1137 			neigh->updated = jiffies;
1138 			neigh_suspect(neigh);
1139 			notify = 1;
1140 		}
1141 	} else if (state & NUD_DELAY) {
1142 		if (time_before_eq(now,
1143 				   neigh->confirmed +
1144 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1145 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1146 			WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
1147 			neigh->updated = jiffies;
1148 			neigh_connect(neigh);
1149 			notify = 1;
1150 			next = neigh->confirmed + neigh->parms->reachable_time;
1151 		} else {
1152 			neigh_dbg(2, "neigh %p is probed\n", neigh);
1153 			WRITE_ONCE(neigh->nud_state, NUD_PROBE);
1154 			neigh->updated = jiffies;
1155 			atomic_set(&neigh->probes, 0);
1156 			notify = 1;
1157 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1158 					 HZ/100);
1159 		}
1160 	} else {
1161 		/* NUD_PROBE|NUD_INCOMPLETE */
1162 		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1163 	}
1164 
1165 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1166 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1167 		if (neigh->nud_state == NUD_PROBE &&
1168 		    neigh->flags & NTF_EXT_VALIDATED) {
1169 			WRITE_ONCE(neigh->nud_state, NUD_STALE);
1170 			neigh->updated = jiffies;
1171 		} else {
1172 			WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1173 			neigh_invalidate(neigh);
1174 		}
1175 		notify = 1;
1176 		skip_probe = true;
1177 	}
1178 
1179 	if (notify)
1180 		__neigh_notify(neigh, RTM_NEWNEIGH, 0, 0);
1181 
1182 	if (skip_probe)
1183 		goto out;
1184 
1185 	if (neigh->nud_state & NUD_IN_TIMER) {
1186 		if (time_before(next, jiffies + HZ/100))
1187 			next = jiffies + HZ/100;
1188 		if (!mod_timer(&neigh->timer, next))
1189 			neigh_hold(neigh);
1190 	}
1191 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1192 		neigh_probe(neigh);
1193 	} else {
1194 out:
1195 		write_unlock(&neigh->lock);
1196 	}
1197 
1198 	if (notify)
1199 		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
1200 
1201 	trace_neigh_timer_handler(neigh, 0);
1202 
1203 	neigh_release(neigh);
1204 }
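/* Editor's note, summarizing the transitions driven by the timer above:
 * REACHABLE goes to DELAY when reachable_time has expired but the entry was
 * used within DELAY_PROBE_TIME, otherwise to STALE; DELAY goes back to
 * REACHABLE if traffic confirmed the entry within DELAY_PROBE_TIME,
 * otherwise to PROBE; INCOMPLETE and PROBE keep re-arming the timer and
 * soliciting until neigh_max_probes() is exceeded, at which point the entry
 * becomes FAILED (a PROBE entry with NTF_EXT_VALIDATED falls back to STALE
 * instead).
 */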
1205 
1206 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1207 		       const bool immediate_ok)
1208 {
1209 	int rc;
1210 	bool immediate_probe = false;
1211 
1212 	write_lock_bh(&neigh->lock);
1213 
1214 	rc = 0;
1215 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1216 		goto out_unlock_bh;
1217 	if (neigh->dead)
1218 		goto out_dead;
1219 
1220 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1221 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1222 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1223 			unsigned long next, now = jiffies;
1224 
1225 			atomic_set(&neigh->probes,
1226 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1227 			neigh_del_timer(neigh);
1228 			WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1229 			neigh->updated = now;
1230 			if (!immediate_ok) {
1231 				next = now + 1;
1232 			} else {
1233 				immediate_probe = true;
1234 				next = now + max(NEIGH_VAR(neigh->parms,
1235 							   RETRANS_TIME),
1236 						 HZ / 100);
1237 			}
1238 			neigh_add_timer(neigh, next);
1239 		} else {
1240 			WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1241 			neigh->updated = jiffies;
1242 			write_unlock_bh(&neigh->lock);
1243 
1244 			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
1245 			return 1;
1246 		}
1247 	} else if (neigh->nud_state & NUD_STALE) {
1248 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1249 		neigh_del_timer(neigh);
1250 		WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1251 		neigh->updated = jiffies;
1252 		neigh_add_timer(neigh, jiffies +
1253 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1254 	}
1255 
1256 	if (neigh->nud_state == NUD_INCOMPLETE) {
1257 		if (skb) {
1258 			while (neigh->arp_queue_len_bytes + skb->truesize >
1259 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1260 				struct sk_buff *buff;
1261 
1262 				buff = __skb_dequeue(&neigh->arp_queue);
1263 				if (!buff)
1264 					break;
1265 				neigh->arp_queue_len_bytes -= buff->truesize;
1266 				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
1267 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1268 			}
1269 			skb_dst_force(skb);
1270 			__skb_queue_tail(&neigh->arp_queue, skb);
1271 			neigh->arp_queue_len_bytes += skb->truesize;
1272 		}
1273 		rc = 1;
1274 	}
1275 out_unlock_bh:
1276 	if (immediate_probe)
1277 		neigh_probe(neigh);
1278 	else
1279 		write_unlock(&neigh->lock);
1280 	local_bh_enable();
1281 	trace_neigh_event_send_done(neigh, rc);
1282 	return rc;
1283 
1284 out_dead:
1285 	if (neigh->nud_state & NUD_STALE)
1286 		goto out_unlock_bh;
1287 	write_unlock_bh(&neigh->lock);
1288 	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
1289 	trace_neigh_event_send_dead(neigh, 1);
1290 	return 1;
1291 }
1292 EXPORT_SYMBOL(__neigh_event_send);
1293 
1294 static void neigh_update_hhs(struct neighbour *neigh)
1295 {
1296 	struct hh_cache *hh;
1297 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1298 		= NULL;
1299 
1300 	if (neigh->dev->header_ops)
1301 		update = neigh->dev->header_ops->cache_update;
1302 
1303 	if (update) {
1304 		hh = &neigh->hh;
1305 		if (READ_ONCE(hh->hh_len)) {
1306 			write_seqlock_bh(&hh->hh_lock);
1307 			update(hh, neigh->dev, neigh->ha);
1308 			write_sequnlock_bh(&hh->hh_lock);
1309 		}
1310 	}
1311 }
1312 
1313 static void neigh_update_process_arp_queue(struct neighbour *neigh)
1314 	__releases(neigh->lock)
1315 	__acquires(neigh->lock)
1316 {
1317 	struct sk_buff *skb;
1318 
1319 	/* Again: avoid deadlock if something went wrong. */
1320 	while (neigh->nud_state & NUD_VALID &&
1321 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1322 		struct dst_entry *dst = skb_dst(skb);
1323 		struct neighbour *n2, *n1 = neigh;
1324 
1325 		write_unlock_bh(&neigh->lock);
1326 
1327 		rcu_read_lock();
1328 
1329 		/* Why not just use 'neigh' as-is?  The problem is that
1330 		 * things such as shaper, eql, and sch_teql can end up
1331 		 * using alternative, different, neigh objects to output
1332 		 * the packet in the output path.  So what we need to do
1333 		 * here is re-lookup the top-level neigh in the path so
1334 		 * we can reinject the packet there.
1335 		 */
1336 		n2 = NULL;
1337 		if (dst &&
1338 		    READ_ONCE(dst->obsolete) != DST_OBSOLETE_DEAD) {
1339 			n2 = dst_neigh_lookup_skb(dst, skb);
1340 			if (n2)
1341 				n1 = n2;
1342 		}
1343 		READ_ONCE(n1->output)(n1, skb);
1344 		if (n2)
1345 			neigh_release(n2);
1346 		rcu_read_unlock();
1347 
1348 		write_lock_bh(&neigh->lock);
1349 	}
1350 	__skb_queue_purge(&neigh->arp_queue);
1351 	neigh->arp_queue_len_bytes = 0;
1352 }
1353 
1354 /* Generic update routine.
1355    -- lladdr is new lladdr or NULL, if it is not supplied.
1356    -- new    is new state.
1357    -- flags
1358 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1359 				if it is different.
1360 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1361 				lladdr instead of overriding it
1362 				if it is different.
1363 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1364 	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
1365 	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
1366 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1367 				NTF_ROUTER flag.
1368 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1369 				a router.
1370 	NEIGH_UPDATE_F_EXT_VALIDATED means that the entry will not be removed
1371 				or invalidated.
1372 
1373    Caller MUST hold reference count on the entry.
1374  */
1375 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1376 			  u8 new, u32 flags, u32 nlmsg_pid,
1377 			  struct netlink_ext_ack *extack)
1378 {
1379 	bool gc_update = false, managed_update = false;
1380 	bool process_arp_queue = false;
1381 	int update_isrouter = 0;
1382 	struct net_device *dev;
1383 	int err, notify = 0;
1384 	u8 old;
1385 
1386 	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1387 
1388 	write_lock_bh(&neigh->lock);
1389 
1390 	dev    = neigh->dev;
1391 	old    = neigh->nud_state;
1392 	err    = -EPERM;
1393 
1394 	if (neigh->dead) {
1395 		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1396 		new = old;
1397 		goto out;
1398 	}
1399 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1400 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1401 		goto out;
1402 
1403 	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1404 	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1405 		new = old & ~NUD_PERMANENT;
1406 		WRITE_ONCE(neigh->nud_state, new);
1407 		err = 0;
1408 		goto out;
1409 	}
1410 
1411 	if (!(new & NUD_VALID)) {
1412 		neigh_del_timer(neigh);
1413 		if (old & NUD_CONNECTED)
1414 			neigh_suspect(neigh);
1415 		WRITE_ONCE(neigh->nud_state, new);
1416 		err = 0;
1417 		notify = old & NUD_VALID;
1418 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1419 		    (new & NUD_FAILED)) {
1420 			neigh_invalidate(neigh);
1421 			notify = 1;
1422 		}
1423 		goto out;
1424 	}
1425 
1426 	/* Compare new lladdr with cached one */
1427 	if (!dev->addr_len) {
1428 		/* First case: device needs no address. */
1429 		lladdr = neigh->ha;
1430 	} else if (lladdr) {
1431 		/* The second case: if something is already cached
1432 		   and a new address is proposed:
1433 		   - compare new & old
1434 		   - if they are different, check override flag
1435 		 */
1436 		if ((old & NUD_VALID) &&
1437 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1438 			lladdr = neigh->ha;
1439 	} else {
1440 		/* No address is supplied; if we know something,
1441 		   use it, otherwise discard the request.
1442 		 */
1443 		err = -EINVAL;
1444 		if (!(old & NUD_VALID)) {
1445 			NL_SET_ERR_MSG(extack, "No link layer address given");
1446 			goto out;
1447 		}
1448 		lladdr = neigh->ha;
1449 	}
1450 
1451 	/* Update the confirmed timestamp for the neighbour entry after we
1452 	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1453 	 */
1454 	if (new & NUD_CONNECTED)
1455 		neigh->confirmed = jiffies;
1456 
1457 	/* If the entry was valid and the address has not changed,
1458 	   do not change the entry state if the new one is STALE.
1459 	 */
1460 	err = 0;
1461 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1462 	if (old & NUD_VALID) {
1463 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1464 			update_isrouter = 0;
1465 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1466 			    (old & NUD_CONNECTED)) {
1467 				lladdr = neigh->ha;
1468 				new = NUD_STALE;
1469 			} else
1470 				goto out;
1471 		} else {
1472 			if (lladdr == neigh->ha && new == NUD_STALE &&
1473 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1474 				new = old;
1475 		}
1476 	}
1477 
1478 	/* Update timestamp only once we know we will make a change to the
1479 	 * neighbour entry. Otherwise we risk moving the locktime window with
1480 	 * no-op updates and ignoring relevant ARP updates.
1481 	 */
1482 	if (new != old || lladdr != neigh->ha)
1483 		neigh->updated = jiffies;
1484 
1485 	if (new != old) {
1486 		neigh_del_timer(neigh);
1487 		if (new & NUD_PROBE)
1488 			atomic_set(&neigh->probes, 0);
1489 		if (new & NUD_IN_TIMER)
1490 			neigh_add_timer(neigh, (jiffies +
1491 						((new & NUD_REACHABLE) ?
1492 						 neigh->parms->reachable_time :
1493 						 0)));
1494 		WRITE_ONCE(neigh->nud_state, new);
1495 		notify = 1;
1496 	}
1497 
1498 	if (lladdr != neigh->ha) {
1499 		write_seqlock(&neigh->ha_lock);
1500 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1501 		write_sequnlock(&neigh->ha_lock);
1502 		neigh_update_hhs(neigh);
1503 		if (!(new & NUD_CONNECTED))
1504 			neigh->confirmed = jiffies -
1505 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1506 		notify = 1;
1507 	}
1508 	if (new == old)
1509 		goto out;
1510 	if (new & NUD_CONNECTED)
1511 		neigh_connect(neigh);
1512 	else
1513 		neigh_suspect(neigh);
1514 
1515 	if (!(old & NUD_VALID))
1516 		process_arp_queue = true;
1517 
1518 out:
1519 	if (update_isrouter)
1520 		neigh_update_is_router(neigh, flags, &notify);
1521 
1522 	if (notify)
1523 		__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
1524 
1525 	if (process_arp_queue)
1526 		neigh_update_process_arp_queue(neigh);
1527 
1528 	write_unlock_bh(&neigh->lock);
1529 
1530 	if (((new ^ old) & NUD_PERMANENT) || gc_update)
1531 		neigh_update_gc_list(neigh);
1532 	if (managed_update)
1533 		neigh_update_managed_list(neigh);
1534 
1535 	if (notify)
1536 		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
1537 
1538 	trace_neigh_update_done(neigh, err);
1539 	return err;
1540 }
1541 
1542 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1543 		 u32 flags, u32 nlmsg_pid)
1544 {
1545 	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1546 }
1547 EXPORT_SYMBOL(neigh_update);
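/* Editor's note: a minimal usage sketch (hypothetical receive path). A
 * protocol that just learned a link-layer address for an existing entry
 * could confirm it with:
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE, 0);
 *
 * where "n" holds a reference (see the "Caller MUST hold reference count"
 * rule above) and "lladdr" points to dev->addr_len bytes.
 */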
1548 
1549 /* Update the neigh to listen temporarily for probe responses, even if it is
1550  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1551  */
1552 void __neigh_set_probe_once(struct neighbour *neigh)
1553 {
1554 	if (neigh->dead)
1555 		return;
1556 	neigh->updated = jiffies;
1557 	if (!(neigh->nud_state & NUD_FAILED))
1558 		return;
1559 	WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1560 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1561 	neigh_add_timer(neigh,
1562 			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1563 				      HZ/100));
1564 }
1565 EXPORT_SYMBOL(__neigh_set_probe_once);
1566 
1567 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1568 				 u8 *lladdr, void *saddr,
1569 				 struct net_device *dev)
1570 {
1571 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1572 						 lladdr || !dev->addr_len);
1573 	if (neigh)
1574 		neigh_update(neigh, lladdr, NUD_STALE,
1575 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1576 	return neigh;
1577 }
1578 EXPORT_SYMBOL(neigh_event_ns);
1579 
1580 /* called with read_lock_bh(&n->lock); */
1581 static void neigh_hh_init(struct neighbour *n)
1582 {
1583 	struct net_device *dev = n->dev;
1584 	__be16 prot = n->tbl->protocol;
1585 	struct hh_cache	*hh = &n->hh;
1586 
1587 	write_lock_bh(&n->lock);
1588 
1589 	/* Only one thread can come in here and initialize the
1590 	 * hh_cache entry.
1591 	 */
1592 	if (!hh->hh_len)
1593 		dev->header_ops->cache(n, hh, prot);
1594 
1595 	write_unlock_bh(&n->lock);
1596 }
1597 
1598 /* Slow and careful. */
1599 
1600 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1601 {
1602 	int rc = 0;
1603 
1604 	if (!neigh_event_send(neigh, skb)) {
1605 		int err;
1606 		struct net_device *dev = neigh->dev;
1607 		unsigned int seq;
1608 
1609 		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1610 			neigh_hh_init(neigh);
1611 
1612 		do {
1613 			__skb_pull(skb, skb_network_offset(skb));
1614 			seq = read_seqbegin(&neigh->ha_lock);
1615 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1616 					      neigh->ha, NULL, skb->len);
1617 		} while (read_seqretry(&neigh->ha_lock, seq));
1618 
1619 		if (err >= 0)
1620 			rc = dev_queue_xmit(skb);
1621 		else
1622 			goto out_kfree_skb;
1623 	}
1624 out:
1625 	return rc;
1626 out_kfree_skb:
1627 	rc = -EINVAL;
1628 	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_HH_FILLFAIL);
1629 	goto out;
1630 }
1631 EXPORT_SYMBOL(neigh_resolve_output);
1632 
1633 /* As fast as possible without hh cache */
1634 
1635 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1636 {
1637 	struct net_device *dev = neigh->dev;
1638 	unsigned int seq;
1639 	int err;
1640 
1641 	do {
1642 		__skb_pull(skb, skb_network_offset(skb));
1643 		seq = read_seqbegin(&neigh->ha_lock);
1644 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1645 				      neigh->ha, NULL, skb->len);
1646 	} while (read_seqretry(&neigh->ha_lock, seq));
1647 
1648 	if (err >= 0)
1649 		err = dev_queue_xmit(skb);
1650 	else {
1651 		err = -EINVAL;
1652 		kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_HH_FILLFAIL);
1653 	}
1654 	return err;
1655 }
1656 EXPORT_SYMBOL(neigh_connected_output);
1657 
1658 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1659 {
1660 	return dev_queue_xmit(skb);
1661 }
1662 EXPORT_SYMBOL(neigh_direct_output);
1663 
1664 static void neigh_managed_work(struct work_struct *work)
1665 {
1666 	struct neigh_table *tbl = container_of(work, struct neigh_table,
1667 					       managed_work.work);
1668 	struct neighbour *neigh;
1669 
1670 	spin_lock_bh(&tbl->lock);
1671 	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1672 		neigh_event_send_probe(neigh, NULL, false);
1673 	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1674 			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1675 	spin_unlock_bh(&tbl->lock);
1676 }
1677 
1678 static void neigh_proxy_process(struct timer_list *t)
1679 {
1680 	struct neigh_table *tbl = timer_container_of(tbl, t, proxy_timer);
1681 	long sched_next = 0;
1682 	unsigned long now = jiffies;
1683 	struct sk_buff *skb, *n;
1684 
1685 	spin_lock(&tbl->proxy_queue.lock);
1686 
1687 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1688 		long tdif = NEIGH_CB(skb)->sched_next - now;
1689 
1690 		if (tdif <= 0) {
1691 			struct net_device *dev = skb->dev;
1692 
1693 			neigh_parms_qlen_dec(dev, tbl->family);
1694 			__skb_unlink(skb, &tbl->proxy_queue);
1695 
1696 			if (tbl->proxy_redo && netif_running(dev)) {
1697 				rcu_read_lock();
1698 				tbl->proxy_redo(skb);
1699 				rcu_read_unlock();
1700 			} else {
1701 				kfree_skb(skb);
1702 			}
1703 
1704 			dev_put(dev);
1705 		} else if (!sched_next || tdif < sched_next)
1706 			sched_next = tdif;
1707 	}
1708 	timer_delete(&tbl->proxy_timer);
1709 	if (sched_next)
1710 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1711 	spin_unlock(&tbl->proxy_queue.lock);
1712 }
1713 
1714 static unsigned long neigh_proxy_delay(struct neigh_parms *p)
1715 {
1716 	/* If proxy_delay is zero, do not call get_random_u32_below()
1717 	 * as it is undefined behavior.
1718 	 */
1719 	unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);
1720 
1721 	return proxy_delay ?
1722 	       jiffies + get_random_u32_below(proxy_delay) : jiffies;
1723 }
1724 
1725 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1726 		    struct sk_buff *skb)
1727 {
1728 	unsigned long sched_next = neigh_proxy_delay(p);
1729 
1730 	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1731 		kfree_skb(skb);
1732 		return;
1733 	}
1734 
1735 	NEIGH_CB(skb)->sched_next = sched_next;
1736 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1737 
1738 	spin_lock(&tbl->proxy_queue.lock);
1739 	if (timer_delete(&tbl->proxy_timer)) {
1740 		if (time_before(tbl->proxy_timer.expires, sched_next))
1741 			sched_next = tbl->proxy_timer.expires;
1742 	}
1743 	skb_dst_drop(skb);
1744 	dev_hold(skb->dev);
1745 	__skb_queue_tail(&tbl->proxy_queue, skb);
1746 	p->qlen++;
1747 	mod_timer(&tbl->proxy_timer, sched_next);
1748 	spin_unlock(&tbl->proxy_queue.lock);
1749 }
1750 EXPORT_SYMBOL(pneigh_enqueue);
1751 
1752 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1753 						      struct net *net, int ifindex)
1754 {
1755 	struct neigh_parms *p;
1756 
1757 	list_for_each_entry(p, &tbl->parms_list, list) {
1758 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1759 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1760 			return p;
1761 	}
1762 
1763 	return NULL;
1764 }
1765 
1766 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1767 				      struct neigh_table *tbl)
1768 {
1769 	struct neigh_parms *p;
1770 	struct net *net = dev_net(dev);
1771 	const struct net_device_ops *ops = dev->netdev_ops;
1772 
1773 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1774 	if (p) {
1775 		p->tbl		  = tbl;
1776 		refcount_set(&p->refcnt, 1);
1777 		neigh_set_reach_time(p);
1778 		p->qlen = 0;
1779 		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1780 		p->dev = dev;
1781 		write_pnet(&p->net, net);
1782 		p->sysctl_table = NULL;
1783 
1784 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1785 			netdev_put(dev, &p->dev_tracker);
1786 			kfree(p);
1787 			return NULL;
1788 		}
1789 
1790 		spin_lock_bh(&tbl->lock);
1791 		list_add_rcu(&p->list, &tbl->parms.list);
1792 		spin_unlock_bh(&tbl->lock);
1793 
1794 		neigh_parms_data_state_cleanall(p);
1795 	}
1796 	return p;
1797 }
1798 EXPORT_SYMBOL(neigh_parms_alloc);
1799 
1800 static void neigh_rcu_free_parms(struct rcu_head *head)
1801 {
1802 	struct neigh_parms *parms =
1803 		container_of(head, struct neigh_parms, rcu_head);
1804 
1805 	neigh_parms_put(parms);
1806 }
1807 
1808 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1809 {
1810 	if (!parms || parms == &tbl->parms)
1811 		return;
1812 
1813 	spin_lock_bh(&tbl->lock);
1814 	list_del_rcu(&parms->list);
1815 	parms->dead = 1;
1816 	spin_unlock_bh(&tbl->lock);
1817 
1818 	netdev_put(parms->dev, &parms->dev_tracker);
1819 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1820 }
1821 EXPORT_SYMBOL(neigh_parms_release);
1822 
1823 static struct lock_class_key neigh_table_proxy_queue_class;
1824 
1825 static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1826 
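/* One-time setup of a neighbour table: default parms, per-CPU statistics,
 * the /proc/net/stat entry, the resolved and proxy hash tables, the
 * periodic GC and managed-entry work, and the proxy timer.  The table is
 * then published in neigh_tables[index].
 */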
1827 void neigh_table_init(int index, struct neigh_table *tbl)
1828 {
1829 	unsigned long now = jiffies;
1830 	unsigned long phsize;
1831 
1832 	INIT_LIST_HEAD(&tbl->parms_list);
1833 	INIT_LIST_HEAD(&tbl->gc_list);
1834 	INIT_LIST_HEAD(&tbl->managed_list);
1835 
1836 	list_add(&tbl->parms.list, &tbl->parms_list);
1837 	write_pnet(&tbl->parms.net, &init_net);
1838 	refcount_set(&tbl->parms.refcnt, 1);
1839 	neigh_set_reach_time(&tbl->parms);
1840 	tbl->parms.qlen = 0;
1841 
1842 	tbl->stats = alloc_percpu(struct neigh_statistics);
1843 	if (!tbl->stats)
1844 		panic("cannot create neighbour cache statistics");
1845 
1846 #ifdef CONFIG_PROC_FS
1847 	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1848 			      &neigh_stat_seq_ops, tbl))
1849 		panic("cannot create neighbour proc dir entry");
1850 #endif
1851 
1852 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1853 
1854 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1855 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1856 
1857 	if (!tbl->nht || !tbl->phash_buckets)
1858 		panic("cannot allocate neighbour cache hashes");
1859 
1860 	if (!tbl->entry_size)
1861 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1862 					tbl->key_len, NEIGH_PRIV_ALIGN);
1863 	else
1864 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1865 
1866 	spin_lock_init(&tbl->lock);
1867 	mutex_init(&tbl->phash_lock);
1868 
1869 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1870 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1871 			tbl->parms.reachable_time);
1872 	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1873 	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1874 
1875 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1876 	skb_queue_head_init_class(&tbl->proxy_queue,
1877 			&neigh_table_proxy_queue_class);
1878 
1879 	tbl->last_flush = now;
1880 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1881 
1882 	rcu_assign_pointer(neigh_tables[index], tbl);
1883 }
1884 EXPORT_SYMBOL(neigh_table_init);
1885 
1886 /*
1887  * Only called from ndisc_cleanup(), which means this is dead code
1888  * because we no longer can unload IPv6 module.
1889  */
1890 int neigh_table_clear(int index, struct neigh_table *tbl)
1891 {
1892 	RCU_INIT_POINTER(neigh_tables[index], NULL);
1893 	synchronize_rcu();
1894 
1895 	/* It is not clean... Fix it to unload IPv6 module safely */
1896 	cancel_delayed_work_sync(&tbl->managed_work);
1897 	cancel_delayed_work_sync(&tbl->gc_work);
1898 	timer_delete_sync(&tbl->proxy_timer);
1899 	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1900 	neigh_ifdown(tbl, NULL);
1901 	if (atomic_read(&tbl->entries))
1902 		pr_crit("neighbour leakage\n");
1903 
1904 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1905 		 neigh_hash_free_rcu);
1906 	tbl->nht = NULL;
1907 
1908 	kfree(tbl->phash_buckets);
1909 	tbl->phash_buckets = NULL;
1910 
1911 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1912 
1913 	free_percpu(tbl->stats);
1914 	tbl->stats = NULL;
1915 
1916 	return 0;
1917 }
1918 EXPORT_SYMBOL(neigh_table_clear);
1919 
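/* Map an address family to its neighbour table (ARP for AF_INET,
 * ND for AF_INET6); returns NULL for unsupported families.
 */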
1920 static struct neigh_table *neigh_find_table(int family)
1921 {
1922 	struct neigh_table *tbl = NULL;
1923 
1924 	switch (family) {
1925 	case AF_INET:
1926 		tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ARP_TABLE]);
1927 		break;
1928 	case AF_INET6:
1929 		tbl = rcu_dereference_rtnl(neigh_tables[NEIGH_ND_TABLE]);
1930 		break;
1931 	}
1932 
1933 	return tbl;
1934 }
1935 
1936 const struct nla_policy nda_policy[NDA_MAX+1] = {
1937 	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1938 	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1939 	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1940 	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1941 	[NDA_PROBES]		= { .type = NLA_U32 },
1942 	[NDA_VLAN]		= { .type = NLA_U16 },
1943 	[NDA_PORT]		= { .type = NLA_U16 },
1944 	[NDA_VNI]		= { .type = NLA_U32 },
1945 	[NDA_IFINDEX]		= { .type = NLA_U32 },
1946 	[NDA_MASTER]		= { .type = NLA_U32 },
1947 	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1948 	[NDA_NH_ID]		= { .type = NLA_U32 },
1949 	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
1950 	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1951 };
1952 
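/* RTM_DELNEIGH handler: proxy entries are removed via pneigh_delete(),
 * ordinary entries are forced to NUD_FAILED through __neigh_update() and
 * then unlinked under tbl->lock.
 */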
1953 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1954 			struct netlink_ext_ack *extack)
1955 {
1956 	struct net *net = sock_net(skb->sk);
1957 	struct ndmsg *ndm;
1958 	struct nlattr *dst_attr;
1959 	struct neigh_table *tbl;
1960 	struct neighbour *neigh;
1961 	struct net_device *dev = NULL;
1962 	int err = -EINVAL;
1963 
1964 	ASSERT_RTNL();
1965 	if (nlmsg_len(nlh) < sizeof(*ndm))
1966 		goto out;
1967 
1968 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1969 	if (!dst_attr) {
1970 		NL_SET_ERR_MSG(extack, "Network address not specified");
1971 		goto out;
1972 	}
1973 
1974 	ndm = nlmsg_data(nlh);
1975 	if (ndm->ndm_ifindex) {
1976 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1977 		if (dev == NULL) {
1978 			err = -ENODEV;
1979 			goto out;
1980 		}
1981 	}
1982 
1983 	tbl = neigh_find_table(ndm->ndm_family);
1984 	if (tbl == NULL)
1985 		return -EAFNOSUPPORT;
1986 
1987 	if (nla_len(dst_attr) < (int)tbl->key_len) {
1988 		NL_SET_ERR_MSG(extack, "Invalid network address");
1989 		goto out;
1990 	}
1991 
1992 	if (ndm->ndm_flags & NTF_PROXY) {
1993 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1994 		goto out;
1995 	}
1996 
1997 	if (dev == NULL)
1998 		goto out;
1999 
2000 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
2001 	if (neigh == NULL) {
2002 		err = -ENOENT;
2003 		goto out;
2004 	}
2005 
2006 	err = __neigh_update(neigh, NULL, NUD_FAILED,
2007 			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
2008 			     NETLINK_CB(skb).portid, extack);
2009 	spin_lock_bh(&tbl->lock);
2010 	neigh_release(neigh);
2011 	neigh_remove_one(neigh);
2012 	spin_unlock_bh(&tbl->lock);
2013 
2014 out:
2015 	return err;
2016 }
2017 
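/* RTM_NEWNEIGH handler: creates or updates an entry, validating the NTF_*
 * flag combinations (proxy, managed, use, externally learned/validated)
 * before handing the change to __neigh_update().
 */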
2018 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2019 		     struct netlink_ext_ack *extack)
2020 {
2021 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
2022 		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
2023 	struct net *net = sock_net(skb->sk);
2024 	struct ndmsg *ndm;
2025 	struct nlattr *tb[NDA_MAX+1];
2026 	struct neigh_table *tbl;
2027 	struct net_device *dev = NULL;
2028 	struct neighbour *neigh;
2029 	void *dst, *lladdr;
2030 	u8 protocol = 0;
2031 	u32 ndm_flags;
2032 	int err;
2033 
2034 	ASSERT_RTNL();
2035 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
2036 				     nda_policy, extack);
2037 	if (err < 0)
2038 		goto out;
2039 
2040 	err = -EINVAL;
2041 	if (!tb[NDA_DST]) {
2042 		NL_SET_ERR_MSG(extack, "Network address not specified");
2043 		goto out;
2044 	}
2045 
2046 	ndm = nlmsg_data(nlh);
2047 	ndm_flags = ndm->ndm_flags;
2048 	if (tb[NDA_FLAGS_EXT]) {
2049 		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
2050 
2051 		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
2052 			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
2053 			      hweight32(NTF_EXT_MASK)));
2054 		ndm_flags |= (ext << NTF_EXT_SHIFT);
2055 	}
2056 	if (ndm->ndm_ifindex) {
2057 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2058 		if (dev == NULL) {
2059 			err = -ENODEV;
2060 			goto out;
2061 		}
2062 
2063 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
2064 			NL_SET_ERR_MSG(extack, "Invalid link address");
2065 			goto out;
2066 		}
2067 	}
2068 
2069 	tbl = neigh_find_table(ndm->ndm_family);
2070 	if (tbl == NULL)
2071 		return -EAFNOSUPPORT;
2072 
2073 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
2074 		NL_SET_ERR_MSG(extack, "Invalid network address");
2075 		goto out;
2076 	}
2077 
2078 	dst = nla_data(tb[NDA_DST]);
2079 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
2080 
2081 	if (tb[NDA_PROTOCOL])
2082 		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
2083 	if (ndm_flags & NTF_PROXY) {
2084 		if (ndm_flags & (NTF_MANAGED | NTF_EXT_VALIDATED)) {
2085 			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
2086 			goto out;
2087 		}
2088 
2089 		err = pneigh_create(tbl, net, dst, dev, ndm_flags, protocol,
2090 				    !!(ndm->ndm_state & NUD_PERMANENT));
2091 		goto out;
2092 	}
2093 
2094 	if (!dev) {
2095 		NL_SET_ERR_MSG(extack, "Device not specified");
2096 		goto out;
2097 	}
2098 
2099 	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2100 		err = -EINVAL;
2101 		goto out;
2102 	}
2103 
2104 	neigh = neigh_lookup(tbl, dst, dev);
2105 	if (neigh == NULL) {
2106 		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
2107 		bool exempt_from_gc = ndm_permanent ||
2108 				      ndm_flags & (NTF_EXT_LEARNED |
2109 						   NTF_EXT_VALIDATED);
2110 
2111 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2112 			err = -ENOENT;
2113 			goto out;
2114 		}
2115 		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2116 			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2117 			err = -EINVAL;
2118 			goto out;
2119 		}
2120 		if (ndm_flags & NTF_EXT_VALIDATED) {
2121 			u8 state = ndm->ndm_state;
2122 
2123 			/* NTF_USE and NTF_MANAGED will result in the neighbor
2124 			 * being created with an invalid state (NUD_NONE).
2125 			 */
2126 			if (ndm_flags & (NTF_USE | NTF_MANAGED))
2127 				state = NUD_NONE;
2128 
2129 			if (!(state & NUD_VALID)) {
2130 				NL_SET_ERR_MSG(extack,
2131 					       "Cannot create externally validated neighbor with an invalid state");
2132 				err = -EINVAL;
2133 				goto out;
2134 			}
2135 		}
2136 
2137 		neigh = ___neigh_create(tbl, dst, dev,
2138 					ndm_flags &
2139 					(NTF_EXT_LEARNED | NTF_MANAGED |
2140 					 NTF_EXT_VALIDATED),
2141 					exempt_from_gc, true);
2142 		if (IS_ERR(neigh)) {
2143 			err = PTR_ERR(neigh);
2144 			goto out;
2145 		}
2146 	} else {
2147 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
2148 			err = -EEXIST;
2149 			neigh_release(neigh);
2150 			goto out;
2151 		}
2152 		if (ndm_flags & NTF_EXT_VALIDATED) {
2153 			u8 state = ndm->ndm_state;
2154 
2155 			/* NTF_USE and NTF_MANAGED do not update the existing
2156 			 * state other than clearing it if it was
2157 			 * NUD_PERMANENT.
2158 			 */
2159 			if (ndm_flags & (NTF_USE | NTF_MANAGED))
2160 				state = READ_ONCE(neigh->nud_state) & ~NUD_PERMANENT;
2161 
2162 			if (!(state & NUD_VALID)) {
2163 				NL_SET_ERR_MSG(extack,
2164 					       "Cannot mark neighbor as externally validated with an invalid state");
2165 				err = -EINVAL;
2166 				neigh_release(neigh);
2167 				goto out;
2168 			}
2169 		}
2170 
2171 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2172 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2173 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2174 	}
2175 
2176 	if (protocol)
2177 		neigh->protocol = protocol;
2178 	if (ndm_flags & NTF_EXT_LEARNED)
2179 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2180 	if (ndm_flags & NTF_ROUTER)
2181 		flags |= NEIGH_UPDATE_F_ISROUTER;
2182 	if (ndm_flags & NTF_MANAGED)
2183 		flags |= NEIGH_UPDATE_F_MANAGED;
2184 	if (ndm_flags & NTF_USE)
2185 		flags |= NEIGH_UPDATE_F_USE;
2186 	if (ndm_flags & NTF_EXT_VALIDATED)
2187 		flags |= NEIGH_UPDATE_F_EXT_VALIDATED;
2188 
2189 	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2190 			     NETLINK_CB(skb).portid, extack);
2191 	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED))
2192 		neigh_event_send(neigh, NULL);
2193 	neigh_release(neigh);
2194 out:
2195 	return err;
2196 }
2197 
2198 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2199 {
2200 	struct nlattr *nest;
2201 
2202 	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2203 	if (nest == NULL)
2204 		return -ENOBUFS;
2205 
2206 	if ((parms->dev &&
2207 	     nla_put_u32(skb, NDTPA_IFINDEX, READ_ONCE(parms->dev->ifindex))) ||
2208 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2209 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2210 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2211 	    /* approximative value for deprecated QUEUE_LEN (in packets) */
2212 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2213 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2214 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2215 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2216 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2217 			NEIGH_VAR(parms, UCAST_PROBES)) ||
2218 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2219 			NEIGH_VAR(parms, MCAST_PROBES)) ||
2220 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2221 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2222 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, READ_ONCE(parms->reachable_time),
2223 			  NDTPA_PAD) ||
2224 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2225 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2226 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2227 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2228 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2229 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2230 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2231 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2232 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2233 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2234 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2235 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2236 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2237 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2238 	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2239 			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2240 		goto nla_put_failure;
2241 	return nla_nest_end(skb, nest);
2242 
2243 nla_put_failure:
2244 	nla_nest_cancel(skb, nest);
2245 	return -EMSGSIZE;
2246 }
2247 
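/* Fill one RTM_NEWNEIGHTBL message: table name, GC thresholds/interval,
 * hash configuration, aggregated per-CPU statistics and the table-wide
 * default parms.
 */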
2248 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2249 			      u32 pid, u32 seq, int type, int flags)
2250 {
2251 	struct nlmsghdr *nlh;
2252 	struct ndtmsg *ndtmsg;
2253 
2254 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2255 	if (nlh == NULL)
2256 		return -EMSGSIZE;
2257 
2258 	ndtmsg = nlmsg_data(nlh);
2259 	ndtmsg->ndtm_family = tbl->family;
2260 	ndtmsg->ndtm_pad1   = 0;
2261 	ndtmsg->ndtm_pad2   = 0;
2262 
2263 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2264 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2265 			  NDTA_PAD) ||
2266 	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2267 	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2268 	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2269 		goto nla_put_failure;
2270 	{
2271 		unsigned long now = jiffies;
2272 		long flush_delta = now - READ_ONCE(tbl->last_flush);
2273 		long rand_delta = now - READ_ONCE(tbl->last_rand);
2274 		struct neigh_hash_table *nht;
2275 		struct ndt_config ndc = {
2276 			.ndtc_key_len		= tbl->key_len,
2277 			.ndtc_entry_size	= tbl->entry_size,
2278 			.ndtc_entries		= atomic_read(&tbl->entries),
2279 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2280 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2281 			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
2282 		};
2283 
2284 		nht = rcu_dereference(tbl->nht);
2285 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2286 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2287 
2288 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2289 			goto nla_put_failure;
2290 	}
2291 
2292 	{
2293 		int cpu;
2294 		struct ndt_stats ndst;
2295 
2296 		memset(&ndst, 0, sizeof(ndst));
2297 
2298 		for_each_possible_cpu(cpu) {
2299 			struct neigh_statistics	*st;
2300 
2301 			st = per_cpu_ptr(tbl->stats, cpu);
2302 			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
2303 			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
2304 			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
2305 			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
2306 			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
2307 			ndst.ndts_hits			+= READ_ONCE(st->hits);
2308 			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
2309 			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
2310 			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
2311 			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
2312 			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
2313 		}
2314 
2315 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2316 				  NDTA_PAD))
2317 			goto nla_put_failure;
2318 	}
2319 
2320 	BUG_ON(tbl->parms.dev);
2321 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2322 		goto nla_put_failure;
2323 
2324 	nlmsg_end(skb, nlh);
2325 	return 0;
2326 
2327 nla_put_failure:
2328 	nlmsg_cancel(skb, nlh);
2329 	return -EMSGSIZE;
2330 }
2331 
2332 static int neightbl_fill_param_info(struct sk_buff *skb,
2333 				    struct neigh_table *tbl,
2334 				    struct neigh_parms *parms,
2335 				    u32 pid, u32 seq, int type,
2336 				    unsigned int flags)
2337 {
2338 	struct ndtmsg *ndtmsg;
2339 	struct nlmsghdr *nlh;
2340 
2341 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2342 	if (nlh == NULL)
2343 		return -EMSGSIZE;
2344 
2345 	ndtmsg = nlmsg_data(nlh);
2346 	ndtmsg->ndtm_family = tbl->family;
2347 	ndtmsg->ndtm_pad1   = 0;
2348 	ndtmsg->ndtm_pad2   = 0;
2349 
2350 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2351 	    neightbl_fill_parms(skb, parms) < 0)
2352 		goto errout;
2353 
2354 	nlmsg_end(skb, nlh);
2355 	return 0;
2356 errout:
2357 	nlmsg_cancel(skb, nlh);
2358 	return -EMSGSIZE;
2359 }
2360 
2361 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2362 	[NDTA_NAME]		= { .type = NLA_STRING },
2363 	[NDTA_THRESH1]		= { .type = NLA_U32 },
2364 	[NDTA_THRESH2]		= { .type = NLA_U32 },
2365 	[NDTA_THRESH3]		= { .type = NLA_U32 },
2366 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2367 	[NDTA_PARMS]		= { .type = NLA_NESTED },
2368 };
2369 
2370 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2371 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2372 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2373 	[NDTPA_QUEUE_LENBYTES]		= { .type = NLA_U32 },
2374 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2375 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2376 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2377 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2378 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2379 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2380 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2381 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2382 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2383 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2384 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2385 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2386 	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
2387 };
2388 
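/* RTM_SETNEIGHTBL handler: updates per-device (or default) parms from the
 * nested NDTA_PARMS attribute and, for the initial namespace only, the
 * table-wide GC thresholds and interval.
 */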
2389 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2390 			struct netlink_ext_ack *extack)
2391 {
2392 	struct net *net = sock_net(skb->sk);
2393 	struct nlattr *tb[NDTA_MAX + 1];
2394 	struct neigh_table *tbl;
2395 	struct ndtmsg *ndtmsg;
2396 	bool found = false;
2397 	int err, tidx;
2398 
2399 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2400 				     nl_neightbl_policy, extack);
2401 	if (err < 0)
2402 		goto errout;
2403 
2404 	if (tb[NDTA_NAME] == NULL) {
2405 		err = -EINVAL;
2406 		goto errout;
2407 	}
2408 
2409 	ndtmsg = nlmsg_data(nlh);
2410 
2411 	rcu_read_lock();
2412 
2413 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2414 		tbl = rcu_dereference(neigh_tables[tidx]);
2415 		if (!tbl)
2416 			continue;
2417 
2418 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2419 			continue;
2420 
2421 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2422 			found = true;
2423 			break;
2424 		}
2425 	}
2426 
2427 	if (!found) {
2428 		rcu_read_unlock();
2429 		err = -ENOENT;
2430 		goto errout;
2431 	}
2432 
2433 	/*
2434 	 * We acquire tbl->lock to be nice to the periodic timers and
2435 	 * make sure they always see a consistent set of values.
2436 	 */
2437 	spin_lock_bh(&tbl->lock);
2438 
2439 	if (tb[NDTA_PARMS]) {
2440 		struct nlattr *tbp[NDTPA_MAX+1];
2441 		struct neigh_parms *p;
2442 		int i, ifindex = 0;
2443 
2444 		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2445 						  tb[NDTA_PARMS],
2446 						  nl_ntbl_parm_policy, extack);
2447 		if (err < 0)
2448 			goto errout_tbl_lock;
2449 
2450 		if (tbp[NDTPA_IFINDEX])
2451 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2452 
2453 		p = lookup_neigh_parms(tbl, net, ifindex);
2454 		if (p == NULL) {
2455 			err = -ENOENT;
2456 			goto errout_tbl_lock;
2457 		}
2458 
2459 		for (i = 1; i <= NDTPA_MAX; i++) {
2460 			if (tbp[i] == NULL)
2461 				continue;
2462 
2463 			switch (i) {
2464 			case NDTPA_QUEUE_LEN:
2465 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2466 					      nla_get_u32(tbp[i]) *
2467 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2468 				break;
2469 			case NDTPA_QUEUE_LENBYTES:
2470 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2471 					      nla_get_u32(tbp[i]));
2472 				break;
2473 			case NDTPA_PROXY_QLEN:
2474 				NEIGH_VAR_SET(p, PROXY_QLEN,
2475 					      nla_get_u32(tbp[i]));
2476 				break;
2477 			case NDTPA_APP_PROBES:
2478 				NEIGH_VAR_SET(p, APP_PROBES,
2479 					      nla_get_u32(tbp[i]));
2480 				break;
2481 			case NDTPA_UCAST_PROBES:
2482 				NEIGH_VAR_SET(p, UCAST_PROBES,
2483 					      nla_get_u32(tbp[i]));
2484 				break;
2485 			case NDTPA_MCAST_PROBES:
2486 				NEIGH_VAR_SET(p, MCAST_PROBES,
2487 					      nla_get_u32(tbp[i]));
2488 				break;
2489 			case NDTPA_MCAST_REPROBES:
2490 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2491 					      nla_get_u32(tbp[i]));
2492 				break;
2493 			case NDTPA_BASE_REACHABLE_TIME:
2494 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2495 					      nla_get_msecs(tbp[i]));
2496 				/* update reachable_time as well, otherwise, the change will
2497 				 * only be effective after the next time neigh_periodic_work
2498 				 * decides to recompute it (can be multiple minutes)
2499 				 */
2500 				neigh_set_reach_time(p);
2501 				break;
2502 			case NDTPA_GC_STALETIME:
2503 				NEIGH_VAR_SET(p, GC_STALETIME,
2504 					      nla_get_msecs(tbp[i]));
2505 				break;
2506 			case NDTPA_DELAY_PROBE_TIME:
2507 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2508 					      nla_get_msecs(tbp[i]));
2509 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2510 				break;
2511 			case NDTPA_INTERVAL_PROBE_TIME_MS:
2512 				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2513 					      nla_get_msecs(tbp[i]));
2514 				break;
2515 			case NDTPA_RETRANS_TIME:
2516 				NEIGH_VAR_SET(p, RETRANS_TIME,
2517 					      nla_get_msecs(tbp[i]));
2518 				break;
2519 			case NDTPA_ANYCAST_DELAY:
2520 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2521 					      nla_get_msecs(tbp[i]));
2522 				break;
2523 			case NDTPA_PROXY_DELAY:
2524 				NEIGH_VAR_SET(p, PROXY_DELAY,
2525 					      nla_get_msecs(tbp[i]));
2526 				break;
2527 			case NDTPA_LOCKTIME:
2528 				NEIGH_VAR_SET(p, LOCKTIME,
2529 					      nla_get_msecs(tbp[i]));
2530 				break;
2531 			}
2532 		}
2533 	}
2534 
2535 	err = -ENOENT;
2536 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2537 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2538 	    !net_eq(net, &init_net))
2539 		goto errout_tbl_lock;
2540 
2541 	if (tb[NDTA_THRESH1])
2542 		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2543 
2544 	if (tb[NDTA_THRESH2])
2545 		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2546 
2547 	if (tb[NDTA_THRESH3])
2548 		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2549 
2550 	if (tb[NDTA_GC_INTERVAL])
2551 		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2552 
2553 	err = 0;
2554 
2555 errout_tbl_lock:
2556 	spin_unlock_bh(&tbl->lock);
2557 	rcu_read_unlock();
2558 errout:
2559 	return err;
2560 }
2561 
2562 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2563 				    struct netlink_ext_ack *extack)
2564 {
2565 	struct ndtmsg *ndtm;
2566 
2567 	ndtm = nlmsg_payload(nlh, sizeof(*ndtm));
2568 	if (!ndtm) {
2569 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2570 		return -EINVAL;
2571 	}
2572 
2573 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2574 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2575 		return -EINVAL;
2576 	}
2577 
2578 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2579 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2580 		return -EINVAL;
2581 	}
2582 
2583 	return 0;
2584 }
2585 
2586 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2587 {
2588 	const struct nlmsghdr *nlh = cb->nlh;
2589 	struct net *net = sock_net(skb->sk);
2590 	int family, tidx, nidx = 0;
2591 	int tbl_skip = cb->args[0];
2592 	int neigh_skip = cb->args[1];
2593 	struct neigh_table *tbl;
2594 
2595 	if (cb->strict_check) {
2596 		int err = neightbl_valid_dump_info(nlh, cb->extack);
2597 
2598 		if (err < 0)
2599 			return err;
2600 	}
2601 
2602 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2603 
2604 	rcu_read_lock();
2605 
2606 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2607 		struct neigh_parms *p;
2608 
2609 		tbl = rcu_dereference(neigh_tables[tidx]);
2610 		if (!tbl)
2611 			continue;
2612 
2613 		if (tidx < tbl_skip || (family && tbl->family != family))
2614 			continue;
2615 
2616 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2617 				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2618 				       NLM_F_MULTI) < 0)
2619 			break;
2620 
2621 		nidx = 0;
2622 		p = list_next_entry(&tbl->parms, list);
2623 		list_for_each_entry_from_rcu(p, &tbl->parms_list, list) {
2624 			if (!net_eq(neigh_parms_net(p), net))
2625 				continue;
2626 
2627 			if (nidx < neigh_skip)
2628 				goto next;
2629 
2630 			if (neightbl_fill_param_info(skb, tbl, p,
2631 						     NETLINK_CB(cb->skb).portid,
2632 						     nlh->nlmsg_seq,
2633 						     RTM_NEWNEIGHTBL,
2634 						     NLM_F_MULTI) < 0)
2635 				goto out;
2636 		next:
2637 			nidx++;
2638 		}
2639 
2640 		neigh_skip = 0;
2641 	}
2642 out:
2643 	rcu_read_unlock();
2644 
2645 	cb->args[0] = tidx;
2646 	cb->args[1] = nidx;
2647 
2648 	return skb->len;
2649 }
2650 
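/* Fill one RTM_NEWNEIGH message for a resolved neighbour: destination,
 * link-layer address (for NUD_VALID states), cache info, probe count,
 * protocol and extended flags.
 */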
2651 static int __neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2652 			     u32 pid, u32 seq, int type, unsigned int flags)
2653 {
2654 	u32 neigh_flags, neigh_flags_ext;
2655 	unsigned long now = jiffies;
2656 	struct nda_cacheinfo ci;
2657 	struct nlmsghdr *nlh;
2658 	struct ndmsg *ndm;
2659 
2660 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2661 	if (nlh == NULL)
2662 		return -EMSGSIZE;
2663 
2664 	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2665 	neigh_flags     = neigh->flags & NTF_OLD_MASK;
2666 
2667 	ndm = nlmsg_data(nlh);
2668 	ndm->ndm_family	 = neigh->ops->family;
2669 	ndm->ndm_pad1    = 0;
2670 	ndm->ndm_pad2    = 0;
2671 	ndm->ndm_flags	 = neigh_flags;
2672 	ndm->ndm_type	 = neigh->type;
2673 	ndm->ndm_ifindex = neigh->dev->ifindex;
2674 
2675 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2676 		goto nla_put_failure;
2677 
2678 	ndm->ndm_state	 = neigh->nud_state;
2679 	if (neigh->nud_state & NUD_VALID) {
2680 		char haddr[MAX_ADDR_LEN];
2681 
2682 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2683 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0)
2684 			goto nla_put_failure;
2685 	}
2686 
2687 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2688 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2689 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2690 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2691 
2692 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2693 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2694 		goto nla_put_failure;
2695 
2696 	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2697 		goto nla_put_failure;
2698 	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2699 		goto nla_put_failure;
2700 
2701 	nlmsg_end(skb, nlh);
2702 	return 0;
2703 
2704 nla_put_failure:
2705 	nlmsg_cancel(skb, nlh);
2706 	return -EMSGSIZE;
2707 }
2708 
2709 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2710 			   u32 pid, u32 seq, int type, unsigned int flags)
2711 	__releases(neigh->lock)
2712 	__acquires(neigh->lock)
2713 {
2714 	int err;
2715 
2716 	read_lock_bh(&neigh->lock);
2717 	err = __neigh_fill_info(skb, neigh, pid, seq, type, flags);
2718 	read_unlock_bh(&neigh->lock);
2719 
2720 	return err;
2721 }
2722 
2723 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2724 			    u32 pid, u32 seq, int type, unsigned int flags,
2725 			    struct neigh_table *tbl)
2726 {
2727 	u32 neigh_flags, neigh_flags_ext;
2728 	struct nlmsghdr *nlh;
2729 	struct ndmsg *ndm;
2730 	u8 protocol;
2731 
2732 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2733 	if (nlh == NULL)
2734 		return -EMSGSIZE;
2735 
2736 	neigh_flags = READ_ONCE(pn->flags);
2737 	neigh_flags_ext = neigh_flags >> NTF_EXT_SHIFT;
2738 	neigh_flags &= NTF_OLD_MASK;
2739 
2740 	ndm = nlmsg_data(nlh);
2741 	ndm->ndm_family	 = tbl->family;
2742 	ndm->ndm_pad1    = 0;
2743 	ndm->ndm_pad2    = 0;
2744 	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
2745 	ndm->ndm_type	 = RTN_UNICAST;
2746 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2747 	ndm->ndm_state	 = NUD_NONE;
2748 
2749 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2750 		goto nla_put_failure;
2751 
2752 	protocol = READ_ONCE(pn->protocol);
2753 	if (protocol && nla_put_u8(skb, NDA_PROTOCOL, protocol))
2754 		goto nla_put_failure;
2755 	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2756 		goto nla_put_failure;
2757 
2758 	nlmsg_end(skb, nlh);
2759 	return 0;
2760 
2761 nla_put_failure:
2762 	nlmsg_cancel(skb, nlh);
2763 	return -EMSGSIZE;
2764 }
2765 
2766 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2767 {
2768 	struct net_device *master;
2769 
2770 	if (!master_idx)
2771 		return false;
2772 
2773 	master = dev ? netdev_master_upper_dev_get_rcu(dev) : NULL;
2774 
2775 	/* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
2776 	 * invalid value for ifindex to denote "no master".
2777 	 */
2778 	if (master_idx == -1)
2779 		return !!master;
2780 
2781 	if (!master || master->ifindex != master_idx)
2782 		return true;
2783 
2784 	return false;
2785 }
2786 
2787 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2788 {
2789 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2790 		return true;
2791 
2792 	return false;
2793 }
2794 
2795 struct neigh_dump_filter {
2796 	int master_idx;
2797 	int dev_idx;
2798 };
2799 
2800 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2801 			    struct netlink_callback *cb,
2802 			    struct neigh_dump_filter *filter)
2803 {
2804 	struct net *net = sock_net(skb->sk);
2805 	struct neighbour *n;
2806 	int err = 0, h, s_h = cb->args[1];
2807 	int idx, s_idx = idx = cb->args[2];
2808 	struct neigh_hash_table *nht;
2809 	unsigned int flags = NLM_F_MULTI;
2810 
2811 	if (filter->dev_idx || filter->master_idx)
2812 		flags |= NLM_F_DUMP_FILTERED;
2813 
2814 	nht = rcu_dereference(tbl->nht);
2815 
2816 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2817 		if (h > s_h)
2818 			s_idx = 0;
2819 		idx = 0;
2820 		neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[h]) {
2821 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2822 				goto next;
2823 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2824 			    neigh_master_filtered(n->dev, filter->master_idx))
2825 				goto next;
2826 			err = neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2827 					      cb->nlh->nlmsg_seq,
2828 					      RTM_NEWNEIGH, flags);
2829 			if (err < 0)
2830 				goto out;
2831 next:
2832 			idx++;
2833 		}
2834 	}
2835 out:
2836 	cb->args[1] = h;
2837 	cb->args[2] = idx;
2838 	return err;
2839 }
2840 
2841 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2842 			     struct netlink_callback *cb,
2843 			     struct neigh_dump_filter *filter)
2844 {
2845 	struct pneigh_entry *n;
2846 	struct net *net = sock_net(skb->sk);
2847 	int err = 0, h, s_h = cb->args[3];
2848 	int idx, s_idx = idx = cb->args[4];
2849 	unsigned int flags = NLM_F_MULTI;
2850 
2851 	if (filter->dev_idx || filter->master_idx)
2852 		flags |= NLM_F_DUMP_FILTERED;
2853 
2854 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2855 		if (h > s_h)
2856 			s_idx = 0;
2857 		for (n = rcu_dereference(tbl->phash_buckets[h]), idx = 0;
2858 		     n;
2859 		     n = rcu_dereference(n->next)) {
2860 			if (idx < s_idx || pneigh_net(n) != net)
2861 				goto next;
2862 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2863 			    neigh_master_filtered(n->dev, filter->master_idx))
2864 				goto next;
2865 			err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2866 					       cb->nlh->nlmsg_seq,
2867 					       RTM_NEWNEIGH, flags, tbl);
2868 			if (err < 0)
2869 				goto out;
2870 		next:
2871 			idx++;
2872 		}
2873 	}
2874 
2875 out:
2876 	cb->args[3] = h;
2877 	cb->args[4] = idx;
2878 	return err;
2879 }
2880 
2881 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2882 				bool strict_check,
2883 				struct neigh_dump_filter *filter,
2884 				struct netlink_ext_ack *extack)
2885 {
2886 	struct nlattr *tb[NDA_MAX + 1];
2887 	int err, i;
2888 
2889 	if (strict_check) {
2890 		struct ndmsg *ndm;
2891 
2892 		ndm = nlmsg_payload(nlh, sizeof(*ndm));
2893 		if (!ndm) {
2894 			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2895 			return -EINVAL;
2896 		}
2897 
2898 		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2899 		    ndm->ndm_state || ndm->ndm_type) {
2900 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2901 			return -EINVAL;
2902 		}
2903 
2904 		if (ndm->ndm_flags & ~NTF_PROXY) {
2905 			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2906 			return -EINVAL;
2907 		}
2908 
2909 		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2910 						    tb, NDA_MAX, nda_policy,
2911 						    extack);
2912 	} else {
2913 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2914 					     NDA_MAX, nda_policy, extack);
2915 	}
2916 	if (err < 0)
2917 		return err;
2918 
2919 	for (i = 0; i <= NDA_MAX; ++i) {
2920 		if (!tb[i])
2921 			continue;
2922 
2923 		/* all new attributes should require strict_check */
2924 		switch (i) {
2925 		case NDA_IFINDEX:
2926 			filter->dev_idx = nla_get_u32(tb[i]);
2927 			break;
2928 		case NDA_MASTER:
2929 			filter->master_idx = nla_get_u32(tb[i]);
2930 			break;
2931 		default:
2932 			if (strict_check) {
2933 				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2934 				return -EINVAL;
2935 			}
2936 		}
2937 	}
2938 
2939 	return 0;
2940 }
2941 
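/* RTM_GETNEIGH dump handler: walks every table matching the requested
 * family and dumps either resolved entries or, when NTF_PROXY is set in
 * the request, proxy entries, resuming from cb->args[].
 */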
2942 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2943 {
2944 	const struct nlmsghdr *nlh = cb->nlh;
2945 	struct neigh_dump_filter filter = {};
2946 	struct neigh_table *tbl;
2947 	int t, family, s_t;
2948 	int proxy = 0;
2949 	int err;
2950 
2951 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2952 
2953 	/* check for full ndmsg structure presence, family member is
2954 	 * the same for both structures
2955 	 */
2956 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2957 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2958 		proxy = 1;
2959 
2960 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2961 	if (err < 0 && cb->strict_check)
2962 		return err;
2963 	err = 0;
2964 
2965 	s_t = cb->args[0];
2966 
2967 	rcu_read_lock();
2968 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2969 		tbl = rcu_dereference(neigh_tables[t]);
2970 
2971 		if (!tbl)
2972 			continue;
2973 		if (t < s_t || (family && tbl->family != family))
2974 			continue;
2975 		if (t > s_t)
2976 			memset(&cb->args[1], 0, sizeof(cb->args) -
2977 						sizeof(cb->args[0]));
2978 		if (proxy)
2979 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2980 		else
2981 			err = neigh_dump_table(tbl, skb, cb, &filter);
2982 		if (err < 0)
2983 			break;
2984 	}
2985 	rcu_read_unlock();
2986 
2987 	cb->args[0] = t;
2988 	return err;
2989 }
2990 
2991 static struct ndmsg *neigh_valid_get_req(const struct nlmsghdr *nlh,
2992 					 struct nlattr **tb,
2993 					 struct netlink_ext_ack *extack)
2994 {
2995 	struct ndmsg *ndm;
2996 	int err, i;
2997 
2998 	ndm = nlmsg_payload(nlh, sizeof(*ndm));
2999 	if (!ndm) {
3000 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
3001 		return ERR_PTR(-EINVAL);
3002 	}
3003 
3004 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
3005 	    ndm->ndm_type) {
3006 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
3007 		return ERR_PTR(-EINVAL);
3008 	}
3009 
3010 	if (ndm->ndm_flags & ~NTF_PROXY) {
3011 		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
3012 		return ERR_PTR(-EINVAL);
3013 	}
3014 
3015 	if (!(ndm->ndm_flags & NTF_PROXY) && !ndm->ndm_ifindex) {
3016 		NL_SET_ERR_MSG(extack, "No device specified");
3017 		return ERR_PTR(-EINVAL);
3018 	}
3019 
3020 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
3021 					    NDA_MAX, nda_policy, extack);
3022 	if (err < 0)
3023 		return ERR_PTR(err);
3024 
3025 	for (i = 0; i <= NDA_MAX; ++i) {
3026 		switch (i) {
3027 		case NDA_DST:
3028 			if (!tb[i]) {
3029 				NL_SET_ERR_ATTR_MISS(extack, NULL, NDA_DST);
3030 				return ERR_PTR(-EINVAL);
3031 			}
3032 			break;
3033 		default:
3034 			if (!tb[i])
3035 				continue;
3036 
3037 			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
3038 			return ERR_PTR(-EINVAL);
3039 		}
3040 	}
3041 
3042 	return ndm;
3043 }
3044 
3045 static inline size_t neigh_nlmsg_size(void)
3046 {
3047 	return NLMSG_ALIGN(sizeof(struct ndmsg))
3048 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
3049 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
3050 	       + nla_total_size(sizeof(struct nda_cacheinfo))
3051 	       + nla_total_size(4)  /* NDA_PROBES */
3052 	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
3053 	       + nla_total_size(1); /* NDA_PROTOCOL */
3054 }
3055 
3056 static inline size_t pneigh_nlmsg_size(void)
3057 {
3058 	return NLMSG_ALIGN(sizeof(struct ndmsg))
3059 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
3060 	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
3061 	       + nla_total_size(1); /* NDA_PROTOCOL */
3062 }
3063 
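/* RTM_GETNEIGH doit handler: look up a single (proxy or resolved) entry
 * and unicast the reply to the requester.
 */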
3064 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3065 		     struct netlink_ext_ack *extack)
3066 {
3067 	struct net *net = sock_net(in_skb->sk);
3068 	u32 pid = NETLINK_CB(in_skb).portid;
3069 	struct nlattr *tb[NDA_MAX + 1];
3070 	struct net_device *dev = NULL;
3071 	u32 seq = nlh->nlmsg_seq;
3072 	struct neigh_table *tbl;
3073 	struct neighbour *neigh;
3074 	struct sk_buff *skb;
3075 	struct ndmsg *ndm;
3076 	void *dst;
3077 	int err;
3078 
3079 	ndm = neigh_valid_get_req(nlh, tb, extack);
3080 	if (IS_ERR(ndm))
3081 		return PTR_ERR(ndm);
3082 
3083 	if (ndm->ndm_flags & NTF_PROXY)
3084 		skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
3085 	else
3086 		skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
3087 	if (!skb)
3088 		return -ENOBUFS;
3089 
3090 	rcu_read_lock();
3091 
3092 	tbl = neigh_find_table(ndm->ndm_family);
3093 	if (!tbl) {
3094 		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
3095 		err = -EAFNOSUPPORT;
3096 		goto err_unlock;
3097 	}
3098 
3099 	if (nla_len(tb[NDA_DST]) != (int)tbl->key_len) {
3100 		NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
3101 		err = -EINVAL;
3102 		goto err_unlock;
3103 	}
3104 
3105 	dst = nla_data(tb[NDA_DST]);
3106 
3107 	if (ndm->ndm_ifindex) {
3108 		dev = dev_get_by_index_rcu(net, ndm->ndm_ifindex);
3109 		if (!dev) {
3110 			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
3111 			err = -ENODEV;
3112 			goto err_unlock;
3113 		}
3114 	}
3115 
3116 	if (ndm->ndm_flags & NTF_PROXY) {
3117 		struct pneigh_entry *pn;
3118 
3119 		pn = pneigh_lookup(tbl, net, dst, dev);
3120 		if (!pn) {
3121 			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3122 			err = -ENOENT;
3123 			goto err_unlock;
3124 		}
3125 
3126 		err = pneigh_fill_info(skb, pn, pid, seq, RTM_NEWNEIGH, 0, tbl);
3127 		if (err)
3128 			goto err_unlock;
3129 	} else {
3130 		neigh = neigh_lookup(tbl, dst, dev);
3131 		if (!neigh) {
3132 			NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3133 			err = -ENOENT;
3134 			goto err_unlock;
3135 		}
3136 
3137 		err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
3138 		neigh_release(neigh);
3139 		if (err)
3140 			goto err_unlock;
3141 	}
3142 
3143 	rcu_read_unlock();
3144 
3145 	return rtnl_unicast(skb, net, pid);
3146 err_unlock:
3147 	rcu_read_unlock();
3148 	kfree_skb(skb);
3149 	return err;
3150 }
3151 
3152 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3153 {
3154 	int chain;
3155 	struct neigh_hash_table *nht;
3156 
3157 	rcu_read_lock();
3158 	nht = rcu_dereference(tbl->nht);
3159 
3160 	spin_lock_bh(&tbl->lock); /* avoid resizes */
3161 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3162 		struct neighbour *n;
3163 
3164 		neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
3165 			cb(n, cookie);
3166 	}
3167 	spin_unlock_bh(&tbl->lock);
3168 	rcu_read_unlock();
3169 }
3170 EXPORT_SYMBOL(neigh_for_each);
3171 
3172 /* The tbl->lock must be held as a writer and BH disabled. */
3173 void __neigh_for_each_release(struct neigh_table *tbl,
3174 			      int (*cb)(struct neighbour *))
3175 {
3176 	struct neigh_hash_table *nht;
3177 	int chain;
3178 
3179 	nht = rcu_dereference_protected(tbl->nht,
3180 					lockdep_is_held(&tbl->lock));
3181 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3182 		struct hlist_node *tmp;
3183 		struct neighbour *n;
3184 
3185 		neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[chain]) {
3186 			int release;
3187 
3188 			write_lock(&n->lock);
3189 			release = cb(n);
3190 			if (release) {
3191 				hlist_del_rcu(&n->hash);
3192 				hlist_del_rcu(&n->dev_list);
3193 				neigh_mark_dead(n);
3194 			}
3195 			write_unlock(&n->lock);
3196 			if (release)
3197 				neigh_cleanup_and_release(n);
3198 		}
3199 	}
3200 }
3201 EXPORT_SYMBOL(__neigh_for_each_release);
3202 
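/* Transmit @skb via the neighbour identified by @addr in the table given
 * by @index, creating the entry if needed.  NEIGH_LINK_TABLE bypasses
 * neighbour resolution and builds the hard header directly from @addr.
 */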
3203 int neigh_xmit(int index, struct net_device *dev,
3204 	       const void *addr, struct sk_buff *skb)
3205 {
3206 	int err = -EAFNOSUPPORT;
3207 
3208 	if (likely(index < NEIGH_NR_TABLES)) {
3209 		struct neigh_table *tbl;
3210 		struct neighbour *neigh;
3211 
3212 		rcu_read_lock();
3213 		tbl = rcu_dereference(neigh_tables[index]);
3214 		if (!tbl)
3215 			goto out_unlock;
3216 		if (index == NEIGH_ARP_TABLE) {
3217 			u32 key = *((u32 *)addr);
3218 
3219 			neigh = __ipv4_neigh_lookup_noref(dev, key);
3220 		} else {
3221 			neigh = __neigh_lookup_noref(tbl, addr, dev);
3222 		}
3223 		if (!neigh)
3224 			neigh = __neigh_create(tbl, addr, dev, false);
3225 		err = PTR_ERR(neigh);
3226 		if (IS_ERR(neigh)) {
3227 			rcu_read_unlock();
3228 			goto out_kfree_skb;
3229 		}
3230 		err = READ_ONCE(neigh->output)(neigh, skb);
3231 out_unlock:
3232 		rcu_read_unlock();
3233 	}
3234 	else if (index == NEIGH_LINK_TABLE) {
3235 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3236 				      addr, NULL, skb->len);
3237 		if (err < 0)
3238 			goto out_kfree_skb;
3239 		err = dev_queue_xmit(skb);
3240 	}
3241 out:
3242 	return err;
3243 out_kfree_skb:
3244 	kfree_skb(skb);
3245 	goto out;
3246 }
3247 EXPORT_SYMBOL(neigh_xmit);
3248 
3249 #ifdef CONFIG_PROC_FS
3250 
3251 static struct neighbour *neigh_get_valid(struct seq_file *seq,
3252 					 struct neighbour *n,
3253 					 loff_t *pos)
3254 {
3255 	struct neigh_seq_state *state = seq->private;
3256 	struct net *net = seq_file_net(seq);
3257 
3258 	if (!net_eq(dev_net(n->dev), net))
3259 		return NULL;
3260 
3261 	if (state->neigh_sub_iter) {
3262 		loff_t fakep = 0;
3263 		void *v;
3264 
3265 		v = state->neigh_sub_iter(state, n, pos ? pos : &fakep);
3266 		if (!v)
3267 			return NULL;
3268 		if (pos)
3269 			return v;
3270 	}
3271 
3272 	if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3273 		return n;
3274 
3275 	if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
3276 		return n;
3277 
3278 	return NULL;
3279 }
3280 
3281 static struct neighbour *neigh_get_first(struct seq_file *seq)
3282 {
3283 	struct neigh_seq_state *state = seq->private;
3284 	struct neigh_hash_table *nht = state->nht;
3285 	struct neighbour *n, *tmp;
3286 
3287 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3288 
3289 	while (++state->bucket < (1 << nht->hash_shift)) {
3290 		neigh_for_each_in_bucket(n, &nht->hash_heads[state->bucket]) {
3291 			tmp = neigh_get_valid(seq, n, NULL);
3292 			if (tmp)
3293 				return tmp;
3294 		}
3295 	}
3296 
3297 	return NULL;
3298 }
3299 
3300 static struct neighbour *neigh_get_next(struct seq_file *seq,
3301 					struct neighbour *n,
3302 					loff_t *pos)
3303 {
3304 	struct neigh_seq_state *state = seq->private;
3305 	struct neighbour *tmp;
3306 
3307 	if (state->neigh_sub_iter) {
3308 		void *v = state->neigh_sub_iter(state, n, pos);
3309 
3310 		if (v)
3311 			return n;
3312 	}
3313 
3314 	hlist_for_each_entry_continue(n, hash) {
3315 		tmp = neigh_get_valid(seq, n, pos);
3316 		if (tmp) {
3317 			n = tmp;
3318 			goto out;
3319 		}
3320 	}
3321 
3322 	n = neigh_get_first(seq);
3323 out:
3324 	if (n && pos)
3325 		--(*pos);
3326 
3327 	return n;
3328 }
3329 
3330 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3331 {
3332 	struct neighbour *n = neigh_get_first(seq);
3333 
3334 	if (n) {
3335 		--(*pos);
3336 		while (*pos) {
3337 			n = neigh_get_next(seq, n, pos);
3338 			if (!n)
3339 				break;
3340 		}
3341 	}
3342 	return *pos ? NULL : n;
3343 }
3344 
3345 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3346 {
3347 	struct neigh_seq_state *state = seq->private;
3348 	struct net *net = seq_file_net(seq);
3349 	struct neigh_table *tbl = state->tbl;
3350 	struct pneigh_entry *pn = NULL;
3351 	int bucket;
3352 
3353 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3354 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3355 		pn = rcu_dereference(tbl->phash_buckets[bucket]);
3356 
3357 		while (pn && !net_eq(pneigh_net(pn), net))
3358 			pn = rcu_dereference(pn->next);
3359 		if (pn)
3360 			break;
3361 	}
3362 	state->bucket = bucket;
3363 
3364 	return pn;
3365 }
3366 
3367 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3368 					    struct pneigh_entry *pn,
3369 					    loff_t *pos)
3370 {
3371 	struct neigh_seq_state *state = seq->private;
3372 	struct net *net = seq_file_net(seq);
3373 	struct neigh_table *tbl = state->tbl;
3374 
3375 	do {
3376 		pn = rcu_dereference(pn->next);
3377 	} while (pn && !net_eq(pneigh_net(pn), net));
3378 
3379 	while (!pn) {
3380 		if (++state->bucket > PNEIGH_HASHMASK)
3381 			break;
3382 
3383 		pn = rcu_dereference(tbl->phash_buckets[state->bucket]);
3384 
3385 		while (pn && !net_eq(pneigh_net(pn), net))
3386 			pn = rcu_dereference(pn->next);
3387 		if (pn)
3388 			break;
3389 	}
3390 
3391 	if (pn && pos)
3392 		--(*pos);
3393 
3394 	return pn;
3395 }
3396 
3397 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3398 {
3399 	struct pneigh_entry *pn = pneigh_get_first(seq);
3400 
3401 	if (pn) {
3402 		--(*pos);
3403 		while (*pos) {
3404 			pn = pneigh_get_next(seq, pn, pos);
3405 			if (!pn)
3406 				break;
3407 		}
3408 	}
3409 	return *pos ? NULL : pn;
3410 }
3411 
3412 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3413 {
3414 	struct neigh_seq_state *state = seq->private;
3415 	void *rc;
3416 	loff_t idxpos = *pos;
3417 
3418 	rc = neigh_get_idx(seq, &idxpos);
3419 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3420 		rc = pneigh_get_idx(seq, &idxpos);
3421 
3422 	return rc;
3423 }
3424 
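/* seq_file iteration over a neighbour table (used e.g. by /proc/net/arp):
 * resolved entries are walked first, followed by proxy entries unless
 * NEIGH_SEQ_NEIGH_ONLY is set.  The hash table is pinned with RCU and
 * tbl->lock so it cannot resize under the iterator.
 */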
3425 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3426 	__acquires(tbl->lock)
3427 	__acquires(rcu)
3428 {
3429 	struct neigh_seq_state *state = seq->private;
3430 
3431 	state->tbl = tbl;
3432 	state->bucket = -1;
3433 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3434 
3435 	rcu_read_lock();
3436 	state->nht = rcu_dereference(tbl->nht);
3437 	spin_lock_bh(&tbl->lock);
3438 
3439 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3440 }
3441 EXPORT_SYMBOL(neigh_seq_start);
3442 
3443 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3444 {
3445 	struct neigh_seq_state *state;
3446 	void *rc;
3447 
3448 	if (v == SEQ_START_TOKEN) {
3449 		rc = neigh_get_first(seq);
3450 		goto out;
3451 	}
3452 
3453 	state = seq->private;
3454 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3455 		rc = neigh_get_next(seq, v, NULL);
3456 		if (rc)
3457 			goto out;
3458 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3459 			rc = pneigh_get_first(seq);
3460 	} else {
3461 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3462 		rc = pneigh_get_next(seq, v, NULL);
3463 	}
3464 out:
3465 	++(*pos);
3466 	return rc;
3467 }
3468 EXPORT_SYMBOL(neigh_seq_next);
3469 
3470 void neigh_seq_stop(struct seq_file *seq, void *v)
3471 	__releases(tbl->lock)
3472 	__releases(rcu)
3473 {
3474 	struct neigh_seq_state *state = seq->private;
3475 	struct neigh_table *tbl = state->tbl;
3476 
3477 	spin_unlock_bh(&tbl->lock);
3478 	rcu_read_unlock();
3479 }
3480 EXPORT_SYMBOL(neigh_seq_stop);
3481 
3482 /* statistics via seq_file */
3483 
3484 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3485 {
3486 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3487 	int cpu;
3488 
3489 	if (*pos == 0)
3490 		return SEQ_START_TOKEN;
3491 
3492 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3493 		if (!cpu_possible(cpu))
3494 			continue;
3495 		*pos = cpu+1;
3496 		return per_cpu_ptr(tbl->stats, cpu);
3497 	}
3498 	return NULL;
3499 }
3500 
3501 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3502 {
3503 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3504 	int cpu;
3505 
3506 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3507 		if (!cpu_possible(cpu))
3508 			continue;
3509 		*pos = cpu+1;
3510 		return per_cpu_ptr(tbl->stats, cpu);
3511 	}
3512 	(*pos)++;
3513 	return NULL;
3514 }
3515 
3516 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3517 {
3518 
3519 }
3520 
3521 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3522 {
3523 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3524 	struct neigh_statistics *st = v;
3525 
3526 	if (v == SEQ_START_TOKEN) {
3527 		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3528 		return 0;
3529 	}
3530 
3531 	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
3532 			"%08lx         %08lx         %08lx         "
3533 			"%08lx       %08lx            %08lx\n",
3534 		   atomic_read(&tbl->entries),
3535 
3536 		   st->allocs,
3537 		   st->destroys,
3538 		   st->hash_grows,
3539 
3540 		   st->lookups,
3541 		   st->hits,
3542 
3543 		   st->res_failed,
3544 
3545 		   st->rcv_probes_mcast,
3546 		   st->rcv_probes_ucast,
3547 
3548 		   st->periodic_gc_runs,
3549 		   st->forced_gc_runs,
3550 		   st->unres_discards,
3551 		   st->table_fulls
3552 		   );
3553 
3554 	return 0;
3555 }
3556 
3557 static const struct seq_operations neigh_stat_seq_ops = {
3558 	.start	= neigh_stat_seq_start,
3559 	.next	= neigh_stat_seq_next,
3560 	.stop	= neigh_stat_seq_stop,
3561 	.show	= neigh_stat_seq_show,
3562 };
3563 #endif /* CONFIG_PROC_FS */
3564 
3565 static void __neigh_notify(struct neighbour *n, int type, int flags,
3566 			   u32 pid)
3567 {
3568 	struct sk_buff *skb;
3569 	int err = -ENOBUFS;
3570 	struct net *net;
3571 
3572 	rcu_read_lock();
3573 	net = dev_net_rcu(n->dev);
3574 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3575 	if (skb == NULL)
3576 		goto errout;
3577 
3578 	err = __neigh_fill_info(skb, n, pid, 0, type, flags);
3579 	if (err < 0) {
3580 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3581 		WARN_ON(err == -EMSGSIZE);
3582 		kfree_skb(skb);
3583 		goto errout;
3584 	}
3585 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3586 	goto out;
3587 errout:
3588 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3589 out:
3590 	rcu_read_unlock();
3591 }
3592 
3593 static void neigh_notify(struct neighbour *neigh, int type, int flags, u32 pid)
3594 {
3595 	read_lock_bh(&neigh->lock);
3596 	__neigh_notify(neigh, type, flags, pid);
3597 	read_unlock_bh(&neigh->lock);
3598 }
3599 
3600 void neigh_app_ns(struct neighbour *n)
3601 {
3602 	neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3603 }
3604 EXPORT_SYMBOL(neigh_app_ns);
3605 
3606 #ifdef CONFIG_SYSCTL
3607 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3608 
3609 static int proc_unres_qlen(const struct ctl_table *ctl, int write,
3610 			   void *buffer, size_t *lenp, loff_t *ppos)
3611 {
3612 	int size, ret;
3613 	struct ctl_table tmp = *ctl;
3614 
3615 	tmp.extra1 = SYSCTL_ZERO;
3616 	tmp.extra2 = &unres_qlen_max;
3617 	tmp.data = &size;
3618 
3619 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3620 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3621 
3622 	if (write && !ret)
3623 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3624 	return ret;
3625 }
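
/*
 * Illustrative sketch of the scaling done in proc_unres_qlen() above: the
 * value is stored in bytes (QUEUE_LEN_BYTES) but shown and written in
 * whole-frame units.  PER_FRAME_TRUESIZE is a stand-in for
 * SKB_TRUESIZE(ETH_FRAME_LEN); its real value depends on struct sk_buff
 * layout, so the constant below is an assumption for illustration only.
 */
#include <stdio.h>

#define PER_FRAME_TRUESIZE	2048	/* placeholder, not the real SKB_TRUESIZE() */

int main(void)
{
	int qlen_bytes = 101 * PER_FRAME_TRUESIZE;	/* stored value */
	int shown = qlen_bytes / PER_FRAME_TRUESIZE;	/* value reported on read */
	int written = 50;				/* value an admin writes */

	printf("unres_qlen reads back as %d frames\n", shown);
	printf("writing %d stores %d bytes\n", written, written * PER_FRAME_TRUESIZE);
	return 0;
}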
3626 
3627 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3628 				  int index)
3629 {
3630 	struct net_device *dev;
3631 	int family = neigh_parms_family(p);
3632 
3633 	rcu_read_lock();
3634 	for_each_netdev_rcu(net, dev) {
3635 		struct neigh_parms *dst_p =
3636 				neigh_get_dev_parms_rcu(dev, family);
3637 
3638 		if (dst_p && !test_bit(index, dst_p->data_state))
3639 			dst_p->data[index] = p->data[index];
3640 	}
3641 	rcu_read_unlock();
3642 }
3643 
3644 static void neigh_proc_update(const struct ctl_table *ctl, int write)
3645 {
3646 	struct net_device *dev = ctl->extra1;
3647 	struct neigh_parms *p = ctl->extra2;
3648 	struct net *net = neigh_parms_net(p);
3649 	int index = (int *) ctl->data - p->data;
3650 
3651 	if (!write)
3652 		return;
3653 
3654 	set_bit(index, p->data_state);
3655 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3656 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3657 	if (!dev) /* NULL dev means this is default value */
3658 		neigh_copy_dflt_parms(net, p, index);
3659 }
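
/*
 * Illustrative userspace sketch (not part of this file): when the "default"
 * copy of a parameter is written, neigh_proc_update() above calls
 * neigh_copy_dflt_parms(), which propagates the value to every per-device
 * parms entry whose own bit in data_state is not set.  The device name
 * "eth0" is an assumption for illustration; run as root.
 */
#include <stdio.h>

static int read_int(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	const char *dflt = "/proc/sys/net/ipv4/neigh/default/app_solicit";
	const char *dev  = "/proc/sys/net/ipv4/neigh/eth0/app_solicit";
	FILE *f = fopen(dflt, "w");

	if (!f) {
		perror("fopen (need root)");
		return 1;
	}
	fprintf(f, "2\n");
	fclose(f);
	/* eth0 inherits the new default unless it was set explicitly before */
	printf("default=%d eth0=%d\n", read_int(dflt), read_int(dev));
	return 0;
}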
3660 
3661 static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write,
3662 					   void *buffer, size_t *lenp,
3663 					   loff_t *ppos)
3664 {
3665 	struct ctl_table tmp = *ctl;
3666 	int ret;
3667 
3668 	tmp.extra1 = SYSCTL_ZERO;
3669 	tmp.extra2 = SYSCTL_INT_MAX;
3670 
3671 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3672 	neigh_proc_update(ctl, write);
3673 	return ret;
3674 }
3675 
3676 static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write,
3677 						   void *buffer, size_t *lenp, loff_t *ppos)
3678 {
3679 	struct ctl_table tmp = *ctl;
3680 	int ret;
3681 
3682 	int min = msecs_to_jiffies(1);
3683 
3684 	tmp.extra1 = &min;
3685 	tmp.extra2 = NULL;
3686 
3687 	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3688 	neigh_proc_update(ctl, write);
3689 	return ret;
3690 }
3691 
3692 int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer,
3693 			size_t *lenp, loff_t *ppos)
3694 {
3695 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3696 
3697 	neigh_proc_update(ctl, write);
3698 	return ret;
3699 }
3700 EXPORT_SYMBOL(neigh_proc_dointvec);
3701 
3702 int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer,
3703 				size_t *lenp, loff_t *ppos)
3704 {
3705 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3706 
3707 	neigh_proc_update(ctl, write);
3708 	return ret;
3709 }
3710 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3711 
3712 static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write,
3713 					      void *buffer, size_t *lenp,
3714 					      loff_t *ppos)
3715 {
3716 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3717 
3718 	neigh_proc_update(ctl, write);
3719 	return ret;
3720 }
3721 
3722 int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
3723 				   void *buffer, size_t *lenp, loff_t *ppos)
3724 {
3725 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3726 
3727 	neigh_proc_update(ctl, write);
3728 	return ret;
3729 }
3730 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3731 
3732 static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write,
3733 					  void *buffer, size_t *lenp,
3734 					  loff_t *ppos)
3735 {
3736 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3737 
3738 	neigh_proc_update(ctl, write);
3739 	return ret;
3740 }
3741 
3742 static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write,
3743 					  void *buffer, size_t *lenp,
3744 					  loff_t *ppos)
3745 {
3746 	struct neigh_parms *p = ctl->extra2;
3747 	int ret;
3748 
3749 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3750 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3751 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3752 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3753 	else
3754 		ret = -1;
3755 
3756 	if (write && ret == 0) {
3757 		/* update reachable_time as well; otherwise the change will
3758 		 * only take effect the next time neigh_periodic_work
3759 		 * decides to recompute it
3760 		 */
3761 		neigh_set_reach_time(p);
3762 	}
3763 	return ret;
3764 }
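
/*
 * Illustrative userspace sketch (not part of this file): writing either
 * base_reachable_time knob goes through neigh_proc_base_reachable_time()
 * above, so reachable_time is refreshed immediately instead of waiting for
 * the next neigh_periodic_work run.  The path assumes the IPv4 "default"
 * parms; run as root.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/neigh/default/base_reachable_time_ms", "w");

	if (!f) {
		perror("fopen (need root)");
		return 1;
	}
	fprintf(f, "30000\n");	/* 30 seconds */
	fclose(f);
	return 0;
}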
3765 
3766 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3767 	(&((struct neigh_parms *) 0)->data[index])
3768 
3769 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3770 	[NEIGH_VAR_ ## attr] = { \
3771 		.procname	= name, \
3772 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3773 		.maxlen		= sizeof(int), \
3774 		.mode		= mval, \
3775 		.proc_handler	= proc, \
3776 	}
3777 
3778 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3779 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3780 
3781 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3782 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3783 
3784 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3785 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3786 
3787 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3788 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3789 
3790 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3791 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3792 
3793 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3794 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3795 
3796 static struct neigh_sysctl_table {
3797 	struct ctl_table_header *sysctl_header;
3798 	struct ctl_table neigh_vars[NEIGH_VAR_MAX];
3799 } neigh_sysctl_template __read_mostly = {
3800 	.neigh_vars = {
3801 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3802 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3803 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3804 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3805 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3806 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3807 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3808 		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3809 						       "interval_probe_time_ms"),
3810 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3811 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3812 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3813 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3814 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3815 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3816 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3817 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3818 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3819 		[NEIGH_VAR_GC_INTERVAL] = {
3820 			.procname	= "gc_interval",
3821 			.maxlen		= sizeof(int),
3822 			.mode		= 0644,
3823 			.proc_handler	= proc_dointvec_jiffies,
3824 		},
3825 		[NEIGH_VAR_GC_THRESH1] = {
3826 			.procname	= "gc_thresh1",
3827 			.maxlen		= sizeof(int),
3828 			.mode		= 0644,
3829 			.extra1		= SYSCTL_ZERO,
3830 			.extra2		= SYSCTL_INT_MAX,
3831 			.proc_handler	= proc_dointvec_minmax,
3832 		},
3833 		[NEIGH_VAR_GC_THRESH2] = {
3834 			.procname	= "gc_thresh2",
3835 			.maxlen		= sizeof(int),
3836 			.mode		= 0644,
3837 			.extra1		= SYSCTL_ZERO,
3838 			.extra2		= SYSCTL_INT_MAX,
3839 			.proc_handler	= proc_dointvec_minmax,
3840 		},
3841 		[NEIGH_VAR_GC_THRESH3] = {
3842 			.procname	= "gc_thresh3",
3843 			.maxlen		= sizeof(int),
3844 			.mode		= 0644,
3845 			.extra1		= SYSCTL_ZERO,
3846 			.extra2		= SYSCTL_INT_MAX,
3847 			.proc_handler	= proc_dointvec_minmax,
3848 		},
3849 	},
3850 };
3851 
3852 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3853 			  proc_handler *handler)
3854 {
3855 	int i;
3856 	struct neigh_sysctl_table *t;
3857 	const char *dev_name_source;
3858 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3859 	char *p_name;
3860 	size_t neigh_vars_size;
3861 
3862 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3863 	if (!t)
3864 		goto err;
3865 
3866 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3867 		t->neigh_vars[i].data += (long) p;
3868 		t->neigh_vars[i].extra1 = dev;
3869 		t->neigh_vars[i].extra2 = p;
3870 	}
3871 
3872 	neigh_vars_size = ARRAY_SIZE(t->neigh_vars);
3873 	if (dev) {
3874 		dev_name_source = dev->name;
3875 		/* Terminate the table early */
3876 		neigh_vars_size = NEIGH_VAR_BASE_REACHABLE_TIME_MS + 1;
3877 	} else {
3878 		struct neigh_table *tbl = p->tbl;
3879 		dev_name_source = "default";
3880 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3881 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3882 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3883 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3884 	}
3885 
3886 	if (handler) {
3887 		/* RetransTime */
3888 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3889 		/* ReachableTime */
3890 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3891 		/* RetransTime (in milliseconds) */
3892 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3893 		/* ReachableTime (in milliseconds) */
3894 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3895 	} else {
3896 		/* These handlers update p->reachable_time as soon as
3897 		 * base_reachable_time(_ms) is written, so the new value takes
3898 		 * effect at the next neighbour update instead of waiting for
3899 		 * neigh_periodic_work to recompute it (which can take minutes).
3900 		 * Any handler that replaces them should do the same.
3901 		 */
3902 		/* ReachableTime */
3903 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3904 			neigh_proc_base_reachable_time;
3905 		/* ReachableTime (in milliseconds) */
3906 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3907 			neigh_proc_base_reachable_time;
3908 	}
3909 
3910 	switch (neigh_parms_family(p)) {
3911 	case AF_INET:
3912 	      p_name = "ipv4";
3913 	      break;
3914 	case AF_INET6:
3915 	      p_name = "ipv6";
3916 	      break;
3917 	default:
3918 	      BUG();
3919 	}
3920 
3921 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3922 		p_name, dev_name_source);
3923 	t->sysctl_header = register_net_sysctl_sz(neigh_parms_net(p),
3924 						  neigh_path, t->neigh_vars,
3925 						  neigh_vars_size);
3926 	if (!t->sysctl_header)
3927 		goto free;
3928 
3929 	p->sysctl_table = t;
3930 	return 0;
3931 
3932 free:
3933 	kfree(t);
3934 err:
3935 	return -ENOBUFS;
3936 }
3937 EXPORT_SYMBOL(neigh_sysctl_register);
3938 
3939 void neigh_sysctl_unregister(struct neigh_parms *p)
3940 {
3941 	if (p->sysctl_table) {
3942 		struct neigh_sysctl_table *t = p->sysctl_table;
3943 		p->sysctl_table = NULL;
3944 		unregister_net_sysctl_table(t->sysctl_header);
3945 		kfree(t);
3946 	}
3947 }
3948 EXPORT_SYMBOL(neigh_sysctl_unregister);
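
/*
 * Illustrative userspace sketch (not part of this file): every successful
 * neigh_sysctl_register() call creates a directory under
 * net/<ipv4|ipv6>/neigh/ named after the device, plus "default" for the
 * table-wide parameters.  This lists the IPv4 ones.
 */
#include <stdio.h>
#include <dirent.h>

int main(void)
{
	DIR *d = opendir("/proc/sys/net/ipv4/neigh");
	struct dirent *de;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((de = readdir(d)) != NULL)
		if (de->d_name[0] != '.')
			printf("neigh parms registered for: %s\n", de->d_name);
	closedir(d);
	return 0;
}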
3949 
3950 #endif	/* CONFIG_SYSCTL */
3951 
3952 static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
3953 	{.msgtype = RTM_NEWNEIGH, .doit = neigh_add},
3954 	{.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
3955 	{.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
3956 	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
3957 	{.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info,
3958 	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
3959 	{.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set,
3960 	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
3961 };
3962 
3963 static int __init neigh_init(void)
3964 {
3965 	rtnl_register_many(neigh_rtnl_msg_handlers);
3966 	return 0;
3967 }
3968 
3969 subsys_initcall(neigh_init);
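
/*
 * Illustrative userspace sketch (not part of this file): the handlers
 * registered by neigh_init() above serve RTM_GETNEIGH dump requests; this
 * sends one for all families and counts the RTM_NEWNEIGH replies.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
	} req = {
		.nlh = {
			.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
			.nlmsg_type = RTM_GETNEIGH,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.ndm = { .ndm_family = AF_UNSPEC },
	};
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	char buf[16384];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	int count = 0, done = 0;

	if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
			     (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		perror("netlink");
		return 1;
	}
	while (!done) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		if (len <= 0)
			break;
		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE) {
				done = 1;
				break;
			}
			if (nlh->nlmsg_type == RTM_NEWNEIGH)
				count++;
		}
	}
	printf("%d neighbour entries dumped\n", count);
	close(fd);
	return 0;
}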
3970