xref: /linux/net/core/neighbour.c (revision c75c5ab575af7db707689cdbb5a5c458e9a034bb)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 
42 #define NEIGH_DEBUG 1
43 
44 #define NEIGH_PRINTK(x...) printk(x)
45 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
46 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
47 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
48 
49 #if NEIGH_DEBUG >= 1
50 #undef NEIGH_PRINTK1
51 #define NEIGH_PRINTK1 NEIGH_PRINTK
52 #endif
53 #if NEIGH_DEBUG >= 2
54 #undef NEIGH_PRINTK2
55 #define NEIGH_PRINTK2 NEIGH_PRINTK
56 #endif
57 
58 #define PNEIGH_HASHMASK		0xF
59 
60 static void neigh_timer_handler(unsigned long arg);
61 static void __neigh_notify(struct neighbour *n, int type, int flags);
62 static void neigh_update_notify(struct neighbour *neigh);
63 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
64 
65 static struct neigh_table *neigh_tables;
66 #ifdef CONFIG_PROC_FS
67 static const struct file_operations neigh_stat_seq_fops;
68 #endif
69 
70 /*
71    Neighbour hash table buckets are protected with rwlock tbl->lock.
72 
73    - All scans of and updates to the hash buckets MUST be made under this lock.
74    - NOTHING clever should be done under this lock: no callbacks into
75      protocol backends, no attempts to send anything to the network.
76      That would deadlock if the backend/driver wants to use the
77      neighbour cache.
78    - If an entry requires some non-trivial action, increase its
79      reference count and release the table lock.
80 
81    Neighbour entries are protected:
82    - by their reference count.
83    - by the rwlock neigh->lock.
84 
85    The reference count prevents destruction.
86 
87    neigh->lock mainly serializes the link-layer address data and its
88    validity state.  However, the same lock also protects other entry fields:
89     - timer
90     - resolution queue
91 
92    Again, nothing clever shall be done under neigh->lock; the most
93    complicated procedure we allow is dev->hard_header, which is
94    assumed to be simple and not to call back into the neighbour
95    tables.
96 
97    The last lock is neigh_tbl_lock.  It is a pure SMP lock, protecting
98    the list of neighbour tables.  This list is used only in process context.
99  */
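/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the "pin, unlock, then act" rule above in its canonical
 * form.  The bucket-walk placeholder is hypothetical.
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <find entry in its hash bucket>;
 *	if (n)
 *		neigh_hold(n);		// refcnt now prevents destruction
 *	write_unlock_bh(&tbl->lock);	// drop lock BEFORE non-trivial work
 *	if (n) {
 *		<call into driver / send to network>;
 *		neigh_release(n);	// may free the entry
 *	}
 */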
100 
101 static DEFINE_RWLOCK(neigh_tbl_lock);
102 
103 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
104 {
105 	kfree_skb(skb);
106 	return -ENETDOWN;
107 }
108 
109 static void neigh_cleanup_and_release(struct neighbour *neigh)
110 {
111 	if (neigh->parms->neigh_cleanup)
112 		neigh->parms->neigh_cleanup(neigh);
113 
114 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
115 	neigh_release(neigh);
116 }
117 
118 /*
119  * Returns a value uniformly distributed over the interval
120  * (1/2)*base ... (3/2)*base.  This matches the default IPv6 behaviour
121  * and is not overridable, because it is a really reasonable choice.
122  */
123 
124 unsigned long neigh_rand_reach_time(unsigned long base)
125 {
126 	return base ? (net_random() % base) + (base >> 1) : 0;
127 }
128 EXPORT_SYMBOL(neigh_rand_reach_time);
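/*
 * Worked example (editorial addition): with base = 30 * HZ,
 * net_random() % base is uniform over [0, 30*HZ) and base >> 1 adds
 * 15*HZ, so the result is uniform over [15*HZ, 45*HZ) -- exactly the
 * (1/2)*base ... (3/2)*base interval promised above.  A base of 0
 * short-circuits to 0, avoiding a modulo by zero.
 */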
129 
130 
131 static int neigh_forced_gc(struct neigh_table *tbl)
132 {
133 	int shrunk = 0;
134 	int i;
135 	struct neigh_hash_table *nht;
136 
137 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
138 
139 	write_lock_bh(&tbl->lock);
140 	nht = rcu_dereference_protected(tbl->nht,
141 					lockdep_is_held(&tbl->lock));
142 	for (i = 0; i < (1 << nht->hash_shift); i++) {
143 		struct neighbour *n;
144 		struct neighbour __rcu **np;
145 
146 		np = &nht->hash_buckets[i];
147 		while ((n = rcu_dereference_protected(*np,
148 					lockdep_is_held(&tbl->lock))) != NULL) {
149 			/* A neighbour record may be discarded if:
150 			 * - nobody else holds a reference to it, and
151 			 * - it is not permanent.
152 			 */
153 			write_lock(&n->lock);
154 			if (atomic_read(&n->refcnt) == 1 &&
155 			    !(n->nud_state & NUD_PERMANENT)) {
156 				rcu_assign_pointer(*np,
157 					rcu_dereference_protected(n->next,
158 						  lockdep_is_held(&tbl->lock)));
159 				n->dead = 1;
160 				shrunk	= 1;
161 				write_unlock(&n->lock);
162 				neigh_cleanup_and_release(n);
163 				continue;
164 			}
165 			write_unlock(&n->lock);
166 			np = &n->next;
167 		}
168 	}
169 
170 	tbl->last_flush = jiffies;
171 
172 	write_unlock_bh(&tbl->lock);
173 
174 	return shrunk;
175 }
176 
177 static void neigh_add_timer(struct neighbour *n, unsigned long when)
178 {
179 	neigh_hold(n);
180 	if (unlikely(mod_timer(&n->timer, when))) {
181 		printk("NEIGH: BUG, double timer add, state is %x\n",
182 		       n->nud_state);
183 		dump_stack();
184 	}
185 }
186 
187 static int neigh_del_timer(struct neighbour *n)
188 {
189 	if ((n->nud_state & NUD_IN_TIMER) &&
190 	    del_timer(&n->timer)) {
191 		neigh_release(n);
192 		return 1;
193 	}
194 	return 0;
195 }
196 
197 static void pneigh_queue_purge(struct sk_buff_head *list)
198 {
199 	struct sk_buff *skb;
200 
201 	while ((skb = skb_dequeue(list)) != NULL) {
202 		dev_put(skb->dev);
203 		kfree_skb(skb);
204 	}
205 }
206 
207 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
208 {
209 	int i;
210 	struct neigh_hash_table *nht;
211 
212 	nht = rcu_dereference_protected(tbl->nht,
213 					lockdep_is_held(&tbl->lock));
214 
215 	for (i = 0; i < (1 << nht->hash_shift); i++) {
216 		struct neighbour *n;
217 		struct neighbour __rcu **np = &nht->hash_buckets[i];
218 
219 		while ((n = rcu_dereference_protected(*np,
220 					lockdep_is_held(&tbl->lock))) != NULL) {
221 			if (dev && n->dev != dev) {
222 				np = &n->next;
223 				continue;
224 			}
225 			rcu_assign_pointer(*np,
226 				   rcu_dereference_protected(n->next,
227 						lockdep_is_held(&tbl->lock)));
228 			write_lock(&n->lock);
229 			neigh_del_timer(n);
230 			n->dead = 1;
231 
232 			if (atomic_read(&n->refcnt) != 1) {
233 				/* This is the most unpleasant situation:
234 				   we must destroy the neighbour entry,
235 				   but someone still uses it.
236 
237 				   Destruction will be delayed until
238 				   the last user releases the entry, but
239 				   we must kill timers etc. and move
240 				   it to a safe state.
241 				 */
242 				skb_queue_purge(&n->arp_queue);
243 				n->arp_queue_len_bytes = 0;
244 				n->output = neigh_blackhole;
245 				if (n->nud_state & NUD_VALID)
246 					n->nud_state = NUD_NOARP;
247 				else
248 					n->nud_state = NUD_NONE;
249 				NEIGH_PRINTK2("neigh %p is stray.\n", n);
250 			}
251 			write_unlock(&n->lock);
252 			neigh_cleanup_and_release(n);
253 		}
254 	}
255 }
256 
257 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
258 {
259 	write_lock_bh(&tbl->lock);
260 	neigh_flush_dev(tbl, dev);
261 	write_unlock_bh(&tbl->lock);
262 }
263 EXPORT_SYMBOL(neigh_changeaddr);
264 
265 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
266 {
267 	write_lock_bh(&tbl->lock);
268 	neigh_flush_dev(tbl, dev);
269 	pneigh_ifdown(tbl, dev);
270 	write_unlock_bh(&tbl->lock);
271 
272 	del_timer_sync(&tbl->proxy_timer);
273 	pneigh_queue_purge(&tbl->proxy_queue);
274 	return 0;
275 }
276 EXPORT_SYMBOL(neigh_ifdown);
277 
278 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
279 {
280 	struct neighbour *n = NULL;
281 	unsigned long now = jiffies;
282 	int entries;
283 
284 	entries = atomic_inc_return(&tbl->entries) - 1;
285 	if (entries >= tbl->gc_thresh3 ||
286 	    (entries >= tbl->gc_thresh2 &&
287 	     time_after(now, tbl->last_flush + 5 * HZ))) {
288 		if (!neigh_forced_gc(tbl) &&
289 		    entries >= tbl->gc_thresh3)
290 			goto out_entries;
291 	}
292 
293 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
294 	if (!n)
295 		goto out_entries;
296 
297 	skb_queue_head_init(&n->arp_queue);
298 	rwlock_init(&n->lock);
299 	seqlock_init(&n->ha_lock);
300 	n->updated	  = n->used = now;
301 	n->nud_state	  = NUD_NONE;
302 	n->output	  = neigh_blackhole;
303 	seqlock_init(&n->hh.hh_lock);
304 	n->parms	  = neigh_parms_clone(&tbl->parms);
305 	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
306 
307 	NEIGH_CACHE_STAT_INC(tbl, allocs);
308 	n->tbl		  = tbl;
309 	atomic_set(&n->refcnt, 1);
310 	n->dead		  = 1;
311 out:
312 	return n;
313 
314 out_entries:
315 	atomic_dec(&tbl->entries);
316 	goto out;
317 }
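/*
 * Editorial note on the thresholds used above: gc_thresh3 is a hard cap
 * (the allocation fails if synchronous GC cannot get back under it),
 * crossing gc_thresh2 triggers a synchronous flush only when the last
 * one was more than 5 seconds ago, and gc_thresh1 (checked in
 * neigh_periodic_work()) is the level below which the periodic GC does
 * not bother scanning at all.
 */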
318 
319 static void neigh_get_hash_rnd(u32 *x)
320 {
321 	get_random_bytes(x, sizeof(*x));
322 	*x |= 1;
323 }
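/*
 * Editorial note: forcing the low bit on above makes hash_rnd odd and
 * therefore non-zero.  Protocol hash functions use hash_rnd as a
 * multiplier, and an odd multiplier keeps multiplicative hashing
 * bijective so no key bits are lost -- an inference from common
 * practice, not a claim made by the original source.
 */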
324 
325 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
326 {
327 	size_t size = (1 << shift) * sizeof(struct neighbour *);
328 	struct neigh_hash_table *ret;
329 	struct neighbour __rcu **buckets;
330 	int i;
331 
332 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
333 	if (!ret)
334 		return NULL;
335 	if (size <= PAGE_SIZE)
336 		buckets = kzalloc(size, GFP_ATOMIC);
337 	else
338 		buckets = (struct neighbour __rcu **)
339 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
340 					   get_order(size));
341 	if (!buckets) {
342 		kfree(ret);
343 		return NULL;
344 	}
345 	ret->hash_buckets = buckets;
346 	ret->hash_shift = shift;
347 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
348 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
349 	return ret;
350 }
351 
352 static void neigh_hash_free_rcu(struct rcu_head *head)
353 {
354 	struct neigh_hash_table *nht = container_of(head,
355 						    struct neigh_hash_table,
356 						    rcu);
357 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
358 	struct neighbour __rcu **buckets = nht->hash_buckets;
359 
360 	if (size <= PAGE_SIZE)
361 		kfree(buckets);
362 	else
363 		free_pages((unsigned long)buckets, get_order(size));
364 	kfree(nht);
365 }
366 
367 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
368 						unsigned long new_shift)
369 {
370 	unsigned int i, hash;
371 	struct neigh_hash_table *new_nht, *old_nht;
372 
373 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
374 
375 	old_nht = rcu_dereference_protected(tbl->nht,
376 					    lockdep_is_held(&tbl->lock));
377 	new_nht = neigh_hash_alloc(new_shift);
378 	if (!new_nht)
379 		return old_nht;
380 
381 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
382 		struct neighbour *n, *next;
383 
384 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
385 						   lockdep_is_held(&tbl->lock));
386 		     n != NULL;
387 		     n = next) {
388 			hash = tbl->hash(n->primary_key, n->dev,
389 					 new_nht->hash_rnd);
390 
391 			hash >>= (32 - new_nht->hash_shift);
392 			next = rcu_dereference_protected(n->next,
393 						lockdep_is_held(&tbl->lock));
394 
395 			rcu_assign_pointer(n->next,
396 					   rcu_dereference_protected(
397 						new_nht->hash_buckets[hash],
398 						lockdep_is_held(&tbl->lock)));
399 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
400 		}
401 	}
402 
403 	rcu_assign_pointer(tbl->nht, new_nht);
404 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
405 	return new_nht;
406 }
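/*
 * Editorial note on the grow above: entries are respliced into the new
 * table in place (n->next is rewritten), so a lockless reader that is
 * mid-walk may follow a chain into the new table and spuriously miss an
 * entry -- a miss is tolerated and merely costs a re-resolve.  What RCU
 * does guarantee is that neither table's memory is freed under the
 * reader: the old table is only released via call_rcu() after a grace
 * period.
 */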
407 
408 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
409 			       struct net_device *dev)
410 {
411 	struct neighbour *n;
412 	int key_len = tbl->key_len;
413 	u32 hash_val;
414 	struct neigh_hash_table *nht;
415 
416 	NEIGH_CACHE_STAT_INC(tbl, lookups);
417 
418 	rcu_read_lock_bh();
419 	nht = rcu_dereference_bh(tbl->nht);
420 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
421 
422 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
423 	     n != NULL;
424 	     n = rcu_dereference_bh(n->next)) {
425 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
426 			if (!atomic_inc_not_zero(&n->refcnt))
427 				n = NULL;
428 			NEIGH_CACHE_STAT_INC(tbl, hits);
429 			break;
430 		}
431 	}
432 
433 	rcu_read_unlock_bh();
434 	return n;
435 }
436 EXPORT_SYMBOL(neigh_lookup);
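/*
 * Hypothetical caller sketch (editorial addition): neigh_lookup()
 * returns the entry with a reference already taken, so every successful
 * lookup must be paired with neigh_release().
 */
#if 0	/* illustrative only */
static void example_inspect_neigh(struct neigh_table *tbl, const void *pkey,
				  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		return;			/* not cached */
	read_lock_bh(&n->lock);
	/* ... inspect n->nud_state, n->ha ... */
	read_unlock_bh(&n->lock);
	neigh_release(n);		/* drop the lookup reference */
}
#endif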
437 
438 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
439 				     const void *pkey)
440 {
441 	struct neighbour *n;
442 	int key_len = tbl->key_len;
443 	u32 hash_val;
444 	struct neigh_hash_table *nht;
445 
446 	NEIGH_CACHE_STAT_INC(tbl, lookups);
447 
448 	rcu_read_lock_bh();
449 	nht = rcu_dereference_bh(tbl->nht);
450 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
451 
452 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
453 	     n != NULL;
454 	     n = rcu_dereference_bh(n->next)) {
455 		if (!memcmp(n->primary_key, pkey, key_len) &&
456 		    net_eq(dev_net(n->dev), net)) {
457 			if (!atomic_inc_not_zero(&n->refcnt))
458 				n = NULL;
459 			NEIGH_CACHE_STAT_INC(tbl, hits);
460 			break;
461 		}
462 	}
463 
464 	rcu_read_unlock_bh();
465 	return n;
466 }
467 EXPORT_SYMBOL(neigh_lookup_nodev);
468 
469 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
470 				 struct net_device *dev, bool want_ref)
471 {
472 	u32 hash_val;
473 	int key_len = tbl->key_len;
474 	int error;
475 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
476 	struct neigh_hash_table *nht;
477 
478 	if (!n) {
479 		rc = ERR_PTR(-ENOBUFS);
480 		goto out;
481 	}
482 
483 	memcpy(n->primary_key, pkey, key_len);
484 	n->dev = dev;
485 	dev_hold(dev);
486 
487 	/* Protocol specific setup. */
488 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
489 		rc = ERR_PTR(error);
490 		goto out_neigh_release;
491 	}
492 
493 	if (dev->netdev_ops->ndo_neigh_construct) {
494 		error = dev->netdev_ops->ndo_neigh_construct(n);
495 		if (error < 0) {
496 			rc = ERR_PTR(error);
497 			goto out_neigh_release;
498 		}
499 	}
500 
501 	/* Device specific setup. */
502 	if (n->parms->neigh_setup &&
503 	    (error = n->parms->neigh_setup(n)) < 0) {
504 		rc = ERR_PTR(error);
505 		goto out_neigh_release;
506 	}
507 
508 	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
509 
510 	write_lock_bh(&tbl->lock);
511 	nht = rcu_dereference_protected(tbl->nht,
512 					lockdep_is_held(&tbl->lock));
513 
514 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
515 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
516 
517 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
518 
519 	if (n->parms->dead) {
520 		rc = ERR_PTR(-EINVAL);
521 		goto out_tbl_unlock;
522 	}
523 
524 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
525 					    lockdep_is_held(&tbl->lock));
526 	     n1 != NULL;
527 	     n1 = rcu_dereference_protected(n1->next,
528 			lockdep_is_held(&tbl->lock))) {
529 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
530 			if (want_ref)
531 				neigh_hold(n1);
532 			rc = n1;
533 			goto out_tbl_unlock;
534 		}
535 	}
536 
537 	n->dead = 0;
538 	if (want_ref)
539 		neigh_hold(n);
540 	rcu_assign_pointer(n->next,
541 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
542 						     lockdep_is_held(&tbl->lock)));
543 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
544 	write_unlock_bh(&tbl->lock);
545 	NEIGH_PRINTK2("neigh %p is created.\n", n);
546 	rc = n;
547 out:
548 	return rc;
549 out_tbl_unlock:
550 	write_unlock_bh(&tbl->lock);
551 out_neigh_release:
552 	neigh_release(n);
553 	goto out;
554 }
555 EXPORT_SYMBOL(__neigh_create);
556 
557 static u32 pneigh_hash(const void *pkey, int key_len)
558 {
559 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
560 	hash_val ^= (hash_val >> 16);
561 	hash_val ^= hash_val >> 8;
562 	hash_val ^= hash_val >> 4;
563 	hash_val &= PNEIGH_HASHMASK;
564 	return hash_val;
565 }
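/*
 * Worked example (editorial addition): the last four key bytes are read
 * as a u32 (host byte order) and xor-folded down to four bits.  For a
 * key whose last u32 reads 0xc0a80001:
 *	0xc0a80001 ^ (val >> 16) = 0xc0a8c0a9
 *	0xc0a8c0a9 ^ (val >> 8)  = 0xc0686869
 *	0xc0686869 ^ (val >> 4)  = 0xcc6eeeef
 *	0xcc6eeeef & PNEIGH_HASHMASK = 15
 * i.e. this key lands in proxy bucket 15 of 16.
 */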
566 
567 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
568 					      struct net *net,
569 					      const void *pkey,
570 					      int key_len,
571 					      struct net_device *dev)
572 {
573 	while (n) {
574 		if (!memcmp(n->key, pkey, key_len) &&
575 		    net_eq(pneigh_net(n), net) &&
576 		    (n->dev == dev || !n->dev))
577 			return n;
578 		n = n->next;
579 	}
580 	return NULL;
581 }
582 
583 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
584 		struct net *net, const void *pkey, struct net_device *dev)
585 {
586 	int key_len = tbl->key_len;
587 	u32 hash_val = pneigh_hash(pkey, key_len);
588 
589 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
590 				 net, pkey, key_len, dev);
591 }
592 EXPORT_SYMBOL_GPL(__pneigh_lookup);
593 
594 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
595 				    struct net *net, const void *pkey,
596 				    struct net_device *dev, int creat)
597 {
598 	struct pneigh_entry *n;
599 	int key_len = tbl->key_len;
600 	u32 hash_val = pneigh_hash(pkey, key_len);
601 
602 	read_lock_bh(&tbl->lock);
603 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
604 			      net, pkey, key_len, dev);
605 	read_unlock_bh(&tbl->lock);
606 
607 	if (n || !creat)
608 		goto out;
609 
610 	ASSERT_RTNL();
611 
612 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
613 	if (!n)
614 		goto out;
615 
616 	write_pnet(&n->net, hold_net(net));
617 	memcpy(n->key, pkey, key_len);
618 	n->dev = dev;
619 	if (dev)
620 		dev_hold(dev);
621 
622 	if (tbl->pconstructor && tbl->pconstructor(n)) {
623 		if (dev)
624 			dev_put(dev);
625 		release_net(net);
626 		kfree(n);
627 		n = NULL;
628 		goto out;
629 	}
630 
631 	write_lock_bh(&tbl->lock);
632 	n->next = tbl->phash_buckets[hash_val];
633 	tbl->phash_buckets[hash_val] = n;
634 	write_unlock_bh(&tbl->lock);
635 out:
636 	return n;
637 }
638 EXPORT_SYMBOL(pneigh_lookup);
639 
640 
641 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
642 		  struct net_device *dev)
643 {
644 	struct pneigh_entry *n, **np;
645 	int key_len = tbl->key_len;
646 	u32 hash_val = pneigh_hash(pkey, key_len);
647 
648 	write_lock_bh(&tbl->lock);
649 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
650 	     np = &n->next) {
651 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
652 		    net_eq(pneigh_net(n), net)) {
653 			*np = n->next;
654 			write_unlock_bh(&tbl->lock);
655 			if (tbl->pdestructor)
656 				tbl->pdestructor(n);
657 			if (n->dev)
658 				dev_put(n->dev);
659 			release_net(pneigh_net(n));
660 			kfree(n);
661 			return 0;
662 		}
663 	}
664 	write_unlock_bh(&tbl->lock);
665 	return -ENOENT;
666 }
667 
668 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
669 {
670 	struct pneigh_entry *n, **np;
671 	u32 h;
672 
673 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
674 		np = &tbl->phash_buckets[h];
675 		while ((n = *np) != NULL) {
676 			if (!dev || n->dev == dev) {
677 				*np = n->next;
678 				if (tbl->pdestructor)
679 					tbl->pdestructor(n);
680 				if (n->dev)
681 					dev_put(n->dev);
682 				release_net(pneigh_net(n));
683 				kfree(n);
684 				continue;
685 			}
686 			np = &n->next;
687 		}
688 	}
689 	return -ENOENT;
690 }
691 
692 static void neigh_parms_destroy(struct neigh_parms *parms);
693 
694 static inline void neigh_parms_put(struct neigh_parms *parms)
695 {
696 	if (atomic_dec_and_test(&parms->refcnt))
697 		neigh_parms_destroy(parms);
698 }
699 
700 /*
701  *	The neighbour must already be out of the table.
702  *
703  */
704 void neigh_destroy(struct neighbour *neigh)
705 {
706 	struct net_device *dev = neigh->dev;
707 
708 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
709 
710 	if (!neigh->dead) {
711 		pr_warn("Destroying alive neighbour %p\n", neigh);
712 		dump_stack();
713 		return;
714 	}
715 
716 	if (neigh_del_timer(neigh))
717 		pr_warn("Impossible event\n");
718 
719 	skb_queue_purge(&neigh->arp_queue);
720 	neigh->arp_queue_len_bytes = 0;
721 
722 	if (dev->netdev_ops->ndo_neigh_destroy)
723 		dev->netdev_ops->ndo_neigh_destroy(neigh);
724 
725 	dev_put(dev);
726 	neigh_parms_put(neigh->parms);
727 
728 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
729 
730 	atomic_dec(&neigh->tbl->entries);
731 	kfree_rcu(neigh, rcu);
732 }
733 EXPORT_SYMBOL(neigh_destroy);
734 
735 /* Neighbour state is suspicious;
736    disable the fast path.
737 
738    Called with the neigh entry write-locked.
739  */
740 static void neigh_suspect(struct neighbour *neigh)
741 {
742 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
743 
744 	neigh->output = neigh->ops->output;
745 }
746 
747 /* Neighbour state is OK;
748    enable the fast path.
749 
750    Called with the neigh entry write-locked.
751  */
752 static void neigh_connect(struct neighbour *neigh)
753 {
754 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
755 
756 	neigh->output = neigh->ops->connected_output;
757 }
758 
759 static void neigh_periodic_work(struct work_struct *work)
760 {
761 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
762 	struct neighbour *n;
763 	struct neighbour __rcu **np;
764 	unsigned int i;
765 	struct neigh_hash_table *nht;
766 
767 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
768 
769 	write_lock_bh(&tbl->lock);
770 	nht = rcu_dereference_protected(tbl->nht,
771 					lockdep_is_held(&tbl->lock));
772 
773 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
774 		goto out;
775 
776 	/*
777 	 *	Periodically recompute ReachableTime from the random function.
778 	 */
779 
780 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
781 		struct neigh_parms *p;
782 		tbl->last_rand = jiffies;
783 		for (p = &tbl->parms; p; p = p->next)
784 			p->reachable_time =
785 				neigh_rand_reach_time(p->base_reachable_time);
786 	}
787 
788 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
789 		np = &nht->hash_buckets[i];
790 
791 		while ((n = rcu_dereference_protected(*np,
792 				lockdep_is_held(&tbl->lock))) != NULL) {
793 			unsigned int state;
794 
795 			write_lock(&n->lock);
796 
797 			state = n->nud_state;
798 			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
799 				write_unlock(&n->lock);
800 				goto next_elt;
801 			}
802 
803 			if (time_before(n->used, n->confirmed))
804 				n->used = n->confirmed;
805 
806 			if (atomic_read(&n->refcnt) == 1 &&
807 			    (state == NUD_FAILED ||
808 			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
809 				*np = n->next;
810 				n->dead = 1;
811 				write_unlock(&n->lock);
812 				neigh_cleanup_and_release(n);
813 				continue;
814 			}
815 			write_unlock(&n->lock);
816 
817 next_elt:
818 			np = &n->next;
819 		}
820 		/*
821 		 * It's fine to release the lock here, even if the hash
822 		 * table grows while we are preempted.
823 		 */
824 		write_unlock_bh(&tbl->lock);
825 		cond_resched();
826 		write_lock_bh(&tbl->lock);
827 		nht = rcu_dereference_protected(tbl->nht,
828 						lockdep_is_held(&tbl->lock));
829 	}
830 out:
831 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
832 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
833 	 * base_reachable_time.
834 	 */
835 	schedule_delayed_work(&tbl->gc_work,
836 			      tbl->parms.base_reachable_time >> 1);
837 	write_unlock_bh(&tbl->lock);
838 }
839 
840 static __inline__ int neigh_max_probes(struct neighbour *n)
841 {
842 	struct neigh_parms *p = n->parms;
843 	return (n->nud_state & NUD_PROBE) ?
844 		p->ucast_probes :
845 		p->ucast_probes + p->app_probes + p->mcast_probes;
846 }
847 
848 static void neigh_invalidate(struct neighbour *neigh)
849 	__releases(neigh->lock)
850 	__acquires(neigh->lock)
851 {
852 	struct sk_buff *skb;
853 
854 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
855 	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
856 	neigh->updated = jiffies;
857 
858 	/* This is a very delicate spot: error_report() is a complicated
859 	   routine, and in particular it can hit this same neighbour entry!
860 
861 	   So we try to be careful and avoid a dead loop. --ANK
862 	 */
863 	while (neigh->nud_state == NUD_FAILED &&
864 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
865 		write_unlock(&neigh->lock);
866 		neigh->ops->error_report(neigh, skb);
867 		write_lock(&neigh->lock);
868 	}
869 	skb_queue_purge(&neigh->arp_queue);
870 	neigh->arp_queue_len_bytes = 0;
871 }
872 
873 static void neigh_probe(struct neighbour *neigh)
874 	__releases(neigh->lock)
875 {
876 	struct sk_buff *skb = skb_peek(&neigh->arp_queue);
877 	/* keep skb alive even if arp_queue overflows */
878 	if (skb)
879 		skb = skb_copy(skb, GFP_ATOMIC);
880 	write_unlock(&neigh->lock);
881 	neigh->ops->solicit(neigh, skb);
882 	atomic_inc(&neigh->probes);
883 	kfree_skb(skb);
884 }
885 
886 /* Called when a timer expires for a neighbour entry. */
887 
888 static void neigh_timer_handler(unsigned long arg)
889 {
890 	unsigned long now, next;
891 	struct neighbour *neigh = (struct neighbour *)arg;
892 	unsigned int state;
893 	int notify = 0;
894 
895 	write_lock(&neigh->lock);
896 
897 	state = neigh->nud_state;
898 	now = jiffies;
899 	next = now + HZ;
900 
901 	if (!(state & NUD_IN_TIMER))
902 		goto out;
903 
904 	if (state & NUD_REACHABLE) {
905 		if (time_before_eq(now,
906 				   neigh->confirmed + neigh->parms->reachable_time)) {
907 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
908 			next = neigh->confirmed + neigh->parms->reachable_time;
909 		} else if (time_before_eq(now,
910 					  neigh->used + neigh->parms->delay_probe_time)) {
911 			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
912 			neigh->nud_state = NUD_DELAY;
913 			neigh->updated = jiffies;
914 			neigh_suspect(neigh);
915 			next = now + neigh->parms->delay_probe_time;
916 		} else {
917 			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
918 			neigh->nud_state = NUD_STALE;
919 			neigh->updated = jiffies;
920 			neigh_suspect(neigh);
921 			notify = 1;
922 		}
923 	} else if (state & NUD_DELAY) {
924 		if (time_before_eq(now,
925 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
926 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
927 			neigh->nud_state = NUD_REACHABLE;
928 			neigh->updated = jiffies;
929 			neigh_connect(neigh);
930 			notify = 1;
931 			next = neigh->confirmed + neigh->parms->reachable_time;
932 		} else {
933 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
934 			neigh->nud_state = NUD_PROBE;
935 			neigh->updated = jiffies;
936 			atomic_set(&neigh->probes, 0);
937 			next = now + neigh->parms->retrans_time;
938 		}
939 	} else {
940 		/* NUD_PROBE|NUD_INCOMPLETE */
941 		next = now + neigh->parms->retrans_time;
942 	}
943 
944 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
945 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
946 		neigh->nud_state = NUD_FAILED;
947 		notify = 1;
948 		neigh_invalidate(neigh);
949 	}
950 
951 	if (neigh->nud_state & NUD_IN_TIMER) {
952 		if (time_before(next, jiffies + HZ/2))
953 			next = jiffies + HZ/2;
954 		if (!mod_timer(&neigh->timer, next))
955 			neigh_hold(neigh);
956 	}
957 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
958 		neigh_probe(neigh);
959 	} else {
960 out:
961 		write_unlock(&neigh->lock);
962 	}
963 
964 	if (notify)
965 		neigh_update_notify(neigh);
966 
967 	neigh_release(neigh);
968 }
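/*
 * Editorial summary of the NUD state machine driven by the timer above
 * (derived from the code, not from the original comments):
 *
 *   NUD_REACHABLE --(confirmation aged, recently used)--> NUD_DELAY
 *   NUD_REACHABLE --(confirmation aged, idle)-----------> NUD_STALE
 *   NUD_DELAY     --(confirmation arrived)--------------> NUD_REACHABLE
 *   NUD_DELAY     --(delay_probe_time expired)----------> NUD_PROBE
 *   NUD_PROBE / NUD_INCOMPLETE --(probes exhausted)-----> NUD_FAILED
 *
 * NUD_STALE and NUD_FAILED are not in NUD_IN_TIMER, so the timer is not
 * re-armed for them; NUD_STALE moves to NUD_DELAY when the entry is
 * next used for output (see __neigh_event_send() below).
 */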
969 
970 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
971 {
972 	int rc;
973 	bool immediate_probe = false;
974 
975 	write_lock_bh(&neigh->lock);
976 
977 	rc = 0;
978 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
979 		goto out_unlock_bh;
980 
981 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
982 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
983 			unsigned long next, now = jiffies;
984 
985 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
986 			neigh->nud_state     = NUD_INCOMPLETE;
987 			neigh->updated = now;
988 			next = now + max(neigh->parms->retrans_time, HZ/2);
989 			neigh_add_timer(neigh, next);
990 			immediate_probe = true;
991 		} else {
992 			neigh->nud_state = NUD_FAILED;
993 			neigh->updated = jiffies;
994 			write_unlock_bh(&neigh->lock);
995 
996 			kfree_skb(skb);
997 			return 1;
998 		}
999 	} else if (neigh->nud_state & NUD_STALE) {
1000 		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
1001 		neigh->nud_state = NUD_DELAY;
1002 		neigh->updated = jiffies;
1003 		neigh_add_timer(neigh,
1004 				jiffies + neigh->parms->delay_probe_time);
1005 	}
1006 
1007 	if (neigh->nud_state == NUD_INCOMPLETE) {
1008 		if (skb) {
1009 			while (neigh->arp_queue_len_bytes + skb->truesize >
1010 			       neigh->parms->queue_len_bytes) {
1011 				struct sk_buff *buff;
1012 
1013 				buff = __skb_dequeue(&neigh->arp_queue);
1014 				if (!buff)
1015 					break;
1016 				neigh->arp_queue_len_bytes -= buff->truesize;
1017 				kfree_skb(buff);
1018 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1019 			}
1020 			skb_dst_force(skb);
1021 			__skb_queue_tail(&neigh->arp_queue, skb);
1022 			neigh->arp_queue_len_bytes += skb->truesize;
1023 		}
1024 		rc = 1;
1025 	}
1026 out_unlock_bh:
1027 	if (immediate_probe)
1028 		neigh_probe(neigh);
1029 	else
1030 		write_unlock(&neigh->lock);
1031 	local_bh_enable();
1032 	return rc;
1033 }
1034 EXPORT_SYMBOL(__neigh_event_send);
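/*
 * Editorial note: the return convention above (shared by the
 * neigh_event_send() wrapper in the header) is 0 when the entry is
 * usable and the caller may transmit immediately, and 1 when resolution
 * had to be started -- the skb, if any, was queued or dropped.
 * neigh_resolve_output() below relies on this:
 *
 *	if (!neigh_event_send(neigh, skb)) {
 *		... build the hardware header, then dev_queue_xmit(skb) ...
 *	}
 */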
1035 
1036 static void neigh_update_hhs(struct neighbour *neigh)
1037 {
1038 	struct hh_cache *hh;
1039 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1040 		= NULL;
1041 
1042 	if (neigh->dev->header_ops)
1043 		update = neigh->dev->header_ops->cache_update;
1044 
1045 	if (update) {
1046 		hh = &neigh->hh;
1047 		if (hh->hh_len) {
1048 			write_seqlock_bh(&hh->hh_lock);
1049 			update(hh, neigh->dev, neigh->ha);
1050 			write_sequnlock_bh(&hh->hh_lock);
1051 		}
1052 	}
1053 }
1054 
1055 
1056 
1057 /* Generic update routine.
1058    -- lladdr is the new lladdr, or NULL if it is not supplied.
1059    -- new    is the new state.
1060    -- flags
1061 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1062 				if it is different.
1063 	NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
1064 				lladdr suspect instead of overriding it
1065 				if it is different.
1066 				It also allows retaining the current state
1067 				if the lladdr is unchanged.
1068 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1069 
1070 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1071 				NTF_ROUTER flag.
1072 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
1073 				be a router.
1074 
1075    The caller MUST hold a reference count on the entry.
1076  */
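/*
 * Hypothetical usage sketch (editorial addition): a protocol confirming
 * an entry from a received reply, overriding a changed link-layer
 * address, would do
 *
 *	neigh_update(neigh, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE);
 *
 * while an administrative delete (cf. neigh_delete() further down) is
 *
 *	neigh_update(neigh, NULL, NUD_FAILED,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 */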
1077 
1078 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1079 		 u32 flags)
1080 {
1081 	u8 old;
1082 	int err;
1083 	int notify = 0;
1084 	struct net_device *dev;
1085 	int update_isrouter = 0;
1086 
1087 	write_lock_bh(&neigh->lock);
1088 
1089 	dev    = neigh->dev;
1090 	old    = neigh->nud_state;
1091 	err    = -EPERM;
1092 
1093 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1094 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1095 		goto out;
1096 
1097 	if (!(new & NUD_VALID)) {
1098 		neigh_del_timer(neigh);
1099 		if (old & NUD_CONNECTED)
1100 			neigh_suspect(neigh);
1101 		neigh->nud_state = new;
1102 		err = 0;
1103 		notify = old & NUD_VALID;
1104 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1105 		    (new & NUD_FAILED)) {
1106 			neigh_invalidate(neigh);
1107 			notify = 1;
1108 		}
1109 		goto out;
1110 	}
1111 
1112 	/* Compare new lladdr with cached one */
1113 	if (!dev->addr_len) {
1114 		/* First case: device needs no address. */
1115 		lladdr = neigh->ha;
1116 	} else if (lladdr) {
1117 		/* The second case: if something is already cached
1118 		   and a new address is proposed:
1119 		   - compare new & old
1120 		   - if they are different, check override flag
1121 		 */
1122 		if ((old & NUD_VALID) &&
1123 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1124 			lladdr = neigh->ha;
1125 	} else {
1126 		/* No address is supplied; if we know something,
1127 		   use it, otherwise discard the request.
1128 		 */
1129 		err = -EINVAL;
1130 		if (!(old & NUD_VALID))
1131 			goto out;
1132 		lladdr = neigh->ha;
1133 	}
1134 
1135 	if (new & NUD_CONNECTED)
1136 		neigh->confirmed = jiffies;
1137 	neigh->updated = jiffies;
1138 
1139 	/* If the entry was valid and the address has not changed,
1140 	   do not change the entry state if the new one is STALE.
1141 	 */
1142 	err = 0;
1143 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1144 	if (old & NUD_VALID) {
1145 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1146 			update_isrouter = 0;
1147 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1148 			    (old & NUD_CONNECTED)) {
1149 				lladdr = neigh->ha;
1150 				new = NUD_STALE;
1151 			} else
1152 				goto out;
1153 		} else {
1154 			if (lladdr == neigh->ha && new == NUD_STALE &&
1155 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1156 			     (old & NUD_CONNECTED))
1157 			    )
1158 				new = old;
1159 		}
1160 	}
1161 
1162 	if (new != old) {
1163 		neigh_del_timer(neigh);
1164 		if (new & NUD_IN_TIMER)
1165 			neigh_add_timer(neigh, (jiffies +
1166 						((new & NUD_REACHABLE) ?
1167 						 neigh->parms->reachable_time :
1168 						 0)));
1169 		neigh->nud_state = new;
1170 	}
1171 
1172 	if (lladdr != neigh->ha) {
1173 		write_seqlock(&neigh->ha_lock);
1174 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1175 		write_sequnlock(&neigh->ha_lock);
1176 		neigh_update_hhs(neigh);
1177 		if (!(new & NUD_CONNECTED))
1178 			neigh->confirmed = jiffies -
1179 				      (neigh->parms->base_reachable_time << 1);
1180 		notify = 1;
1181 	}
1182 	if (new == old)
1183 		goto out;
1184 	if (new & NUD_CONNECTED)
1185 		neigh_connect(neigh);
1186 	else
1187 		neigh_suspect(neigh);
1188 	if (!(old & NUD_VALID)) {
1189 		struct sk_buff *skb;
1190 
1191 		/* Again: avoid a dead loop if something went wrong */
1192 
1193 		while (neigh->nud_state & NUD_VALID &&
1194 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1195 			struct dst_entry *dst = skb_dst(skb);
1196 			struct neighbour *n2, *n1 = neigh;
1197 			write_unlock_bh(&neigh->lock);
1198 
1199 			rcu_read_lock();
1200 
1201 			/* Why not just use 'neigh' as-is?  The problem is that
1202 			 * things such as shaper, eql, and sch_teql can end up
1203 			 * using alternative, different, neigh objects to output
1204 			 * the packet in the output path.  So what we need to do
1205 			 * here is re-lookup the top-level neigh in the path so
1206 			 * we can reinject the packet there.
1207 			 */
1208 			n2 = NULL;
1209 			if (dst) {
1210 				n2 = dst_neigh_lookup_skb(dst, skb);
1211 				if (n2)
1212 					n1 = n2;
1213 			}
1214 			n1->output(n1, skb);
1215 			if (n2)
1216 				neigh_release(n2);
1217 			rcu_read_unlock();
1218 
1219 			write_lock_bh(&neigh->lock);
1220 		}
1221 		skb_queue_purge(&neigh->arp_queue);
1222 		neigh->arp_queue_len_bytes = 0;
1223 	}
1224 out:
1225 	if (update_isrouter) {
1226 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1227 			(neigh->flags | NTF_ROUTER) :
1228 			(neigh->flags & ~NTF_ROUTER);
1229 	}
1230 	write_unlock_bh(&neigh->lock);
1231 
1232 	if (notify)
1233 		neigh_update_notify(neigh);
1234 
1235 	return err;
1236 }
1237 EXPORT_SYMBOL(neigh_update);
1238 
1239 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1240 				 u8 *lladdr, void *saddr,
1241 				 struct net_device *dev)
1242 {
1243 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1244 						 lladdr || !dev->addr_len);
1245 	if (neigh)
1246 		neigh_update(neigh, lladdr, NUD_STALE,
1247 			     NEIGH_UPDATE_F_OVERRIDE);
1248 	return neigh;
1249 }
1250 EXPORT_SYMBOL(neigh_event_ns);
1251 
1252 /* called with read_lock_bh(&n->lock); */
1253 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1254 {
1255 	struct net_device *dev = dst->dev;
1256 	__be16 prot = dst->ops->protocol;
1257 	struct hh_cache	*hh = &n->hh;
1258 
1259 	write_lock_bh(&n->lock);
1260 
1261 	/* Only one thread can come in here and initialize the
1262 	 * hh_cache entry.
1263 	 */
1264 	if (!hh->hh_len)
1265 		dev->header_ops->cache(n, hh, prot);
1266 
1267 	write_unlock_bh(&n->lock);
1268 }
1269 
1270 /* This function can be used in contexts where only the old dev_queue_xmit
1271  * worked, e.g. if you want to override the normal output path (eql, shaper)
1272  * but resolution has not been made yet.
1273  */
1274 
1275 int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1276 {
1277 	struct net_device *dev = skb->dev;
1278 
1279 	__skb_pull(skb, skb_network_offset(skb));
1280 
1281 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1282 			    skb->len) < 0 &&
1283 	    dev->header_ops->rebuild(skb))
1284 		return 0;
1285 
1286 	return dev_queue_xmit(skb);
1287 }
1288 EXPORT_SYMBOL(neigh_compat_output);
1289 
1290 /* Slow and careful. */
1291 
1292 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1293 {
1294 	struct dst_entry *dst = skb_dst(skb);
1295 	int rc = 0;
1296 
1297 	if (!dst)
1298 		goto discard;
1299 
1300 	if (!neigh_event_send(neigh, skb)) {
1301 		int err;
1302 		struct net_device *dev = neigh->dev;
1303 		unsigned int seq;
1304 
1305 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1306 			neigh_hh_init(neigh, dst);
1307 
1308 		do {
1309 			__skb_pull(skb, skb_network_offset(skb));
1310 			seq = read_seqbegin(&neigh->ha_lock);
1311 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1312 					      neigh->ha, NULL, skb->len);
1313 		} while (read_seqretry(&neigh->ha_lock, seq));
1314 
1315 		if (err >= 0)
1316 			rc = dev_queue_xmit(skb);
1317 		else
1318 			goto out_kfree_skb;
1319 	}
1320 out:
1321 	return rc;
1322 discard:
1323 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1324 		      dst, neigh);
1325 out_kfree_skb:
1326 	rc = -EINVAL;
1327 	kfree_skb(skb);
1328 	goto out;
1329 }
1330 EXPORT_SYMBOL(neigh_resolve_output);
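/*
 * Editorial note on the ha_lock loop above: neigh->ha can be rewritten
 * by neigh_update() at any time, so readers copy it under a seqlock and
 * retry if a writer interleaved.  In isolation the pattern is:
 *
 *	unsigned int seq;
 *	do {
 *		seq = read_seqbegin(&neigh->ha_lock);
 *		memcpy(dest, neigh->ha, dev->addr_len);
 *	} while (read_seqretry(&neigh->ha_lock, seq));
 */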
1331 
1332 /* As fast as possible without hh cache */
1333 
1334 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1335 {
1336 	struct net_device *dev = neigh->dev;
1337 	unsigned int seq;
1338 	int err;
1339 
1340 	do {
1341 		__skb_pull(skb, skb_network_offset(skb));
1342 		seq = read_seqbegin(&neigh->ha_lock);
1343 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1344 				      neigh->ha, NULL, skb->len);
1345 	} while (read_seqretry(&neigh->ha_lock, seq));
1346 
1347 	if (err >= 0)
1348 		err = dev_queue_xmit(skb);
1349 	else {
1350 		err = -EINVAL;
1351 		kfree_skb(skb);
1352 	}
1353 	return err;
1354 }
1355 EXPORT_SYMBOL(neigh_connected_output);
1356 
1357 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1358 {
1359 	return dev_queue_xmit(skb);
1360 }
1361 EXPORT_SYMBOL(neigh_direct_output);
1362 
1363 static void neigh_proxy_process(unsigned long arg)
1364 {
1365 	struct neigh_table *tbl = (struct neigh_table *)arg;
1366 	long sched_next = 0;
1367 	unsigned long now = jiffies;
1368 	struct sk_buff *skb, *n;
1369 
1370 	spin_lock(&tbl->proxy_queue.lock);
1371 
1372 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1373 		long tdif = NEIGH_CB(skb)->sched_next - now;
1374 
1375 		if (tdif <= 0) {
1376 			struct net_device *dev = skb->dev;
1377 
1378 			__skb_unlink(skb, &tbl->proxy_queue);
1379 			if (tbl->proxy_redo && netif_running(dev)) {
1380 				rcu_read_lock();
1381 				tbl->proxy_redo(skb);
1382 				rcu_read_unlock();
1383 			} else {
1384 				kfree_skb(skb);
1385 			}
1386 
1387 			dev_put(dev);
1388 		} else if (!sched_next || tdif < sched_next)
1389 			sched_next = tdif;
1390 	}
1391 	del_timer(&tbl->proxy_timer);
1392 	if (sched_next)
1393 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1394 	spin_unlock(&tbl->proxy_queue.lock);
1395 }
1396 
1397 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1398 		    struct sk_buff *skb)
1399 {
1400 	unsigned long now = jiffies;
1401 	unsigned long sched_next = now + (net_random() % p->proxy_delay);
1402 
1403 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1404 		kfree_skb(skb);
1405 		return;
1406 	}
1407 
1408 	NEIGH_CB(skb)->sched_next = sched_next;
1409 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1410 
1411 	spin_lock(&tbl->proxy_queue.lock);
1412 	if (del_timer(&tbl->proxy_timer)) {
1413 		if (time_before(tbl->proxy_timer.expires, sched_next))
1414 			sched_next = tbl->proxy_timer.expires;
1415 	}
1416 	skb_dst_drop(skb);
1417 	dev_hold(skb->dev);
1418 	__skb_queue_tail(&tbl->proxy_queue, skb);
1419 	mod_timer(&tbl->proxy_timer, sched_next);
1420 	spin_unlock(&tbl->proxy_queue.lock);
1421 }
1422 EXPORT_SYMBOL(pneigh_enqueue);
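/*
 * Editorial note: sched_next above carries a random jitter in
 * [now, now + proxy_delay), presumably so that several proxies on the
 * same segment do not all answer a request at the same instant; the
 * queue is capped at roughly proxy_qlen packets.
 */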
1423 
1424 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1425 						      struct net *net, int ifindex)
1426 {
1427 	struct neigh_parms *p;
1428 
1429 	for (p = &tbl->parms; p; p = p->next) {
1430 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1431 		    (!p->dev && !ifindex))
1432 			return p;
1433 	}
1434 
1435 	return NULL;
1436 }
1437 
1438 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1439 				      struct neigh_table *tbl)
1440 {
1441 	struct neigh_parms *p, *ref;
1442 	struct net *net = dev_net(dev);
1443 	const struct net_device_ops *ops = dev->netdev_ops;
1444 
1445 	ref = lookup_neigh_parms(tbl, net, 0);
1446 	if (!ref)
1447 		return NULL;
1448 
1449 	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1450 	if (p) {
1451 		p->tbl		  = tbl;
1452 		atomic_set(&p->refcnt, 1);
1453 		p->reachable_time =
1454 				neigh_rand_reach_time(p->base_reachable_time);
1455 
1456 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1457 			kfree(p);
1458 			return NULL;
1459 		}
1460 
1461 		dev_hold(dev);
1462 		p->dev = dev;
1463 		write_pnet(&p->net, hold_net(net));
1464 		p->sysctl_table = NULL;
1465 		write_lock_bh(&tbl->lock);
1466 		p->next		= tbl->parms.next;
1467 		tbl->parms.next = p;
1468 		write_unlock_bh(&tbl->lock);
1469 	}
1470 	return p;
1471 }
1472 EXPORT_SYMBOL(neigh_parms_alloc);
1473 
1474 static void neigh_rcu_free_parms(struct rcu_head *head)
1475 {
1476 	struct neigh_parms *parms =
1477 		container_of(head, struct neigh_parms, rcu_head);
1478 
1479 	neigh_parms_put(parms);
1480 }
1481 
1482 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1483 {
1484 	struct neigh_parms **p;
1485 
1486 	if (!parms || parms == &tbl->parms)
1487 		return;
1488 	write_lock_bh(&tbl->lock);
1489 	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1490 		if (*p == parms) {
1491 			*p = parms->next;
1492 			parms->dead = 1;
1493 			write_unlock_bh(&tbl->lock);
1494 			if (parms->dev)
1495 				dev_put(parms->dev);
1496 			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1497 			return;
1498 		}
1499 	}
1500 	write_unlock_bh(&tbl->lock);
1501 	NEIGH_PRINTK1("neigh_parms_release: not found\n");
1502 }
1503 EXPORT_SYMBOL(neigh_parms_release);
1504 
1505 static void neigh_parms_destroy(struct neigh_parms *parms)
1506 {
1507 	release_net(neigh_parms_net(parms));
1508 	kfree(parms);
1509 }
1510 
1511 static struct lock_class_key neigh_table_proxy_queue_class;
1512 
1513 static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1514 {
1515 	unsigned long now = jiffies;
1516 	unsigned long phsize;
1517 
1518 	write_pnet(&tbl->parms.net, &init_net);
1519 	atomic_set(&tbl->parms.refcnt, 1);
1520 	tbl->parms.reachable_time =
1521 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
1522 
1523 	tbl->stats = alloc_percpu(struct neigh_statistics);
1524 	if (!tbl->stats)
1525 		panic("cannot create neighbour cache statistics");
1526 
1527 #ifdef CONFIG_PROC_FS
1528 	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1529 			      &neigh_stat_seq_fops, tbl))
1530 		panic("cannot create neighbour proc dir entry");
1531 #endif
1532 
1533 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1534 
1535 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1536 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1537 
1538 	if (!tbl->nht || !tbl->phash_buckets)
1539 		panic("cannot allocate neighbour cache hashes");
1540 
1541 	if (!tbl->entry_size)
1542 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1543 					tbl->key_len, NEIGH_PRIV_ALIGN);
1544 	else
1545 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1546 
1547 	rwlock_init(&tbl->lock);
1548 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1549 	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1550 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1551 	skb_queue_head_init_class(&tbl->proxy_queue,
1552 			&neigh_table_proxy_queue_class);
1553 
1554 	tbl->last_flush = now;
1555 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1556 }
1557 
1558 void neigh_table_init(struct neigh_table *tbl)
1559 {
1560 	struct neigh_table *tmp;
1561 
1562 	neigh_table_init_no_netlink(tbl);
1563 	write_lock(&neigh_tbl_lock);
1564 	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1565 		if (tmp->family == tbl->family)
1566 			break;
1567 	}
1568 	tbl->next	= neigh_tables;
1569 	neigh_tables	= tbl;
1570 	write_unlock(&neigh_tbl_lock);
1571 
1572 	if (unlikely(tmp)) {
1573 		pr_err("Registering multiple tables for family %d\n",
1574 		       tbl->family);
1575 		dump_stack();
1576 	}
1577 }
1578 EXPORT_SYMBOL(neigh_table_init);
1579 
1580 int neigh_table_clear(struct neigh_table *tbl)
1581 {
1582 	struct neigh_table **tp;
1583 
1584 	/* This is not clean... Fix it so the IPv6 module can be unloaded safely */
1585 	cancel_delayed_work_sync(&tbl->gc_work);
1586 	del_timer_sync(&tbl->proxy_timer);
1587 	pneigh_queue_purge(&tbl->proxy_queue);
1588 	neigh_ifdown(tbl, NULL);
1589 	if (atomic_read(&tbl->entries))
1590 		pr_crit("neighbour leakage\n");
1591 	write_lock(&neigh_tbl_lock);
1592 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1593 		if (*tp == tbl) {
1594 			*tp = tbl->next;
1595 			break;
1596 		}
1597 	}
1598 	write_unlock(&neigh_tbl_lock);
1599 
1600 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1601 		 neigh_hash_free_rcu);
1602 	tbl->nht = NULL;
1603 
1604 	kfree(tbl->phash_buckets);
1605 	tbl->phash_buckets = NULL;
1606 
1607 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1608 
1609 	free_percpu(tbl->stats);
1610 	tbl->stats = NULL;
1611 
1612 	return 0;
1613 }
1614 EXPORT_SYMBOL(neigh_table_clear);
1615 
1616 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1617 {
1618 	struct net *net = sock_net(skb->sk);
1619 	struct ndmsg *ndm;
1620 	struct nlattr *dst_attr;
1621 	struct neigh_table *tbl;
1622 	struct net_device *dev = NULL;
1623 	int err = -EINVAL;
1624 
1625 	ASSERT_RTNL();
1626 	if (nlmsg_len(nlh) < sizeof(*ndm))
1627 		goto out;
1628 
1629 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1630 	if (dst_attr == NULL)
1631 		goto out;
1632 
1633 	ndm = nlmsg_data(nlh);
1634 	if (ndm->ndm_ifindex) {
1635 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1636 		if (dev == NULL) {
1637 			err = -ENODEV;
1638 			goto out;
1639 		}
1640 	}
1641 
1642 	read_lock(&neigh_tbl_lock);
1643 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1644 		struct neighbour *neigh;
1645 
1646 		if (tbl->family != ndm->ndm_family)
1647 			continue;
1648 		read_unlock(&neigh_tbl_lock);
1649 
1650 		if (nla_len(dst_attr) < tbl->key_len)
1651 			goto out;
1652 
1653 		if (ndm->ndm_flags & NTF_PROXY) {
1654 			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1655 			goto out;
1656 		}
1657 
1658 		if (dev == NULL)
1659 			goto out;
1660 
1661 		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1662 		if (neigh == NULL) {
1663 			err = -ENOENT;
1664 			goto out;
1665 		}
1666 
1667 		err = neigh_update(neigh, NULL, NUD_FAILED,
1668 				   NEIGH_UPDATE_F_OVERRIDE |
1669 				   NEIGH_UPDATE_F_ADMIN);
1670 		neigh_release(neigh);
1671 		goto out;
1672 	}
1673 	read_unlock(&neigh_tbl_lock);
1674 	err = -EAFNOSUPPORT;
1675 
1676 out:
1677 	return err;
1678 }
1679 
1680 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1681 {
1682 	struct net *net = sock_net(skb->sk);
1683 	struct ndmsg *ndm;
1684 	struct nlattr *tb[NDA_MAX+1];
1685 	struct neigh_table *tbl;
1686 	struct net_device *dev = NULL;
1687 	int err;
1688 
1689 	ASSERT_RTNL();
1690 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1691 	if (err < 0)
1692 		goto out;
1693 
1694 	err = -EINVAL;
1695 	if (tb[NDA_DST] == NULL)
1696 		goto out;
1697 
1698 	ndm = nlmsg_data(nlh);
1699 	if (ndm->ndm_ifindex) {
1700 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1701 		if (dev == NULL) {
1702 			err = -ENODEV;
1703 			goto out;
1704 		}
1705 
1706 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1707 			goto out;
1708 	}
1709 
1710 	read_lock(&neigh_tbl_lock);
1711 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1712 		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1713 		struct neighbour *neigh;
1714 		void *dst, *lladdr;
1715 
1716 		if (tbl->family != ndm->ndm_family)
1717 			continue;
1718 		read_unlock(&neigh_tbl_lock);
1719 
1720 		if (nla_len(tb[NDA_DST]) < tbl->key_len)
1721 			goto out;
1722 		dst = nla_data(tb[NDA_DST]);
1723 		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1724 
1725 		if (ndm->ndm_flags & NTF_PROXY) {
1726 			struct pneigh_entry *pn;
1727 
1728 			err = -ENOBUFS;
1729 			pn = pneigh_lookup(tbl, net, dst, dev, 1);
1730 			if (pn) {
1731 				pn->flags = ndm->ndm_flags;
1732 				err = 0;
1733 			}
1734 			goto out;
1735 		}
1736 
1737 		if (dev == NULL)
1738 			goto out;
1739 
1740 		neigh = neigh_lookup(tbl, dst, dev);
1741 		if (neigh == NULL) {
1742 			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1743 				err = -ENOENT;
1744 				goto out;
1745 			}
1746 
1747 			neigh = __neigh_lookup_errno(tbl, dst, dev);
1748 			if (IS_ERR(neigh)) {
1749 				err = PTR_ERR(neigh);
1750 				goto out;
1751 			}
1752 		} else {
1753 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1754 				err = -EEXIST;
1755 				neigh_release(neigh);
1756 				goto out;
1757 			}
1758 
1759 			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1760 				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1761 		}
1762 
1763 		if (ndm->ndm_flags & NTF_USE) {
1764 			neigh_event_send(neigh, NULL);
1765 			err = 0;
1766 		} else
1767 			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1768 		neigh_release(neigh);
1769 		goto out;
1770 	}
1771 
1772 	read_unlock(&neigh_tbl_lock);
1773 	err = -EAFNOSUPPORT;
1774 out:
1775 	return err;
1776 }
1777 
1778 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1779 {
1780 	struct nlattr *nest;
1781 
1782 	nest = nla_nest_start(skb, NDTA_PARMS);
1783 	if (nest == NULL)
1784 		return -ENOBUFS;
1785 
1786 	if ((parms->dev &&
1787 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1788 	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1789 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1790 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1791 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1792 			parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1793 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1794 	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1795 	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
1796 	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
1797 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1798 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1799 			  parms->base_reachable_time) ||
1800 	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
1801 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1802 			  parms->delay_probe_time) ||
1803 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
1804 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
1805 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
1806 	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
1807 		goto nla_put_failure;
1808 	return nla_nest_end(skb, nest);
1809 
1810 nla_put_failure:
1811 	nla_nest_cancel(skb, nest);
1812 	return -EMSGSIZE;
1813 }
1814 
1815 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1816 			      u32 pid, u32 seq, int type, int flags)
1817 {
1818 	struct nlmsghdr *nlh;
1819 	struct ndtmsg *ndtmsg;
1820 
1821 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1822 	if (nlh == NULL)
1823 		return -EMSGSIZE;
1824 
1825 	ndtmsg = nlmsg_data(nlh);
1826 
1827 	read_lock_bh(&tbl->lock);
1828 	ndtmsg->ndtm_family = tbl->family;
1829 	ndtmsg->ndtm_pad1   = 0;
1830 	ndtmsg->ndtm_pad2   = 0;
1831 
1832 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1833 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1834 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1835 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1836 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1837 		goto nla_put_failure;
1838 	{
1839 		unsigned long now = jiffies;
1840 		unsigned int flush_delta = now - tbl->last_flush;
1841 		unsigned int rand_delta = now - tbl->last_rand;
1842 		struct neigh_hash_table *nht;
1843 		struct ndt_config ndc = {
1844 			.ndtc_key_len		= tbl->key_len,
1845 			.ndtc_entry_size	= tbl->entry_size,
1846 			.ndtc_entries		= atomic_read(&tbl->entries),
1847 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1848 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1849 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1850 		};
1851 
1852 		rcu_read_lock_bh();
1853 		nht = rcu_dereference_bh(tbl->nht);
1854 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1855 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1856 		rcu_read_unlock_bh();
1857 
1858 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1859 			goto nla_put_failure;
1860 	}
1861 
1862 	{
1863 		int cpu;
1864 		struct ndt_stats ndst;
1865 
1866 		memset(&ndst, 0, sizeof(ndst));
1867 
1868 		for_each_possible_cpu(cpu) {
1869 			struct neigh_statistics	*st;
1870 
1871 			st = per_cpu_ptr(tbl->stats, cpu);
1872 			ndst.ndts_allocs		+= st->allocs;
1873 			ndst.ndts_destroys		+= st->destroys;
1874 			ndst.ndts_hash_grows		+= st->hash_grows;
1875 			ndst.ndts_res_failed		+= st->res_failed;
1876 			ndst.ndts_lookups		+= st->lookups;
1877 			ndst.ndts_hits			+= st->hits;
1878 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1879 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1880 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1881 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1882 		}
1883 
1884 		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1885 			goto nla_put_failure;
1886 	}
1887 
1888 	BUG_ON(tbl->parms.dev);
1889 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1890 		goto nla_put_failure;
1891 
1892 	read_unlock_bh(&tbl->lock);
1893 	return nlmsg_end(skb, nlh);
1894 
1895 nla_put_failure:
1896 	read_unlock_bh(&tbl->lock);
1897 	nlmsg_cancel(skb, nlh);
1898 	return -EMSGSIZE;
1899 }
1900 
1901 static int neightbl_fill_param_info(struct sk_buff *skb,
1902 				    struct neigh_table *tbl,
1903 				    struct neigh_parms *parms,
1904 				    u32 pid, u32 seq, int type,
1905 				    unsigned int flags)
1906 {
1907 	struct ndtmsg *ndtmsg;
1908 	struct nlmsghdr *nlh;
1909 
1910 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1911 	if (nlh == NULL)
1912 		return -EMSGSIZE;
1913 
1914 	ndtmsg = nlmsg_data(nlh);
1915 
1916 	read_lock_bh(&tbl->lock);
1917 	ndtmsg->ndtm_family = tbl->family;
1918 	ndtmsg->ndtm_pad1   = 0;
1919 	ndtmsg->ndtm_pad2   = 0;
1920 
1921 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1922 	    neightbl_fill_parms(skb, parms) < 0)
1923 		goto errout;
1924 
1925 	read_unlock_bh(&tbl->lock);
1926 	return nlmsg_end(skb, nlh);
1927 errout:
1928 	read_unlock_bh(&tbl->lock);
1929 	nlmsg_cancel(skb, nlh);
1930 	return -EMSGSIZE;
1931 }
1932 
1933 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1934 	[NDTA_NAME]		= { .type = NLA_STRING },
1935 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1936 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1937 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1938 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1939 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1940 };
1941 
1942 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1943 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1944 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1945 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1946 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1947 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1948 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1949 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1950 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1951 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1952 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1953 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1954 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1955 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1956 };
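
/*
 * The NLA_U64 entries above carry millisecond values.  neightbl_set()
 * below converts them to jiffies with nla_get_msecs(), the mirror of the
 * nla_put_msecs() calls used when dumping; a minimal sketch of the round
 * trip:
 *
 *	nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
 *	...
 *	parms->gc_staletime = nla_get_msecs(tbp[NDTPA_GC_STALETIME]);
 */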
1957 
1958 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1959 {
1960 	struct net *net = sock_net(skb->sk);
1961 	struct neigh_table *tbl;
1962 	struct ndtmsg *ndtmsg;
1963 	struct nlattr *tb[NDTA_MAX+1];
1964 	int err;
1965 
1966 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1967 			  nl_neightbl_policy);
1968 	if (err < 0)
1969 		goto errout;
1970 
1971 	if (tb[NDTA_NAME] == NULL) {
1972 		err = -EINVAL;
1973 		goto errout;
1974 	}
1975 
1976 	ndtmsg = nlmsg_data(nlh);
1977 	read_lock(&neigh_tbl_lock);
1978 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1979 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1980 			continue;
1981 
1982 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1983 			break;
1984 	}
1985 
1986 	if (tbl == NULL) {
1987 		err = -ENOENT;
1988 		goto errout_locked;
1989 	}
1990 
1991 	/*
1992 	 * We acquire tbl->lock to be nice to the periodic timers and
1993 	 * make sure they always see a consistent set of values.
1994 	 */
1995 	write_lock_bh(&tbl->lock);
1996 
1997 	if (tb[NDTA_PARMS]) {
1998 		struct nlattr *tbp[NDTPA_MAX+1];
1999 		struct neigh_parms *p;
2000 		int i, ifindex = 0;
2001 
2002 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2003 				       nl_ntbl_parm_policy);
2004 		if (err < 0)
2005 			goto errout_tbl_lock;
2006 
2007 		if (tbp[NDTPA_IFINDEX])
2008 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2009 
2010 		p = lookup_neigh_parms(tbl, net, ifindex);
2011 		if (p == NULL) {
2012 			err = -ENOENT;
2013 			goto errout_tbl_lock;
2014 		}
2015 
2016 		for (i = 1; i <= NDTPA_MAX; i++) {
2017 			if (tbp[i] == NULL)
2018 				continue;
2019 
2020 			switch (i) {
2021 			case NDTPA_QUEUE_LEN:
2022 				p->queue_len_bytes = nla_get_u32(tbp[i]) *
2023 						     SKB_TRUESIZE(ETH_FRAME_LEN);
2024 				break;
2025 			case NDTPA_QUEUE_LENBYTES:
2026 				p->queue_len_bytes = nla_get_u32(tbp[i]);
2027 				break;
2028 			case NDTPA_PROXY_QLEN:
2029 				p->proxy_qlen = nla_get_u32(tbp[i]);
2030 				break;
2031 			case NDTPA_APP_PROBES:
2032 				p->app_probes = nla_get_u32(tbp[i]);
2033 				break;
2034 			case NDTPA_UCAST_PROBES:
2035 				p->ucast_probes = nla_get_u32(tbp[i]);
2036 				break;
2037 			case NDTPA_MCAST_PROBES:
2038 				p->mcast_probes = nla_get_u32(tbp[i]);
2039 				break;
2040 			case NDTPA_BASE_REACHABLE_TIME:
2041 				p->base_reachable_time = nla_get_msecs(tbp[i]);
2042 				break;
2043 			case NDTPA_GC_STALETIME:
2044 				p->gc_staletime = nla_get_msecs(tbp[i]);
2045 				break;
2046 			case NDTPA_DELAY_PROBE_TIME:
2047 				p->delay_probe_time = nla_get_msecs(tbp[i]);
2048 				break;
2049 			case NDTPA_RETRANS_TIME:
2050 				p->retrans_time = nla_get_msecs(tbp[i]);
2051 				break;
2052 			case NDTPA_ANYCAST_DELAY:
2053 				p->anycast_delay = nla_get_msecs(tbp[i]);
2054 				break;
2055 			case NDTPA_PROXY_DELAY:
2056 				p->proxy_delay = nla_get_msecs(tbp[i]);
2057 				break;
2058 			case NDTPA_LOCKTIME:
2059 				p->locktime = nla_get_msecs(tbp[i]);
2060 				break;
2061 			}
2062 		}
2063 	}
2064 
2065 	if (tb[NDTA_THRESH1])
2066 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2067 
2068 	if (tb[NDTA_THRESH2])
2069 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2070 
2071 	if (tb[NDTA_THRESH3])
2072 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2073 
2074 	if (tb[NDTA_GC_INTERVAL])
2075 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2076 
2077 	err = 0;
2078 
2079 errout_tbl_lock:
2080 	write_unlock_bh(&tbl->lock);
2081 errout_locked:
2082 	read_unlock(&neigh_tbl_lock);
2083 errout:
2084 	return err;
2085 }
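
/*
 * For illustration, userspace reaches this handler via RTM_SETNEIGHTBL;
 * with iproute2's "ip ntable" front-end, for example:
 *
 *	# ip ntable change name arp_cache thresh1 512
 *
 * which sends NDTA_NAME plus NDTA_THRESH1 and lands in the
 * tb[NDTA_THRESH1] branch above.
 */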
2086 
2087 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2088 {
2089 	struct net *net = sock_net(skb->sk);
2090 	int family, tidx, nidx = 0;
2091 	int tbl_skip = cb->args[0];
2092 	int neigh_skip = cb->args[1];
2093 	struct neigh_table *tbl;
2094 
2095 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2096 
2097 	read_lock(&neigh_tbl_lock);
2098 	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2099 		struct neigh_parms *p;
2100 
2101 		if (tidx < tbl_skip || (family && tbl->family != family))
2102 			continue;
2103 
2104 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2105 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2106 				       NLM_F_MULTI) <= 0)
2107 			break;
2108 
2109 		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2110 			if (!net_eq(neigh_parms_net(p), net))
2111 				continue;
2112 
2113 			if (nidx < neigh_skip)
2114 				goto next;
2115 
2116 			if (neightbl_fill_param_info(skb, tbl, p,
2117 						     NETLINK_CB(cb->skb).portid,
2118 						     cb->nlh->nlmsg_seq,
2119 						     RTM_NEWNEIGHTBL,
2120 						     NLM_F_MULTI) <= 0)
2121 				goto out;
2122 		next:
2123 			nidx++;
2124 		}
2125 
2126 		neigh_skip = 0;
2127 	}
2128 out:
2129 	read_unlock(&neigh_tbl_lock);
2130 	cb->args[0] = tidx;
2131 	cb->args[1] = nidx;
2132 
2133 	return skb->len;
2134 }
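
/*
 * Resume convention for interrupted dumps: cb->args[0] remembers the
 * table index and cb->args[1] the per-table parms index, so the next
 * invocation of this callback skips everything already emitted.
 */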
2135 
2136 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2137 			   u32 pid, u32 seq, int type, unsigned int flags)
2138 {
2139 	unsigned long now = jiffies;
2140 	struct nda_cacheinfo ci;
2141 	struct nlmsghdr *nlh;
2142 	struct ndmsg *ndm;
2143 
2144 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2145 	if (nlh == NULL)
2146 		return -EMSGSIZE;
2147 
2148 	ndm = nlmsg_data(nlh);
2149 	ndm->ndm_family	 = neigh->ops->family;
2150 	ndm->ndm_pad1    = 0;
2151 	ndm->ndm_pad2    = 0;
2152 	ndm->ndm_flags	 = neigh->flags;
2153 	ndm->ndm_type	 = neigh->type;
2154 	ndm->ndm_ifindex = neigh->dev->ifindex;
2155 
2156 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2157 		goto nla_put_failure;
2158 
2159 	read_lock_bh(&neigh->lock);
2160 	ndm->ndm_state	 = neigh->nud_state;
2161 	if (neigh->nud_state & NUD_VALID) {
2162 		char haddr[MAX_ADDR_LEN];
2163 
2164 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2165 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2166 			read_unlock_bh(&neigh->lock);
2167 			goto nla_put_failure;
2168 		}
2169 	}
2170 
2171 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2172 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2173 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2174 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2175 	read_unlock_bh(&neigh->lock);
2176 
2177 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2178 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2179 		goto nla_put_failure;
2180 
2181 	return nlmsg_end(skb, nlh);
2182 
2183 nla_put_failure:
2184 	nlmsg_cancel(skb, nlh);
2185 	return -EMSGSIZE;
2186 }
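
/*
 * The nda_cacheinfo timestamps above are exported in USER_HZ ticks via
 * jiffies_to_clock_t(); a hypothetical userspace consumer converts back
 * to seconds with sysconf(_SC_CLK_TCK), e.g.:
 *
 *	double idle = (double)ci->ndm_used / sysconf(_SC_CLK_TCK);
 */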
2187 
2188 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2189 			    u32 pid, u32 seq, int type, unsigned int flags,
2190 			    struct neigh_table *tbl)
2191 {
2192 	struct nlmsghdr *nlh;
2193 	struct ndmsg *ndm;
2194 
2195 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2196 	if (nlh == NULL)
2197 		return -EMSGSIZE;
2198 
2199 	ndm = nlmsg_data(nlh);
2200 	ndm->ndm_family	 = tbl->family;
2201 	ndm->ndm_pad1    = 0;
2202 	ndm->ndm_pad2    = 0;
2203 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2204 	ndm->ndm_type	 = RTN_UNICAST;	/* was NDA_DST, a typo with the same numeric value */
2205 	ndm->ndm_ifindex = pn->dev->ifindex;
2206 	ndm->ndm_state	 = NUD_NONE;
2207 
2208 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2209 		goto nla_put_failure;
2210 
2211 	return nlmsg_end(skb, nlh);
2212 
2213 nla_put_failure:
2214 	nlmsg_cancel(skb, nlh);
2215 	return -EMSGSIZE;
2216 }
2217 
2218 static void neigh_update_notify(struct neighbour *neigh)
2219 {
2220 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2221 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2222 }
2223 
2224 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2225 			    struct netlink_callback *cb)
2226 {
2227 	struct net *net = sock_net(skb->sk);
2228 	struct neighbour *n;
2229 	int rc, h, s_h = cb->args[1];
2230 	int idx, s_idx = idx = cb->args[2];
2231 	struct neigh_hash_table *nht;
2232 
2233 	rcu_read_lock_bh();
2234 	nht = rcu_dereference_bh(tbl->nht);
2235 
2236 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2237 		if (h > s_h)
2238 			s_idx = 0;
2239 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2240 		     n != NULL;
2241 		     n = rcu_dereference_bh(n->next)) {
2242 			if (!net_eq(dev_net(n->dev), net))
2243 				continue;
2244 			if (idx < s_idx)
2245 				goto next;
2246 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2247 					    cb->nlh->nlmsg_seq,
2248 					    RTM_NEWNEIGH,
2249 					    NLM_F_MULTI) <= 0) {
2250 				rc = -1;
2251 				goto out;
2252 			}
2253 next:
2254 			idx++;
2255 		}
2256 	}
2257 	rc = skb->len;
2258 out:
2259 	rcu_read_unlock_bh();
2260 	cb->args[1] = h;
2261 	cb->args[2] = idx;
2262 	return rc;
2263 }
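
/*
 * The walk above holds no table lock: rcu_read_lock_bh() plus
 * rcu_dereference_bh() on every ->next pointer make the traversal safe
 * against concurrent unlink and resize, at the cost of possibly missing
 * entries added or removed mid-dump.
 */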
2264 
2265 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2266 			     struct netlink_callback *cb)
2267 {
2268 	struct pneigh_entry *n;
2269 	struct net *net = sock_net(skb->sk);
2270 	int rc, h, s_h = cb->args[3];
2271 	int idx, s_idx = idx = cb->args[4];
2272 
2273 	read_lock_bh(&tbl->lock);
2274 
2275 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2276 		if (h > s_h)
2277 			s_idx = 0;
2278 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2279 			if (dev_net(n->dev) != net)
2280 				continue;
2281 			if (idx < s_idx)
2282 				goto next;
2283 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2284 					    cb->nlh->nlmsg_seq,
2285 					    RTM_NEWNEIGH,
2286 					    NLM_F_MULTI, tbl) <= 0) {
2287 				read_unlock_bh(&tbl->lock);
2288 				rc = -1;
2289 				goto out;
2290 			}
2291 		next:
2292 			idx++;
2293 		}
2294 	}
2295 
2296 	read_unlock_bh(&tbl->lock);
2297 	rc = skb->len;
2298 out:
2299 	cb->args[3] = h;
2300 	cb->args[4] = idx;
2301 	return rc;
2303 }
2304 
2305 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2306 {
2307 	struct neigh_table *tbl;
2308 	int t, family, s_t;
2309 	int proxy = 0;
2310 	int err;
2311 
2312 	read_lock(&neigh_tbl_lock);
2313 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2314 
2315 	/* Check for the presence of a full ndmsg structure; the family
2316 	 * member sits at the same offset in both rtgenmsg and ndmsg.
2317 	 */
2318 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2319 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2320 		proxy = 1;
2321 
2322 	s_t = cb->args[0];
2323 
2324 	for (tbl = neigh_tables, t = 0; tbl;
2325 	     tbl = tbl->next, t++) {
2326 		if (t < s_t || (family && tbl->family != family))
2327 			continue;
2328 		if (t > s_t)
2329 			memset(&cb->args[1], 0, sizeof(cb->args) -
2330 						sizeof(cb->args[0]));
2331 		if (proxy)
2332 			err = pneigh_dump_table(tbl, skb, cb);
2333 		else
2334 			err = neigh_dump_table(tbl, skb, cb);
2335 		if (err < 0)
2336 			break;
2337 	}
2338 	read_unlock(&neigh_tbl_lock);
2339 
2340 	cb->args[0] = t;
2341 	return skb->len;
2342 }
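
/*
 * For illustration: "ip neigh show" sends a bare rtgenmsg and dumps the
 * ordinary cache, while "ip neigh show proxy" sends a full ndmsg with
 * ndm_flags == NTF_PROXY, which is what selects the pneigh_dump_table()
 * branch above.
 */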
2343 
2344 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2345 {
2346 	int chain;
2347 	struct neigh_hash_table *nht;
2348 
2349 	rcu_read_lock_bh();
2350 	nht = rcu_dereference_bh(tbl->nht);
2351 
2352 	read_lock(&tbl->lock); /* avoid resizes */
2353 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2354 		struct neighbour *n;
2355 
2356 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2357 		     n != NULL;
2358 		     n = rcu_dereference_bh(n->next))
2359 			cb(n, cookie);
2360 	}
2361 	read_unlock(&tbl->lock);
2362 	rcu_read_unlock_bh();
2363 }
2364 EXPORT_SYMBOL(neigh_for_each);
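
/*
 * Minimal usage sketch (hypothetical caller; names are illustrative).
 * The callback runs with tbl->lock read-held and BHs disabled, so it
 * must not sleep or call back into the neighbour table:
 *
 *	static void neigh_count_cb(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, neigh_count_cb, &count);
 */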
2365 
2366 /* The tbl->lock must be held as a writer and BH disabled. */
2367 void __neigh_for_each_release(struct neigh_table *tbl,
2368 			      int (*cb)(struct neighbour *))
2369 {
2370 	int chain;
2371 	struct neigh_hash_table *nht;
2372 
2373 	nht = rcu_dereference_protected(tbl->nht,
2374 					lockdep_is_held(&tbl->lock));
2375 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2376 		struct neighbour *n;
2377 		struct neighbour __rcu **np;
2378 
2379 		np = &nht->hash_buckets[chain];
2380 		while ((n = rcu_dereference_protected(*np,
2381 					lockdep_is_held(&tbl->lock))) != NULL) {
2382 			int release;
2383 
2384 			write_lock(&n->lock);
2385 			release = cb(n);
2386 			if (release) {
2387 				rcu_assign_pointer(*np,
2388 					rcu_dereference_protected(n->next,
2389 						lockdep_is_held(&tbl->lock)));
2390 				n->dead = 1;
2391 			} else
2392 				np = &n->next;
2393 			write_unlock(&n->lock);
2394 			if (release)
2395 				neigh_cleanup_and_release(n);
2396 		}
2397 	}
2398 }
2399 EXPORT_SYMBOL(__neigh_for_each_release);
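
/*
 * Expected calling pattern (sketch; my_release_decision is hypothetical).
 * The caller owns the write lock, the callback returns nonzero for
 * entries to unlink, and each unlinked entry is freed via
 * neigh_cleanup_and_release() once its own lock has been dropped:
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, my_release_decision);
 *	write_unlock_bh(&tbl->lock);
 */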
2400 
2401 #ifdef CONFIG_PROC_FS
2402 
2403 static struct neighbour *neigh_get_first(struct seq_file *seq)
2404 {
2405 	struct neigh_seq_state *state = seq->private;
2406 	struct net *net = seq_file_net(seq);
2407 	struct neigh_hash_table *nht = state->nht;
2408 	struct neighbour *n = NULL;
2409 	int bucket;
2410 
2411 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2412 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2413 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2414 
2415 		while (n) {
2416 			if (!net_eq(dev_net(n->dev), net))
2417 				goto next;
2418 			if (state->neigh_sub_iter) {
2419 				loff_t fakep = 0;
2420 				void *v;
2421 
2422 				v = state->neigh_sub_iter(state, n, &fakep);
2423 				if (!v)
2424 					goto next;
2425 			}
2426 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2427 				break;
2428 			if (n->nud_state & ~NUD_NOARP)
2429 				break;
2430 next:
2431 			n = rcu_dereference_bh(n->next);
2432 		}
2433 
2434 		if (n)
2435 			break;
2436 	}
2437 	state->bucket = bucket;
2438 
2439 	return n;
2440 }
2441 
2442 static struct neighbour *neigh_get_next(struct seq_file *seq,
2443 					struct neighbour *n,
2444 					loff_t *pos)
2445 {
2446 	struct neigh_seq_state *state = seq->private;
2447 	struct net *net = seq_file_net(seq);
2448 	struct neigh_hash_table *nht = state->nht;
2449 
2450 	if (state->neigh_sub_iter) {
2451 		void *v = state->neigh_sub_iter(state, n, pos);
2452 		if (v)
2453 			return n;
2454 	}
2455 	n = rcu_dereference_bh(n->next);
2456 
2457 	while (1) {
2458 		while (n) {
2459 			if (!net_eq(dev_net(n->dev), net))
2460 				goto next;
2461 			if (state->neigh_sub_iter) {
2462 				void *v = state->neigh_sub_iter(state, n, pos);
2463 				if (v)
2464 					return n;
2465 				goto next;
2466 			}
2467 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2468 				break;
2469 
2470 			if (n->nud_state & ~NUD_NOARP)
2471 				break;
2472 next:
2473 			n = rcu_dereference_bh(n->next);
2474 		}
2475 
2476 		if (n)
2477 			break;
2478 
2479 		if (++state->bucket >= (1 << nht->hash_shift))
2480 			break;
2481 
2482 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2483 	}
2484 
2485 	if (n && pos)
2486 		--(*pos);
2487 	return n;
2488 }
2489 
2490 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2491 {
2492 	struct neighbour *n = neigh_get_first(seq);
2493 
2494 	if (n) {
2495 		--(*pos);
2496 		while (*pos) {
2497 			n = neigh_get_next(seq, n, pos);
2498 			if (!n)
2499 				break;
2500 		}
2501 	}
2502 	return *pos ? NULL : n;
2503 }
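
/*
 * Position handling above: *pos acts as a countdown of records still to
 * be skipped; neigh_get_next() decrements it on every successful step,
 * so the target entry is reached exactly when *pos drops to zero.
 */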
2504 
2505 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2506 {
2507 	struct neigh_seq_state *state = seq->private;
2508 	struct net *net = seq_file_net(seq);
2509 	struct neigh_table *tbl = state->tbl;
2510 	struct pneigh_entry *pn = NULL;
2511 	int bucket;
2512 
2513 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2514 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2515 		pn = tbl->phash_buckets[bucket];
2516 		while (pn && !net_eq(pneigh_net(pn), net))
2517 			pn = pn->next;
2518 		if (pn)
2519 			break;
2520 	}
2521 	state->bucket = bucket;
2522 
2523 	return pn;
2524 }
2525 
2526 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2527 					    struct pneigh_entry *pn,
2528 					    loff_t *pos)
2529 {
2530 	struct neigh_seq_state *state = seq->private;
2531 	struct net *net = seq_file_net(seq);
2532 	struct neigh_table *tbl = state->tbl;
2533 
2534 	do {
2535 		pn = pn->next;
2536 	} while (pn && !net_eq(pneigh_net(pn), net));
2537 
2538 	while (!pn) {
2539 		if (++state->bucket > PNEIGH_HASHMASK)
2540 			break;
2541 		pn = tbl->phash_buckets[state->bucket];
2542 		while (pn && !net_eq(pneigh_net(pn), net))
2543 			pn = pn->next;
2544 		if (pn)
2545 			break;
2546 	}
2547 
2548 	if (pn && pos)
2549 		--(*pos);
2550 
2551 	return pn;
2552 }
2553 
2554 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2555 {
2556 	struct pneigh_entry *pn = pneigh_get_first(seq);
2557 
2558 	if (pn) {
2559 		--(*pos);
2560 		while (*pos) {
2561 			pn = pneigh_get_next(seq, pn, pos);
2562 			if (!pn)
2563 				break;
2564 		}
2565 	}
2566 	return *pos ? NULL : pn;
2567 }
2568 
2569 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2570 {
2571 	struct neigh_seq_state *state = seq->private;
2572 	void *rc;
2573 	loff_t idxpos = *pos;
2574 
2575 	rc = neigh_get_idx(seq, &idxpos);
2576 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2577 		rc = pneigh_get_idx(seq, &idxpos);
2578 
2579 	return rc;
2580 }
2581 
2582 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2583 	__acquires(rcu_bh)
2584 {
2585 	struct neigh_seq_state *state = seq->private;
2586 
2587 	state->tbl = tbl;
2588 	state->bucket = 0;
2589 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2590 
2591 	rcu_read_lock_bh();
2592 	state->nht = rcu_dereference_bh(tbl->nht);
2593 
2594 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2595 }
2596 EXPORT_SYMBOL(neigh_seq_start);
2597 
2598 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2599 {
2600 	struct neigh_seq_state *state;
2601 	void *rc;
2602 
2603 	if (v == SEQ_START_TOKEN) {
2604 		rc = neigh_get_first(seq);
2605 		goto out;
2606 	}
2607 
2608 	state = seq->private;
2609 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2610 		rc = neigh_get_next(seq, v, NULL);
2611 		if (rc)
2612 			goto out;
2613 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2614 			rc = pneigh_get_first(seq);
2615 	} else {
2616 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2617 		rc = pneigh_get_next(seq, v, NULL);
2618 	}
2619 out:
2620 	++(*pos);
2621 	return rc;
2622 }
2623 EXPORT_SYMBOL(neigh_seq_next);
2624 
2625 void neigh_seq_stop(struct seq_file *seq, void *v)
2626 	__releases(rcu_bh)
2627 {
2628 	rcu_read_unlock_bh();
2629 }
2630 EXPORT_SYMBOL(neigh_seq_stop);
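
/*
 * Protocols plug these three helpers into their own seq_file operations;
 * a sketch modelled on the IPv4 ARP code:
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 */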
2631 
2632 /* statistics via seq_file */
2633 
2634 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2635 {
2636 	struct neigh_table *tbl = seq->private;
2637 	int cpu;
2638 
2639 	if (*pos == 0)
2640 		return SEQ_START_TOKEN;
2641 
2642 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2643 		if (!cpu_possible(cpu))
2644 			continue;
2645 		*pos = cpu+1;
2646 		return per_cpu_ptr(tbl->stats, cpu);
2647 	}
2648 	return NULL;
2649 }
2650 
2651 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2652 {
2653 	struct neigh_table *tbl = seq->private;
2654 	int cpu;
2655 
2656 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2657 		if (!cpu_possible(cpu))
2658 			continue;
2659 		*pos = cpu+1;
2660 		return per_cpu_ptr(tbl->stats, cpu);
2661 	}
2662 	return NULL;
2663 }
2664 
2665 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2666 {
2667 
2668 }
2669 
2670 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2671 {
2672 	struct neigh_table *tbl = seq->private;
2673 	struct neigh_statistics *st = v;
2674 
2675 	if (v == SEQ_START_TOKEN) {
2676 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
2677 		return 0;
2678 	}
2679 
2680 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2681 			"%08lx %08lx  %08lx %08lx %08lx\n",
2682 		   atomic_read(&tbl->entries),
2683 
2684 		   st->allocs,
2685 		   st->destroys,
2686 		   st->hash_grows,
2687 
2688 		   st->lookups,
2689 		   st->hits,
2690 
2691 		   st->res_failed,
2692 
2693 		   st->rcv_probes_mcast,
2694 		   st->rcv_probes_ucast,
2695 
2696 		   st->periodic_gc_runs,
2697 		   st->forced_gc_runs,
2698 		   st->unres_discards
2699 		   );
2700 
2701 	return 0;
2702 }
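
/*
 * Output format note: each non-header row of /proc/net/stat/<tbl->id>
 * (e.g. arp_cache) shows one possible CPU's counters in hex; the first
 * column repeats the global entry count on every row, it is not a
 * per-CPU figure.
 */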
2703 
2704 static const struct seq_operations neigh_stat_seq_ops = {
2705 	.start	= neigh_stat_seq_start,
2706 	.next	= neigh_stat_seq_next,
2707 	.stop	= neigh_stat_seq_stop,
2708 	.show	= neigh_stat_seq_show,
2709 };
2710 
2711 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2712 {
2713 	int ret = seq_open(file, &neigh_stat_seq_ops);
2714 
2715 	if (!ret) {
2716 		struct seq_file *sf = file->private_data;
2717 		sf->private = PDE(inode)->data;
2718 	}
2719 	return ret;
2720 }
2721 
2722 static const struct file_operations neigh_stat_seq_fops = {
2723 	.owner	 = THIS_MODULE,
2724 	.open 	 = neigh_stat_seq_open,
2725 	.read	 = seq_read,
2726 	.llseek	 = seq_lseek,
2727 	.release = seq_release,
2728 };
2729 
2730 #endif /* CONFIG_PROC_FS */
2731 
2732 static inline size_t neigh_nlmsg_size(void)
2733 {
2734 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2735 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2736 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2737 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2738 	       + nla_total_size(4); /* NDA_PROBES */
2739 }
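
/*
 * Deliberately a worst-case estimate: both address attributes are costed
 * at MAX_ADDR_LEN although neigh_fill_info() emits only tbl->key_len and
 * dev->addr_len bytes, so the GFP_ATOMIC allocation in __neigh_notify()
 * below can never come up short.
 */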
2740 
2741 static void __neigh_notify(struct neighbour *n, int type, int flags)
2742 {
2743 	struct net *net = dev_net(n->dev);
2744 	struct sk_buff *skb;
2745 	int err = -ENOBUFS;
2746 
2747 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2748 	if (skb == NULL)
2749 		goto errout;
2750 
2751 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2752 	if (err < 0) {
2753 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2754 		WARN_ON(err == -EMSGSIZE);
2755 		kfree_skb(skb);
2756 		goto errout;
2757 	}
2758 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2759 	return;
2760 errout:
2761 	if (err < 0)
2762 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2763 }
2764 
2765 #ifdef CONFIG_ARPD
2766 void neigh_app_ns(struct neighbour *n)
2767 {
2768 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2769 }
2770 EXPORT_SYMBOL(neigh_app_ns);
2771 #endif /* CONFIG_ARPD */
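
/*
 * The RTM_GETNEIGH + NLM_F_REQUEST message above is multicast to
 * RTNLGRP_NEIGH; a userspace resolver (historically arpd) listens there
 * and is expected to answer with an RTM_NEWNEIGH update for the
 * requested address.
 */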
2772 
2773 #ifdef CONFIG_SYSCTL
2774 static int zero;
2775 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2776 
2777 static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2778 			   size_t *lenp, loff_t *ppos)
2779 {
2780 	int size, ret;
2781 	ctl_table tmp = *ctl;
2782 
2783 	tmp.extra1 = &zero;
2784 	tmp.extra2 = &unres_qlen_max;
2785 	tmp.data = &size;
2786 
2787 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2788 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2789 
2790 	if (write && !ret)
2791 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2792 	return ret;
2793 }
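
/*
 * Worked example (the exact factor is architecture-dependent): with
 * SKB_TRUESIZE(ETH_FRAME_LEN) of roughly 2k, writing 101 to the legacy
 * "unres_qlen" file stores about 101 * 2k in queue_len_bytes, and a
 * subsequent read performs the inverse division, keeping the
 * packet-count and byte-count views of the same limit consistent.
 */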
2794 
2795 enum {
2796 	NEIGH_VAR_MCAST_PROBE,
2797 	NEIGH_VAR_UCAST_PROBE,
2798 	NEIGH_VAR_APP_PROBE,
2799 	NEIGH_VAR_RETRANS_TIME,
2800 	NEIGH_VAR_BASE_REACHABLE_TIME,
2801 	NEIGH_VAR_DELAY_PROBE_TIME,
2802 	NEIGH_VAR_GC_STALETIME,
2803 	NEIGH_VAR_QUEUE_LEN,
2804 	NEIGH_VAR_QUEUE_LEN_BYTES,
2805 	NEIGH_VAR_PROXY_QLEN,
2806 	NEIGH_VAR_ANYCAST_DELAY,
2807 	NEIGH_VAR_PROXY_DELAY,
2808 	NEIGH_VAR_LOCKTIME,
2809 	NEIGH_VAR_RETRANS_TIME_MS,
2810 	NEIGH_VAR_BASE_REACHABLE_TIME_MS,
2811 	NEIGH_VAR_GC_INTERVAL,
2812 	NEIGH_VAR_GC_THRESH1,
2813 	NEIGH_VAR_GC_THRESH2,
2814 	NEIGH_VAR_GC_THRESH3,
2815 	NEIGH_VAR_MAX
2816 };
2817 
2818 static struct neigh_sysctl_table {
2819 	struct ctl_table_header *sysctl_header;
2820 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2821 } neigh_sysctl_template __read_mostly = {
2822 	.neigh_vars = {
2823 		[NEIGH_VAR_MCAST_PROBE] = {
2824 			.procname	= "mcast_solicit",
2825 			.maxlen		= sizeof(int),
2826 			.mode		= 0644,
2827 			.proc_handler	= proc_dointvec,
2828 		},
2829 		[NEIGH_VAR_UCAST_PROBE] = {
2830 			.procname	= "ucast_solicit",
2831 			.maxlen		= sizeof(int),
2832 			.mode		= 0644,
2833 			.proc_handler	= proc_dointvec,
2834 		},
2835 		[NEIGH_VAR_APP_PROBE] = {
2836 			.procname	= "app_solicit",
2837 			.maxlen		= sizeof(int),
2838 			.mode		= 0644,
2839 			.proc_handler	= proc_dointvec,
2840 		},
2841 		[NEIGH_VAR_RETRANS_TIME] = {
2842 			.procname	= "retrans_time",
2843 			.maxlen		= sizeof(int),
2844 			.mode		= 0644,
2845 			.proc_handler	= proc_dointvec_userhz_jiffies,
2846 		},
2847 		[NEIGH_VAR_BASE_REACHABLE_TIME] = {
2848 			.procname	= "base_reachable_time",
2849 			.maxlen		= sizeof(int),
2850 			.mode		= 0644,
2851 			.proc_handler	= proc_dointvec_jiffies,
2852 		},
2853 		[NEIGH_VAR_DELAY_PROBE_TIME] = {
2854 			.procname	= "delay_first_probe_time",
2855 			.maxlen		= sizeof(int),
2856 			.mode		= 0644,
2857 			.proc_handler	= proc_dointvec_jiffies,
2858 		},
2859 		[NEIGH_VAR_GC_STALETIME] = {
2860 			.procname	= "gc_stale_time",
2861 			.maxlen		= sizeof(int),
2862 			.mode		= 0644,
2863 			.proc_handler	= proc_dointvec_jiffies,
2864 		},
2865 		[NEIGH_VAR_QUEUE_LEN] = {
2866 			.procname	= "unres_qlen",
2867 			.maxlen		= sizeof(int),
2868 			.mode		= 0644,
2869 			.proc_handler	= proc_unres_qlen,
2870 		},
2871 		[NEIGH_VAR_QUEUE_LEN_BYTES] = {
2872 			.procname	= "unres_qlen_bytes",
2873 			.maxlen		= sizeof(int),
2874 			.mode		= 0644,
2875 			.extra1		= &zero,
2876 			.proc_handler   = proc_dointvec_minmax,
2877 		},
2878 		[NEIGH_VAR_PROXY_QLEN] = {
2879 			.procname	= "proxy_qlen",
2880 			.maxlen		= sizeof(int),
2881 			.mode		= 0644,
2882 			.proc_handler	= proc_dointvec,
2883 		},
2884 		[NEIGH_VAR_ANYCAST_DELAY] = {
2885 			.procname	= "anycast_delay",
2886 			.maxlen		= sizeof(int),
2887 			.mode		= 0644,
2888 			.proc_handler	= proc_dointvec_userhz_jiffies,
2889 		},
2890 		[NEIGH_VAR_PROXY_DELAY] = {
2891 			.procname	= "proxy_delay",
2892 			.maxlen		= sizeof(int),
2893 			.mode		= 0644,
2894 			.proc_handler	= proc_dointvec_userhz_jiffies,
2895 		},
2896 		[NEIGH_VAR_LOCKTIME] = {
2897 			.procname	= "locktime",
2898 			.maxlen		= sizeof(int),
2899 			.mode		= 0644,
2900 			.proc_handler	= proc_dointvec_userhz_jiffies,
2901 		},
2902 		[NEIGH_VAR_RETRANS_TIME_MS] = {
2903 			.procname	= "retrans_time_ms",
2904 			.maxlen		= sizeof(int),
2905 			.mode		= 0644,
2906 			.proc_handler	= proc_dointvec_ms_jiffies,
2907 		},
2908 		[NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
2909 			.procname	= "base_reachable_time_ms",
2910 			.maxlen		= sizeof(int),
2911 			.mode		= 0644,
2912 			.proc_handler	= proc_dointvec_ms_jiffies,
2913 		},
2914 		[NEIGH_VAR_GC_INTERVAL] = {
2915 			.procname	= "gc_interval",
2916 			.maxlen		= sizeof(int),
2917 			.mode		= 0644,
2918 			.proc_handler	= proc_dointvec_jiffies,
2919 		},
2920 		[NEIGH_VAR_GC_THRESH1] = {
2921 			.procname	= "gc_thresh1",
2922 			.maxlen		= sizeof(int),
2923 			.mode		= 0644,
2924 			.proc_handler	= proc_dointvec,
2925 		},
2926 		[NEIGH_VAR_GC_THRESH2] = {
2927 			.procname	= "gc_thresh2",
2928 			.maxlen		= sizeof(int),
2929 			.mode		= 0644,
2930 			.proc_handler	= proc_dointvec,
2931 		},
2932 		[NEIGH_VAR_GC_THRESH3] = {
2933 			.procname	= "gc_thresh3",
2934 			.maxlen		= sizeof(int),
2935 			.mode		= 0644,
2936 			.proc_handler	= proc_dointvec,
2937 		},
2938 		{},
2939 	},
2940 };
2941 
2942 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2943 			  char *p_name, proc_handler *handler)
2944 {
2945 	struct neigh_sysctl_table *t;
2946 	const char *dev_name_source = NULL;
2947 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
2948 
2949 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2950 	if (!t)
2951 		goto err;
2952 
2953 	t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data  = &p->mcast_probes;
2954 	t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data  = &p->ucast_probes;
2955 	t->neigh_vars[NEIGH_VAR_APP_PROBE].data  = &p->app_probes;
2956 	t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data  = &p->retrans_time;
2957 	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data  = &p->base_reachable_time;
2958 	t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data  = &p->delay_probe_time;
2959 	t->neigh_vars[NEIGH_VAR_GC_STALETIME].data  = &p->gc_staletime;
2960 	t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data  = &p->queue_len_bytes;
2961 	t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data  = &p->queue_len_bytes;
2962 	t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data  = &p->proxy_qlen;
2963 	t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data  = &p->anycast_delay;
2964 	t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
2965 	t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
2966 	t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data  = &p->retrans_time;
2967 	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data  = &p->base_reachable_time;
2968 
2969 	if (dev) {
2970 		dev_name_source = dev->name;
2971 		/* Terminate the table early */
2972 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2973 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2974 	} else {
2975 		dev_name_source = "default";
2976 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2977 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2978 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
2979 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
2980 	}
2982 
2983 	if (handler) {
2984 		/* RetransTime */
2985 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
2986 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
2987 		/* ReachableTime */
2988 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
2989 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
2990 		/* RetransTime (in milliseconds)*/
2991 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
2992 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
2993 		/* ReachableTime (in milliseconds) */
2994 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
2995 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2996 	}
2997 
2998 	/* Don't export sysctls to unprivileged users */
2999 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3000 		t->neigh_vars[0].procname = NULL;
3001 
3002 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3003 		p_name, dev_name_source);
3004 	t->sysctl_header =
3005 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3006 	if (!t->sysctl_header)
3007 		goto free;
3008 
3009 	p->sysctl_table = t;
3010 	return 0;
3011 
3012 free:
3013 	kfree(t);
3014 err:
3015 	return -ENOBUFS;
3016 }
3017 EXPORT_SYMBOL(neigh_sysctl_register);
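
/*
 * Registration sketch, modelled on the IPv4 ARP code (the handler
 * argument is only needed by protocols such as IPv6 NDISC that must
 * intercept retrans/reachable-time writes):
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
 */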
3018 
3019 void neigh_sysctl_unregister(struct neigh_parms *p)
3020 {
3021 	if (p->sysctl_table) {
3022 		struct neigh_sysctl_table *t = p->sysctl_table;
3023 		p->sysctl_table = NULL;
3024 		unregister_net_sysctl_table(t->sysctl_header);
3025 		kfree(t);
3026 	}
3027 }
3028 EXPORT_SYMBOL(neigh_sysctl_unregister);
3029 
3030 #endif	/* CONFIG_SYSCTL */
3031 
3032 static int __init neigh_init(void)
3033 {
3034 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3035 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3036 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3037 
3038 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3039 		      NULL);
3040 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3041 
3042 	return 0;
3043 }
3044 
3045 subsys_initcall(neigh_init);
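
/*
 * On the doit/dumpit split above: RTM_NEWNEIGH and RTM_DELNEIGH install
 * doit handlers (neigh_add()/neigh_delete(), defined earlier in this
 * file), while the RTM_GET* messages are served purely through the dump
 * callbacks.
 */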
3046 
3047