xref: /linux/net/ipv6/ip6mr.c (revision e60e1ee60630cafef5e430c2ae364877e061d980)
1 /*
2  *	Linux IPv6 multicast routing support for BSD pim6sd
3  *	Based on net/ipv4/ipmr.c.
4  *
5  *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6  *		LSIIT Laboratory, Strasbourg, France
7  *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8  *		6WIND, Paris, France
9  *	Copyright (C)2007,2008 USAGI/WIDE Project
10  *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11  *
12  *	This program is free software; you can redistribute it and/or
13  *	modify it under the terms of the GNU General Public License
14  *	as published by the Free Software Foundation; either version
15  *	2 of the License, or (at your option) any later version.
16  *
17  */
18 
19 #include <linux/uaccess.h>
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/errno.h>
23 #include <linux/timer.h>
24 #include <linux/mm.h>
25 #include <linux/kernel.h>
26 #include <linux/fcntl.h>
27 #include <linux/stat.h>
28 #include <linux/socket.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/inetdevice.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/init.h>
35 #include <linux/slab.h>
36 #include <linux/compat.h>
37 #include <net/protocol.h>
38 #include <linux/skbuff.h>
39 #include <net/sock.h>
40 #include <net/raw.h>
41 #include <linux/notifier.h>
42 #include <linux/if_arp.h>
43 #include <net/checksum.h>
44 #include <net/netlink.h>
45 #include <net/fib_rules.h>
46 
47 #include <net/ipv6.h>
48 #include <net/ip6_route.h>
49 #include <linux/mroute6.h>
50 #include <linux/pim.h>
51 #include <net/addrconf.h>
52 #include <linux/netfilter_ipv6.h>
53 #include <linux/export.h>
54 #include <net/ip6_checksum.h>
55 #include <linux/netconf.h>
56 
57 struct mr6_table {
58 	struct list_head	list;
59 	possible_net_t		net;
60 	u32			id;
61 	struct sock		*mroute6_sk;
62 	struct timer_list	ipmr_expire_timer;
63 	struct list_head	mfc6_unres_queue;
64 	struct list_head	mfc6_cache_array[MFC6_LINES];
65 	struct mif_device	vif6_table[MAXMIFS];
66 	int			maxvif;
67 	atomic_t		cache_resolve_queue_len;
68 	bool			mroute_do_assert;
69 	bool			mroute_do_pim;
70 #ifdef CONFIG_IPV6_PIMSM_V2
71 	int			mroute_reg_vif_num;
72 #endif
73 };
74 
75 struct ip6mr_rule {
76 	struct fib_rule		common;
77 };
78 
79 struct ip6mr_result {
80 	struct mr6_table	*mrt;
81 };
82 
83 /* Big lock, protecting the vif table, the MFC cache and the mroute socket
84    state. Note that modifications are serialized via rtnl_lock.
85  */
86 
87 static DEFINE_RWLOCK(mrt_lock);
88 
89 /*
90  *	Multicast router control variables
91  */
92 
93 #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
94 
95 /* Special spinlock for queue of unresolved entries */
96 static DEFINE_SPINLOCK(mfc_unres_lock);
97 
98 /* We return to Alan's original scheme. The hash table of resolved
99    entries is changed only in process context and protected
100    with the weak lock mrt_lock. The queue of unresolved entries is
101    protected with the strong spinlock mfc_unres_lock.
102 
103    This way the data path is entirely free of exclusive locks.
104  */
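
/*
 * Editor's sketch (added, not part of the original file): the locking
 * pattern described above, spelled out with this file's own primitives.
 * Data-path lookups run in softirq context and take only the read side
 * of mrt_lock; process-context updates take the write side with bottom
 * halves disabled; the unresolved queue is always guarded by
 * mfc_unres_lock. The function names are hypothetical.
 */
#if 0	/* illustrative only */
static void example_datapath_lookup(struct mr6_table *mrt, int line)
{
	struct mfc6_cache *c;

	read_lock(&mrt_lock);			/* weak lock: shared readers */
	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		/* ... match (origin, group) and forward ... */
	}
	read_unlock(&mrt_lock);
}

static void example_process_ctx_update(struct mr6_table *mrt,
				       struct mfc6_cache *c, int line)
{
	write_lock_bh(&mrt_lock);		/* exclude softirq readers */
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);		/* strong lock for the queue */
	/* ... touch mrt->mfc6_unres_queue ... */
	spin_unlock_bh(&mfc_unres_lock);
}
#endif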
105 
106 static struct kmem_cache *mrt_cachep __read_mostly;
107 
108 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
109 static void ip6mr_free_table(struct mr6_table *mrt);
110 
111 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
112 			   struct sk_buff *skb, struct mfc6_cache *cache);
113 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
114 			      mifi_t mifi, int assert);
115 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
116 			       struct mfc6_cache *c, struct rtmsg *rtm);
117 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
118 			      int cmd);
119 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
120 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
121 			       struct netlink_callback *cb);
122 static void mroute_clean_tables(struct mr6_table *mrt, bool all);
123 static void ipmr_expire_process(unsigned long arg);
124 
125 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
126 #define ip6mr_for_each_table(mrt, net) \
127 	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
128 
129 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
130 {
131 	struct mr6_table *mrt;
132 
133 	ip6mr_for_each_table(mrt, net) {
134 		if (mrt->id == id)
135 			return mrt;
136 	}
137 	return NULL;
138 }
139 
140 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
141 			    struct mr6_table **mrt)
142 {
143 	int err;
144 	struct ip6mr_result res;
145 	struct fib_lookup_arg arg = {
146 		.result = &res,
147 		.flags = FIB_LOOKUP_NOREF,
148 	};
149 
150 	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
151 			       flowi6_to_flowi(flp6), 0, &arg);
152 	if (err < 0)
153 		return err;
154 	*mrt = res.mrt;
155 	return 0;
156 }
157 
158 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
159 			     int flags, struct fib_lookup_arg *arg)
160 {
161 	struct ip6mr_result *res = arg->result;
162 	struct mr6_table *mrt;
163 
164 	switch (rule->action) {
165 	case FR_ACT_TO_TBL:
166 		break;
167 	case FR_ACT_UNREACHABLE:
168 		return -ENETUNREACH;
169 	case FR_ACT_PROHIBIT:
170 		return -EACCES;
171 	case FR_ACT_BLACKHOLE:
172 	default:
173 		return -EINVAL;
174 	}
175 
176 	mrt = ip6mr_get_table(rule->fr_net, rule->table);
177 	if (!mrt)
178 		return -EAGAIN;
179 	res->mrt = mrt;
180 	return 0;
181 }
182 
183 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
184 {
185 	return 1;
186 }
187 
188 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
189 	FRA_GENERIC_POLICY,
190 };
191 
192 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
193 				struct fib_rule_hdr *frh, struct nlattr **tb)
194 {
195 	return 0;
196 }
197 
198 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
199 			      struct nlattr **tb)
200 {
201 	return 1;
202 }
203 
204 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
205 			   struct fib_rule_hdr *frh)
206 {
207 	frh->dst_len = 0;
208 	frh->src_len = 0;
209 	frh->tos     = 0;
210 	return 0;
211 }
212 
213 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
214 	.family		= RTNL_FAMILY_IP6MR,
215 	.rule_size	= sizeof(struct ip6mr_rule),
216 	.addr_size	= sizeof(struct in6_addr),
217 	.action		= ip6mr_rule_action,
218 	.match		= ip6mr_rule_match,
219 	.configure	= ip6mr_rule_configure,
220 	.compare	= ip6mr_rule_compare,
221 	.fill		= ip6mr_rule_fill,
222 	.nlgroup	= RTNLGRP_IPV6_RULE,
223 	.policy		= ip6mr_rule_policy,
224 	.owner		= THIS_MODULE,
225 };
226 
227 static int __net_init ip6mr_rules_init(struct net *net)
228 {
229 	struct fib_rules_ops *ops;
230 	struct mr6_table *mrt;
231 	int err;
232 
233 	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
234 	if (IS_ERR(ops))
235 		return PTR_ERR(ops);
236 
237 	INIT_LIST_HEAD(&net->ipv6.mr6_tables);
238 
239 	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
240 	if (!mrt) {
241 		err = -ENOMEM;
242 		goto err1;
243 	}
244 
245 	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
246 	if (err < 0)
247 		goto err2;
248 
249 	net->ipv6.mr6_rules_ops = ops;
250 	return 0;
251 
252 err2:
253 	ip6mr_free_table(mrt);
254 err1:
255 	fib_rules_unregister(ops);
256 	return err;
257 }
258 
259 static void __net_exit ip6mr_rules_exit(struct net *net)
260 {
261 	struct mr6_table *mrt, *next;
262 
263 	rtnl_lock();
264 	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
265 		list_del(&mrt->list);
266 		ip6mr_free_table(mrt);
267 	}
268 	fib_rules_unregister(net->ipv6.mr6_rules_ops);
269 	rtnl_unlock();
270 }
271 #else
272 #define ip6mr_for_each_table(mrt, net) \
273 	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
274 
275 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
276 {
277 	return net->ipv6.mrt6;
278 }
279 
280 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
281 			    struct mr6_table **mrt)
282 {
283 	*mrt = net->ipv6.mrt6;
284 	return 0;
285 }
286 
287 static int __net_init ip6mr_rules_init(struct net *net)
288 {
289 	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
290 	return net->ipv6.mrt6 ? 0 : -ENOMEM;
291 }
292 
293 static void __net_exit ip6mr_rules_exit(struct net *net)
294 {
295 	rtnl_lock();
296 	ip6mr_free_table(net->ipv6.mrt6);
297 	net->ipv6.mrt6 = NULL;
298 	rtnl_unlock();
299 }
300 #endif
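
/*
 * Editor's note (added): with CONFIG_IPV6_MROUTE_MULTIPLE_TABLES the
 * table is selected by fib rules (family RTNL_FAMILY_IP6MR) matched
 * against the packet's flow; without it, ip6mr_fib_lookup() degenerates
 * to the single per-netns table net->ipv6.mrt6. A daemon binds itself to
 * a non-default table with the MRT6_TABLE socket option, which must be
 * set before MRT6_INIT (afterwards it returns -EBUSY, see below).
 */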
301 
302 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
303 {
304 	struct mr6_table *mrt;
305 	unsigned int i;
306 
307 	mrt = ip6mr_get_table(net, id);
308 	if (mrt)
309 		return mrt;
310 
311 	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
312 	if (!mrt)
313 		return NULL;
314 	mrt->id = id;
315 	write_pnet(&mrt->net, net);
316 
317 	/* Forwarding cache */
318 	for (i = 0; i < MFC6_LINES; i++)
319 		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
320 
321 	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
322 
323 	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
324 		    (unsigned long)mrt);
325 
326 #ifdef CONFIG_IPV6_PIMSM_V2
327 	mrt->mroute_reg_vif_num = -1;
328 #endif
329 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
330 	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
331 #endif
332 	return mrt;
333 }
334 
335 static void ip6mr_free_table(struct mr6_table *mrt)
336 {
337 	del_timer_sync(&mrt->ipmr_expire_timer);
338 	mroute_clean_tables(mrt, true);
339 	kfree(mrt);
340 }
341 
342 #ifdef CONFIG_PROC_FS
343 
344 struct ipmr_mfc_iter {
345 	struct seq_net_private p;
346 	struct mr6_table *mrt;
347 	struct list_head *cache;
348 	int ct;
349 };
350 
351 
352 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
353 					   struct ipmr_mfc_iter *it, loff_t pos)
354 {
355 	struct mr6_table *mrt = it->mrt;
356 	struct mfc6_cache *mfc;
357 
358 	read_lock(&mrt_lock);
359 	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
360 		it->cache = &mrt->mfc6_cache_array[it->ct];
361 		list_for_each_entry(mfc, it->cache, list)
362 			if (pos-- == 0)
363 				return mfc;
364 	}
365 	read_unlock(&mrt_lock);
366 
367 	spin_lock_bh(&mfc_unres_lock);
368 	it->cache = &mrt->mfc6_unres_queue;
369 	list_for_each_entry(mfc, it->cache, list)
370 		if (pos-- == 0)
371 			return mfc;
372 	spin_unlock_bh(&mfc_unres_lock);
373 
374 	it->cache = NULL;
375 	return NULL;
376 }
377 
378 /*
379  *	The /proc interfaces to multicast routing: /proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
380  */
381 
382 struct ipmr_vif_iter {
383 	struct seq_net_private p;
384 	struct mr6_table *mrt;
385 	int ct;
386 };
387 
388 static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
389 					    struct ipmr_vif_iter *iter,
390 					    loff_t pos)
391 {
392 	struct mr6_table *mrt = iter->mrt;
393 
394 	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
395 		if (!MIF_EXISTS(mrt, iter->ct))
396 			continue;
397 		if (pos-- == 0)
398 			return &mrt->vif6_table[iter->ct];
399 	}
400 	return NULL;
401 }
402 
403 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
404 	__acquires(mrt_lock)
405 {
406 	struct ipmr_vif_iter *iter = seq->private;
407 	struct net *net = seq_file_net(seq);
408 	struct mr6_table *mrt;
409 
410 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
411 	if (!mrt)
412 		return ERR_PTR(-ENOENT);
413 
414 	iter->mrt = mrt;
415 
416 	read_lock(&mrt_lock);
417 	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
418 		: SEQ_START_TOKEN;
419 }
420 
421 static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
422 {
423 	struct ipmr_vif_iter *iter = seq->private;
424 	struct net *net = seq_file_net(seq);
425 	struct mr6_table *mrt = iter->mrt;
426 
427 	++*pos;
428 	if (v == SEQ_START_TOKEN)
429 		return ip6mr_vif_seq_idx(net, iter, 0);
430 
431 	while (++iter->ct < mrt->maxvif) {
432 		if (!MIF_EXISTS(mrt, iter->ct))
433 			continue;
434 		return &mrt->vif6_table[iter->ct];
435 	}
436 	return NULL;
437 }
438 
439 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
440 	__releases(mrt_lock)
441 {
442 	read_unlock(&mrt_lock);
443 }
444 
445 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
446 {
447 	struct ipmr_vif_iter *iter = seq->private;
448 	struct mr6_table *mrt = iter->mrt;
449 
450 	if (v == SEQ_START_TOKEN) {
451 		seq_puts(seq,
452 			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
453 	} else {
454 		const struct mif_device *vif = v;
455 		const char *name = vif->dev ? vif->dev->name : "none";
456 
457 		seq_printf(seq,
458 			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
459 			   vif - mrt->vif6_table,
460 			   name, vif->bytes_in, vif->pkt_in,
461 			   vif->bytes_out, vif->pkt_out,
462 			   vif->flags);
463 	}
464 	return 0;
465 }
466 
467 static const struct seq_operations ip6mr_vif_seq_ops = {
468 	.start = ip6mr_vif_seq_start,
469 	.next  = ip6mr_vif_seq_next,
470 	.stop  = ip6mr_vif_seq_stop,
471 	.show  = ip6mr_vif_seq_show,
472 };
473 
474 static int ip6mr_vif_open(struct inode *inode, struct file *file)
475 {
476 	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
477 			    sizeof(struct ipmr_vif_iter));
478 }
479 
480 static const struct file_operations ip6mr_vif_fops = {
481 	.owner	 = THIS_MODULE,
482 	.open    = ip6mr_vif_open,
483 	.read    = seq_read,
484 	.llseek  = seq_lseek,
485 	.release = seq_release_net,
486 };
487 
488 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
489 {
490 	struct ipmr_mfc_iter *it = seq->private;
491 	struct net *net = seq_file_net(seq);
492 	struct mr6_table *mrt;
493 
494 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
495 	if (!mrt)
496 		return ERR_PTR(-ENOENT);
497 
498 	it->mrt = mrt;
499 	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
500 		: SEQ_START_TOKEN;
501 }
502 
503 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
504 {
505 	struct mfc6_cache *mfc = v;
506 	struct ipmr_mfc_iter *it = seq->private;
507 	struct net *net = seq_file_net(seq);
508 	struct mr6_table *mrt = it->mrt;
509 
510 	++*pos;
511 
512 	if (v == SEQ_START_TOKEN)
513 		return ipmr_mfc_seq_idx(net, seq->private, 0);
514 
515 	if (mfc->list.next != it->cache)
516 		return list_entry(mfc->list.next, struct mfc6_cache, list);
517 
518 	if (it->cache == &mrt->mfc6_unres_queue)
519 		goto end_of_list;
520 
521 	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
522 
523 	while (++it->ct < MFC6_LINES) {
524 		it->cache = &mrt->mfc6_cache_array[it->ct];
525 		if (list_empty(it->cache))
526 			continue;
527 		return list_first_entry(it->cache, struct mfc6_cache, list);
528 	}
529 
530 	/* exhausted cache_array, show unresolved */
531 	read_unlock(&mrt_lock);
532 	it->cache = &mrt->mfc6_unres_queue;
533 	it->ct = 0;
534 
535 	spin_lock_bh(&mfc_unres_lock);
536 	if (!list_empty(it->cache))
537 		return list_first_entry(it->cache, struct mfc6_cache, list);
538 
539  end_of_list:
540 	spin_unlock_bh(&mfc_unres_lock);
541 	it->cache = NULL;
542 
543 	return NULL;
544 }
545 
546 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
547 {
548 	struct ipmr_mfc_iter *it = seq->private;
549 	struct mr6_table *mrt = it->mrt;
550 
551 	if (it->cache == &mrt->mfc6_unres_queue)
552 		spin_unlock_bh(&mfc_unres_lock);
553 	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
554 		read_unlock(&mrt_lock);
555 }
556 
557 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
558 {
559 	int n;
560 
561 	if (v == SEQ_START_TOKEN) {
562 		seq_puts(seq,
563 			 "Group                            "
564 			 "Origin                           "
565 			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
566 	} else {
567 		const struct mfc6_cache *mfc = v;
568 		const struct ipmr_mfc_iter *it = seq->private;
569 		struct mr6_table *mrt = it->mrt;
570 
571 		seq_printf(seq, "%pI6 %pI6 %-3hd",
572 			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
573 			   mfc->mf6c_parent);
574 
575 		if (it->cache != &mrt->mfc6_unres_queue) {
576 			seq_printf(seq, " %8lu %8lu %8lu",
577 				   mfc->mfc_un.res.pkt,
578 				   mfc->mfc_un.res.bytes,
579 				   mfc->mfc_un.res.wrong_if);
580 			for (n = mfc->mfc_un.res.minvif;
581 			     n < mfc->mfc_un.res.maxvif; n++) {
582 				if (MIF_EXISTS(mrt, n) &&
583 				    mfc->mfc_un.res.ttls[n] < 255)
584 					seq_printf(seq,
585 						   " %2d:%-3d",
586 						   n, mfc->mfc_un.res.ttls[n]);
587 			}
588 		} else {
589 			/* unresolved mfc_caches don't contain
590 			 * pkt, bytes and wrong_if values
591 			 */
592 			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
593 		}
594 		seq_putc(seq, '\n');
595 	}
596 	return 0;
597 }
598 
599 static const struct seq_operations ipmr_mfc_seq_ops = {
600 	.start = ipmr_mfc_seq_start,
601 	.next  = ipmr_mfc_seq_next,
602 	.stop  = ipmr_mfc_seq_stop,
603 	.show  = ipmr_mfc_seq_show,
604 };
605 
606 static int ipmr_mfc_open(struct inode *inode, struct file *file)
607 {
608 	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
609 			    sizeof(struct ipmr_mfc_iter));
610 }
611 
612 static const struct file_operations ip6mr_mfc_fops = {
613 	.owner	 = THIS_MODULE,
614 	.open    = ipmr_mfc_open,
615 	.read    = seq_read,
616 	.llseek  = seq_lseek,
617 	.release = seq_release_net,
618 };
619 #endif
620 
621 #ifdef CONFIG_IPV6_PIMSM_V2
622 
623 static int pim6_rcv(struct sk_buff *skb)
624 {
625 	struct pimreghdr *pim;
626 	struct ipv6hdr   *encap;
627 	struct net_device  *reg_dev = NULL;
628 	struct net *net = dev_net(skb->dev);
629 	struct mr6_table *mrt;
630 	struct flowi6 fl6 = {
631 		.flowi6_iif	= skb->dev->ifindex,
632 		.flowi6_mark	= skb->mark,
633 	};
634 	int reg_vif_num;
635 
636 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
637 		goto drop;
638 
639 	pim = (struct pimreghdr *)skb_transport_header(skb);
640 	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
641 	    (pim->flags & PIM_NULL_REGISTER) ||
642 	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
643 			     sizeof(*pim), IPPROTO_PIM,
644 			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
645 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
646 		goto drop;
647 
648 	/* check that the inner packet is destined to a multicast group */
649 	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
650 				   sizeof(*pim));
651 
652 	if (!ipv6_addr_is_multicast(&encap->daddr) ||
653 	    encap->payload_len == 0 ||
654 	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
655 		goto drop;
656 
657 	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
658 		goto drop;
659 	reg_vif_num = mrt->mroute_reg_vif_num;
660 
661 	read_lock(&mrt_lock);
662 	if (reg_vif_num >= 0)
663 		reg_dev = mrt->vif6_table[reg_vif_num].dev;
664 	if (reg_dev)
665 		dev_hold(reg_dev);
666 	read_unlock(&mrt_lock);
667 
668 	if (!reg_dev)
669 		goto drop;
670 
671 	skb->mac_header = skb->network_header;
672 	skb_pull(skb, (u8 *)encap - skb->data);
673 	skb_reset_network_header(skb);
674 	skb->protocol = htons(ETH_P_IPV6);
675 	skb->ip_summed = CHECKSUM_NONE;
676 
677 	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
678 
679 	netif_rx(skb);
680 
681 	dev_put(reg_dev);
682 	return 0;
683  drop:
684 	kfree_skb(skb);
685 	return 0;
686 }
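
/*
 * Editor's note (added): pim6_rcv() above decapsulates a PIM Register
 * message (RFC 7761, section 4.9.3). On the wire the packet looks like:
 *
 *	outer IPv6 header	(next header = 103, PIM)
 *	struct pimreghdr	(type, flags, checksum; <linux/pim.h>)
 *	inner IPv6 header	(the original multicast packet)
 *	payload
 *
 * The code pulls everything up to the inner header and re-injects the
 * original packet on the pim6reg device via netif_rx(), so it is
 * forwarded as if it had arrived on that virtual interface.
 */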
687 
688 static const struct inet6_protocol pim6_protocol = {
689 	.handler	=	pim6_rcv,
690 };
691 
692 /* Service routines creating virtual interfaces: PIMREG */
693 
694 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
695 				      struct net_device *dev)
696 {
697 	struct net *net = dev_net(dev);
698 	struct mr6_table *mrt;
699 	struct flowi6 fl6 = {
700 		.flowi6_oif	= dev->ifindex,
701 		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
702 		.flowi6_mark	= skb->mark,
703 	};
704 	int err;
705 
706 	err = ip6mr_fib_lookup(net, &fl6, &mrt);
707 	if (err < 0) {
708 		kfree_skb(skb);
709 		return err;
710 	}
711 
712 	read_lock(&mrt_lock);
713 	dev->stats.tx_bytes += skb->len;
714 	dev->stats.tx_packets++;
715 	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
716 	read_unlock(&mrt_lock);
717 	kfree_skb(skb);
718 	return NETDEV_TX_OK;
719 }
720 
721 static int reg_vif_get_iflink(const struct net_device *dev)
722 {
723 	return 0;
724 }
725 
726 static const struct net_device_ops reg_vif_netdev_ops = {
727 	.ndo_start_xmit	= reg_vif_xmit,
728 	.ndo_get_iflink = reg_vif_get_iflink,
729 };
730 
731 static void reg_vif_setup(struct net_device *dev)
732 {
733 	dev->type		= ARPHRD_PIMREG;
734 	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
735 	dev->flags		= IFF_NOARP;
736 	dev->netdev_ops		= &reg_vif_netdev_ops;
737 	dev->needs_free_netdev	= true;
738 	dev->features		|= NETIF_F_NETNS_LOCAL;
739 }
740 
741 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
742 {
743 	struct net_device *dev;
744 	char name[IFNAMSIZ];
745 
746 	if (mrt->id == RT6_TABLE_DFLT)
747 		sprintf(name, "pim6reg");
748 	else
749 		sprintf(name, "pim6reg%u", mrt->id);
750 
751 	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
752 	if (!dev)
753 		return NULL;
754 
755 	dev_net_set(dev, net);
756 
757 	if (register_netdevice(dev)) {
758 		free_netdev(dev);
759 		return NULL;
760 	}
761 
762 	if (dev_open(dev))
763 		goto failure;
764 
765 	dev_hold(dev);
766 	return dev;
767 
768 failure:
769 	unregister_netdevice(dev);
770 	return NULL;
771 }
772 #endif
773 
774 /*
775  *	Delete a VIF entry
776  */
777 
778 static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
779 		       struct list_head *head)
780 {
781 	struct mif_device *v;
782 	struct net_device *dev;
783 	struct inet6_dev *in6_dev;
784 
785 	if (vifi < 0 || vifi >= mrt->maxvif)
786 		return -EADDRNOTAVAIL;
787 
788 	v = &mrt->vif6_table[vifi];
789 
790 	write_lock_bh(&mrt_lock);
791 	dev = v->dev;
792 	v->dev = NULL;
793 
794 	if (!dev) {
795 		write_unlock_bh(&mrt_lock);
796 		return -EADDRNOTAVAIL;
797 	}
798 
799 #ifdef CONFIG_IPV6_PIMSM_V2
800 	if (vifi == mrt->mroute_reg_vif_num)
801 		mrt->mroute_reg_vif_num = -1;
802 #endif
803 
804 	if (vifi + 1 == mrt->maxvif) {
805 		int tmp;
806 		for (tmp = vifi - 1; tmp >= 0; tmp--) {
807 			if (MIF_EXISTS(mrt, tmp))
808 				break;
809 		}
810 		mrt->maxvif = tmp + 1;
811 	}
812 
813 	write_unlock_bh(&mrt_lock);
814 
815 	dev_set_allmulti(dev, -1);
816 
817 	in6_dev = __in6_dev_get(dev);
818 	if (in6_dev) {
819 		in6_dev->cnf.mc_forwarding--;
820 		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
821 					     NETCONFA_MC_FORWARDING,
822 					     dev->ifindex, &in6_dev->cnf);
823 	}
824 
825 	if ((v->flags & MIFF_REGISTER) && !notify)
826 		unregister_netdevice_queue(dev, head);
827 
828 	dev_put(dev);
829 	return 0;
830 }
831 
832 static inline void ip6mr_cache_free(struct mfc6_cache *c)
833 {
834 	kmem_cache_free(mrt_cachep, c);
835 }
836 
837 /* Destroy an unresolved cache entry, killing queued skbs
838    and reporting error to netlink readers.
839  */
840 
841 static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
842 {
843 	struct net *net = read_pnet(&mrt->net);
844 	struct sk_buff *skb;
845 
846 	atomic_dec(&mrt->cache_resolve_queue_len);
847 
848 	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
849 		if (ipv6_hdr(skb)->version == 0) {
850 			struct nlmsghdr *nlh = skb_pull(skb,
851 							sizeof(struct ipv6hdr));
852 			nlh->nlmsg_type = NLMSG_ERROR;
853 			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
854 			skb_trim(skb, nlh->nlmsg_len);
855 			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
856 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
857 		} else
858 			kfree_skb(skb);
859 	}
860 
861 	ip6mr_cache_free(c);
862 }
863 
864 
865 /* Timer handler for the unresolved queue. */
866 
867 static void ipmr_do_expire_process(struct mr6_table *mrt)
868 {
869 	unsigned long now = jiffies;
870 	unsigned long expires = 10 * HZ;
871 	struct mfc6_cache *c, *next;
872 
873 	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
874 		if (time_after(c->mfc_un.unres.expires, now)) {
875 			/* not yet... */
876 			unsigned long interval = c->mfc_un.unres.expires - now;
877 			if (interval < expires)
878 				expires = interval;
879 			continue;
880 		}
881 
882 		list_del(&c->list);
883 		mr6_netlink_event(mrt, c, RTM_DELROUTE);
884 		ip6mr_destroy_unres(mrt, c);
885 	}
886 
887 	if (!list_empty(&mrt->mfc6_unres_queue))
888 		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
889 }
890 
891 static void ipmr_expire_process(unsigned long arg)
892 {
893 	struct mr6_table *mrt = (struct mr6_table *)arg;
894 
895 	if (!spin_trylock(&mfc_unres_lock)) {
896 		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
897 		return;
898 	}
899 
900 	if (!list_empty(&mrt->mfc6_unres_queue))
901 		ipmr_do_expire_process(mrt);
902 
903 	spin_unlock(&mfc_unres_lock);
904 }
905 
906 /* Fill the oif list. Called with mrt_lock held for writing. */
907 
908 static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
909 				    unsigned char *ttls)
910 {
911 	int vifi;
912 
913 	cache->mfc_un.res.minvif = MAXMIFS;
914 	cache->mfc_un.res.maxvif = 0;
915 	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
916 
917 	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
918 		if (MIF_EXISTS(mrt, vifi) &&
919 		    ttls[vifi] && ttls[vifi] < 255) {
920 			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
921 			if (cache->mfc_un.res.minvif > vifi)
922 				cache->mfc_un.res.minvif = vifi;
923 			if (cache->mfc_un.res.maxvif <= vifi)
924 				cache->mfc_un.res.maxvif = vifi + 1;
925 		}
926 	}
927 	cache->mfc_un.res.lastuse = jiffies;
928 }
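
/*
 * Editor's sketch (added): a self-contained userspace mirror of the loop
 * above (the MIF_EXISTS() check is dropped for brevity), to make the
 * minvif/maxvif bookkeeping concrete. With oifs 1 and 3 enabled, the
 * result is minvif = 1 and maxvif = 4, so forwarding loops can run
 * "for (vifi = minvif; vifi < maxvif; vifi++)". MAXMIFS = 32 is an
 * assumption here.
 */
#if 0	/* illustrative only; compile separately as plain C */
#include <stdio.h>
#include <string.h>
#define MAXMIFS 32

int main(void)
{
	unsigned char ttls[MAXMIFS], res[MAXMIFS];
	int minvif = MAXMIFS, maxvif = 0, vifi;

	memset(ttls, 255, MAXMIFS);	/* 255 means "not an oif" */
	ttls[1] = 2;			/* forward on mif 1, threshold 2 */
	ttls[3] = 1;			/* forward on mif 3, threshold 1 */

	memset(res, 255, MAXMIFS);
	for (vifi = 0; vifi < MAXMIFS; vifi++) {
		if (ttls[vifi] && ttls[vifi] < 255) {
			res[vifi] = ttls[vifi];
			if (minvif > vifi)
				minvif = vifi;
			if (maxvif <= vifi)
				maxvif = vifi + 1;
		}
	}
	printf("minvif=%d maxvif=%d\n", minvif, maxvif);	/* 1 and 4 */
	return 0;
}
#endif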
929 
930 static int mif6_add(struct net *net, struct mr6_table *mrt,
931 		    struct mif6ctl *vifc, int mrtsock)
932 {
933 	int vifi = vifc->mif6c_mifi;
934 	struct mif_device *v = &mrt->vif6_table[vifi];
935 	struct net_device *dev;
936 	struct inet6_dev *in6_dev;
937 	int err;
938 
939 	/* Is the vif busy? */
940 	if (MIF_EXISTS(mrt, vifi))
941 		return -EADDRINUSE;
942 
943 	switch (vifc->mif6c_flags) {
944 #ifdef CONFIG_IPV6_PIMSM_V2
945 	case MIFF_REGISTER:
946 		/*
947 		 * Special Purpose VIF in PIM
948 		 * All the packets will be sent to the daemon
949 		 */
950 		if (mrt->mroute_reg_vif_num >= 0)
951 			return -EADDRINUSE;
952 		dev = ip6mr_reg_vif(net, mrt);
953 		if (!dev)
954 			return -ENOBUFS;
955 		err = dev_set_allmulti(dev, 1);
956 		if (err) {
957 			unregister_netdevice(dev);
958 			dev_put(dev);
959 			return err;
960 		}
961 		break;
962 #endif
963 	case 0:
964 		dev = dev_get_by_index(net, vifc->mif6c_pifi);
965 		if (!dev)
966 			return -EADDRNOTAVAIL;
967 		err = dev_set_allmulti(dev, 1);
968 		if (err) {
969 			dev_put(dev);
970 			return err;
971 		}
972 		break;
973 	default:
974 		return -EINVAL;
975 	}
976 
977 	in6_dev = __in6_dev_get(dev);
978 	if (in6_dev) {
979 		in6_dev->cnf.mc_forwarding++;
980 		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
981 					     NETCONFA_MC_FORWARDING,
982 					     dev->ifindex, &in6_dev->cnf);
983 	}
984 
985 	/*
986 	 *	Fill in the VIF structures
987 	 */
988 	v->rate_limit = vifc->vifc_rate_limit;
989 	v->flags = vifc->mif6c_flags;
990 	if (!mrtsock)
991 		v->flags |= VIFF_STATIC;
992 	v->threshold = vifc->vifc_threshold;
993 	v->bytes_in = 0;
994 	v->bytes_out = 0;
995 	v->pkt_in = 0;
996 	v->pkt_out = 0;
997 	v->link = dev->ifindex;
998 	if (v->flags & MIFF_REGISTER)
999 		v->link = dev_get_iflink(dev);
1000 
1001 	/* And finish update writing critical data */
1002 	write_lock_bh(&mrt_lock);
1003 	v->dev = dev;
1004 #ifdef CONFIG_IPV6_PIMSM_V2
1005 	if (v->flags & MIFF_REGISTER)
1006 		mrt->mroute_reg_vif_num = vifi;
1007 #endif
1008 	if (vifi + 1 > mrt->maxvif)
1009 		mrt->maxvif = vifi + 1;
1010 	write_unlock_bh(&mrt_lock);
1011 	return 0;
1012 }
1013 
1014 static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1015 					   const struct in6_addr *origin,
1016 					   const struct in6_addr *mcastgrp)
1017 {
1018 	int line = MFC6_HASH(mcastgrp, origin);
1019 	struct mfc6_cache *c;
1020 
1021 	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1022 		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1023 		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1024 			return c;
1025 	}
1026 	return NULL;
1027 }
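
/*
 * Editor's note (added): MFC6_HASH() (from <linux/mroute6.h>) folds words
 * of the group and origin addresses into one of MFC6_LINES buckets; each
 * bucket is a plain linked list walked linearly, as above. The forwarding
 * path tries an exact (S,G) match first and falls back to the (*,G) and
 * (*,*) lookups implemented by the two helpers below.
 */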
1028 
1029 /* Look for a (*,*,oif) entry */
1030 static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1031 						      mifi_t mifi)
1032 {
1033 	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1034 	struct mfc6_cache *c;
1035 
1036 	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1037 		if (ipv6_addr_any(&c->mf6c_origin) &&
1038 		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
1039 		    (c->mfc_un.res.ttls[mifi] < 255))
1040 			return c;
1041 
1042 	return NULL;
1043 }
1044 
1045 /* Look for a (*,G) entry */
1046 static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1047 					       struct in6_addr *mcastgrp,
1048 					       mifi_t mifi)
1049 {
1050 	int line = MFC6_HASH(mcastgrp, &in6addr_any);
1051 	struct mfc6_cache *c, *proxy;
1052 
1053 	if (ipv6_addr_any(mcastgrp))
1054 		goto skip;
1055 
1056 	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1057 		if (ipv6_addr_any(&c->mf6c_origin) &&
1058 		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1059 			if (c->mfc_un.res.ttls[mifi] < 255)
1060 				return c;
1061 
1062 			/* It's ok if the mifi is part of the static tree */
1063 			proxy = ip6mr_cache_find_any_parent(mrt,
1064 							    c->mf6c_parent);
1065 			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1066 				return c;
1067 		}
1068 
1069 skip:
1070 	return ip6mr_cache_find_any_parent(mrt, mifi);
1071 }
1072 
1073 /*
1074  *	Allocate a multicast cache entry
1075  */
1076 static struct mfc6_cache *ip6mr_cache_alloc(void)
1077 {
1078 	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1079 	if (!c)
1080 		return NULL;
1081 	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1082 	c->mfc_un.res.minvif = MAXMIFS;
1083 	return c;
1084 }
1085 
1086 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1087 {
1088 	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1089 	if (!c)
1090 		return NULL;
1091 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
1092 	c->mfc_un.unres.expires = jiffies + 10 * HZ;
1093 	return c;
1094 }
1095 
1096 /*
1097  *	A cache entry has gone from the unresolved queue to the resolved state
1098  */
1099 
1100 static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1101 				struct mfc6_cache *uc, struct mfc6_cache *c)
1102 {
1103 	struct sk_buff *skb;
1104 
1105 	/*
1106 	 *	Play the pending entries through our router
1107 	 */
1108 
1109 	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1110 		if (ipv6_hdr(skb)->version == 0) {
1111 			struct nlmsghdr *nlh = skb_pull(skb,
1112 							sizeof(struct ipv6hdr));
1113 
1114 			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1115 				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1116 			} else {
1117 				nlh->nlmsg_type = NLMSG_ERROR;
1118 				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1119 				skb_trim(skb, nlh->nlmsg_len);
1120 				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1121 			}
1122 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1123 		} else
1124 			ip6_mr_forward(net, mrt, skb, c);
1125 	}
1126 }
1127 
1128 /*
1129  *	Bounce a cache query up to pim6sd and netlink.
1130  *
1131  *	Called under mrt_lock.
1132  */
1133 
1134 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1135 			      mifi_t mifi, int assert)
1136 {
1137 	struct sk_buff *skb;
1138 	struct mrt6msg *msg;
1139 	int ret;
1140 
1141 #ifdef CONFIG_IPV6_PIMSM_V2
1142 	if (assert == MRT6MSG_WHOLEPKT)
1143 		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1144 						+sizeof(*msg));
1145 	else
1146 #endif
1147 		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1148 
1149 	if (!skb)
1150 		return -ENOBUFS;
1151 
1152 	/* I suppose that internal messages
1153 	 * do not require checksums */
1154 
1155 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1156 
1157 #ifdef CONFIG_IPV6_PIMSM_V2
1158 	if (assert == MRT6MSG_WHOLEPKT) {
1159 		/* Ugly, but we have no choice with this interface.
1160 		   Duplicate old header, fix length etc.
1161 		   And all this only to mangle msg->im6_msgtype and
1162 		   to set msg->im6_mbz to "mbz" :-)
1163 		 */
1164 		skb_push(skb, -skb_network_offset(pkt));
1165 
1166 		skb_push(skb, sizeof(*msg));
1167 		skb_reset_transport_header(skb);
1168 		msg = (struct mrt6msg *)skb_transport_header(skb);
1169 		msg->im6_mbz = 0;
1170 		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1171 		msg->im6_mif = mrt->mroute_reg_vif_num;
1172 		msg->im6_pad = 0;
1173 		msg->im6_src = ipv6_hdr(pkt)->saddr;
1174 		msg->im6_dst = ipv6_hdr(pkt)->daddr;
1175 
1176 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1177 	} else
1178 #endif
1179 	{
1180 	/*
1181 	 *	Copy the IP header
1182 	 */
1183 
1184 	skb_put(skb, sizeof(struct ipv6hdr));
1185 	skb_reset_network_header(skb);
1186 	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1187 
1188 	/*
1189 	 *	Add our header
1190 	 */
1191 	skb_put(skb, sizeof(*msg));
1192 	skb_reset_transport_header(skb);
1193 	msg = (struct mrt6msg *)skb_transport_header(skb);
1194 
1195 	msg->im6_mbz = 0;
1196 	msg->im6_msgtype = assert;
1197 	msg->im6_mif = mifi;
1198 	msg->im6_pad = 0;
1199 	msg->im6_src = ipv6_hdr(pkt)->saddr;
1200 	msg->im6_dst = ipv6_hdr(pkt)->daddr;
1201 
1202 	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1203 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1204 	}
1205 
1206 	if (!mrt->mroute6_sk) {
1207 		kfree_skb(skb);
1208 		return -EINVAL;
1209 	}
1210 
1211 	mrt6msg_netlink_event(mrt, skb);
1212 
1213 	/*
1214 	 *	Deliver to user space multicast routing algorithms
1215 	 */
1216 	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1217 	if (ret < 0) {
1218 		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1219 		kfree_skb(skb);
1220 	}
1221 
1222 	return ret;
1223 }
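
/*
 * Editor's sketch (added): the user-space side of the upcall above. A
 * daemon in the style of pim6sd opens the mrouter socket and then tells
 * kernel upcalls apart from real ICMPv6 packets by the leading zero byte
 * (im6_mbz). Plain C against the uapi headers; error handling is minimal
 * and the buffer size is an arbitrary assumption.
 */
#if 0	/* illustrative user-space code, compile separately */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>

int main(void)
{
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int one = 1;
	unsigned char buf[8192];
	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];

	/* Become the mrouter socket: needs CAP_NET_ADMIN, one per table */
	if (s < 0 || setsockopt(s, IPPROTO_IPV6, MRT6_INIT,
				&one, sizeof(one)) < 0) {
		perror("MRT6_INIT");
		return 1;
	}
	for (;;) {
		ssize_t n = read(s, buf, sizeof(buf));
		struct mrt6msg *m = (struct mrt6msg *)buf;

		if (n < (ssize_t)sizeof(*m) || m->im6_mbz != 0)
			continue;	/* ordinary ICMPv6, not an upcall */
		inet_ntop(AF_INET6, &m->im6_src, src, sizeof(src));
		inet_ntop(AF_INET6, &m->im6_dst, dst, sizeof(dst));
		printf("upcall type=%u mif=%u %s -> %s\n",
		       (unsigned)m->im6_msgtype, (unsigned)m->im6_mif,
		       src, dst);
		/* MRT6MSG_NOCACHE: resolve, then install via MRT6_ADD_MFC */
	}
}
#endif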
1224 
1225 /*
1226  *	Queue a packet for resolution; the unresolved cache entry is created under mfc_unres_lock if needed.
1227  */
1228 
1229 static int
1230 ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1231 {
1232 	bool found = false;
1233 	int err;
1234 	struct mfc6_cache *c;
1235 
1236 	spin_lock_bh(&mfc_unres_lock);
1237 	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1238 		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1239 		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1240 			found = true;
1241 			break;
1242 		}
1243 	}
1244 
1245 	if (!found) {
1246 		/*
1247 		 *	Create a new entry if allowable
1248 		 */
1249 
1250 		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1251 		    (c = ip6mr_cache_alloc_unres()) == NULL) {
1252 			spin_unlock_bh(&mfc_unres_lock);
1253 
1254 			kfree_skb(skb);
1255 			return -ENOBUFS;
1256 		}
1257 
1258 		/*
1259 		 *	Fill in the new cache entry
1260 		 */
1261 		c->mf6c_parent = -1;
1262 		c->mf6c_origin = ipv6_hdr(skb)->saddr;
1263 		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1264 
1265 		/*
1266 		 *	Reflect first query at pim6sd
1267 		 *	Bounce the first packet up to pim6sd
1268 		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1269 		if (err < 0) {
1270 			/* If the report failed throw the cache entry
1271 			   out - Brad Parker
1272 			 */
1273 			spin_unlock_bh(&mfc_unres_lock);
1274 
1275 			ip6mr_cache_free(c);
1276 			kfree_skb(skb);
1277 			return err;
1278 		}
1279 
1280 		atomic_inc(&mrt->cache_resolve_queue_len);
1281 		list_add(&c->list, &mrt->mfc6_unres_queue);
1282 		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1283 
1284 		ipmr_do_expire_process(mrt);
1285 	}
1286 
1287 	/*
1288 	 *	See if we can append the packet
1289 	 */
1290 	if (c->mfc_un.unres.unresolved.qlen > 3) {
1291 		kfree_skb(skb);
1292 		err = -ENOBUFS;
1293 	} else {
1294 		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1295 		err = 0;
1296 	}
1297 
1298 	spin_unlock_bh(&mfc_unres_lock);
1299 	return err;
1300 }
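
/*
 * Editor's note (added): taken together, the limits above mean the
 * unresolved queue holds at most 10 entries (cache_resolve_queue_len),
 * each entry buffers at most 4 pending packets (qlen > 3), and entries
 * expire after 10 seconds (ip6mr_cache_alloc_unres() sets expires and
 * ipmr_do_expire_process() reaps them).
 */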
1301 
1302 /*
1303  *	MFC6 cache manipulation by user space
1304  */
1305 
1306 static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1307 			    int parent)
1308 {
1309 	int line;
1310 	struct mfc6_cache *c, *next;
1311 
1312 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1313 
1314 	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1315 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1316 		    ipv6_addr_equal(&c->mf6c_mcastgrp,
1317 				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
1318 		    (parent == -1 || parent == c->mf6c_parent)) {
1319 			write_lock_bh(&mrt_lock);
1320 			list_del(&c->list);
1321 			write_unlock_bh(&mrt_lock);
1322 
1323 			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1324 			ip6mr_cache_free(c);
1325 			return 0;
1326 		}
1327 	}
1328 	return -ENOENT;
1329 }
1330 
1331 static int ip6mr_device_event(struct notifier_block *this,
1332 			      unsigned long event, void *ptr)
1333 {
1334 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1335 	struct net *net = dev_net(dev);
1336 	struct mr6_table *mrt;
1337 	struct mif_device *v;
1338 	int ct;
1339 
1340 	if (event != NETDEV_UNREGISTER)
1341 		return NOTIFY_DONE;
1342 
1343 	ip6mr_for_each_table(mrt, net) {
1344 		v = &mrt->vif6_table[0];
1345 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1346 			if (v->dev == dev)
1347 				mif6_delete(mrt, ct, 1, NULL);
1348 		}
1349 	}
1350 
1351 	return NOTIFY_DONE;
1352 }
1353 
1354 static struct notifier_block ip6_mr_notifier = {
1355 	.notifier_call = ip6mr_device_event
1356 };
1357 
1358 /*
1359  *	Setup for IPv6 multicast routing
1360  */
1361 
1362 static int __net_init ip6mr_net_init(struct net *net)
1363 {
1364 	int err;
1365 
1366 	err = ip6mr_rules_init(net);
1367 	if (err < 0)
1368 		goto fail;
1369 
1370 #ifdef CONFIG_PROC_FS
1371 	err = -ENOMEM;
1372 	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1373 		goto proc_vif_fail;
1374 	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1375 		goto proc_cache_fail;
1376 #endif
1377 
1378 	return 0;
1379 
1380 #ifdef CONFIG_PROC_FS
1381 proc_cache_fail:
1382 	remove_proc_entry("ip6_mr_vif", net->proc_net);
1383 proc_vif_fail:
1384 	ip6mr_rules_exit(net);
1385 #endif
1386 fail:
1387 	return err;
1388 }
1389 
1390 static void __net_exit ip6mr_net_exit(struct net *net)
1391 {
1392 #ifdef CONFIG_PROC_FS
1393 	remove_proc_entry("ip6_mr_cache", net->proc_net);
1394 	remove_proc_entry("ip6_mr_vif", net->proc_net);
1395 #endif
1396 	ip6mr_rules_exit(net);
1397 }
1398 
1399 static struct pernet_operations ip6mr_net_ops = {
1400 	.init = ip6mr_net_init,
1401 	.exit = ip6mr_net_exit,
1402 };
1403 
1404 int __init ip6_mr_init(void)
1405 {
1406 	int err;
1407 
1408 	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1409 				       sizeof(struct mfc6_cache),
1410 				       0, SLAB_HWCACHE_ALIGN,
1411 				       NULL);
1412 	if (!mrt_cachep)
1413 		return -ENOMEM;
1414 
1415 	err = register_pernet_subsys(&ip6mr_net_ops);
1416 	if (err)
1417 		goto reg_pernet_fail;
1418 
1419 	err = register_netdevice_notifier(&ip6_mr_notifier);
1420 	if (err)
1421 		goto reg_notif_fail;
1422 #ifdef CONFIG_IPV6_PIMSM_V2
1423 	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1424 		pr_err("%s: can't add PIM protocol\n", __func__);
1425 		err = -EAGAIN;
1426 		goto add_proto_fail;
1427 	}
1428 #endif
1429 	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1430 		      ip6mr_rtm_dumproute, 0);
1431 	return 0;
1432 #ifdef CONFIG_IPV6_PIMSM_V2
1433 add_proto_fail:
1434 	unregister_netdevice_notifier(&ip6_mr_notifier);
1435 #endif
1436 reg_notif_fail:
1437 	unregister_pernet_subsys(&ip6mr_net_ops);
1438 reg_pernet_fail:
1439 	kmem_cache_destroy(mrt_cachep);
1440 	return err;
1441 }
1442 
1443 void ip6_mr_cleanup(void)
1444 {
1445 	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1446 #ifdef CONFIG_IPV6_PIMSM_V2
1447 	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1448 #endif
1449 	unregister_netdevice_notifier(&ip6_mr_notifier);
1450 	unregister_pernet_subsys(&ip6mr_net_ops);
1451 	kmem_cache_destroy(mrt_cachep);
1452 }
1453 
1454 static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1455 			 struct mf6cctl *mfc, int mrtsock, int parent)
1456 {
1457 	bool found = false;
1458 	int line;
1459 	struct mfc6_cache *uc, *c;
1460 	unsigned char ttls[MAXMIFS];
1461 	int i;
1462 
1463 	if (mfc->mf6cc_parent >= MAXMIFS)
1464 		return -ENFILE;
1465 
1466 	memset(ttls, 255, MAXMIFS);
1467 	for (i = 0; i < MAXMIFS; i++) {
1468 		if (IF_ISSET(i, &mfc->mf6cc_ifset))
1469 			ttls[i] = 1;
1470 
1471 	}
1472 
1473 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1474 
1475 	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1476 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1477 		    ipv6_addr_equal(&c->mf6c_mcastgrp,
1478 				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
1479 		    (parent == -1 || parent == mfc->mf6cc_parent)) {
1480 			found = true;
1481 			break;
1482 		}
1483 	}
1484 
1485 	if (found) {
1486 		write_lock_bh(&mrt_lock);
1487 		c->mf6c_parent = mfc->mf6cc_parent;
1488 		ip6mr_update_thresholds(mrt, c, ttls);
1489 		if (!mrtsock)
1490 			c->mfc_flags |= MFC_STATIC;
1491 		write_unlock_bh(&mrt_lock);
1492 		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1493 		return 0;
1494 	}
1495 
1496 	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1497 	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1498 		return -EINVAL;
1499 
1500 	c = ip6mr_cache_alloc();
1501 	if (!c)
1502 		return -ENOMEM;
1503 
1504 	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1505 	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1506 	c->mf6c_parent = mfc->mf6cc_parent;
1507 	ip6mr_update_thresholds(mrt, c, ttls);
1508 	if (!mrtsock)
1509 		c->mfc_flags |= MFC_STATIC;
1510 
1511 	write_lock_bh(&mrt_lock);
1512 	list_add(&c->list, &mrt->mfc6_cache_array[line]);
1513 	write_unlock_bh(&mrt_lock);
1514 
1515 	/*
1516 	 *	Check to see if we resolved a queued list. If so we
1517 	 *	need to send on the frames and tidy up.
1518 	 */
1519 	found = false;
1520 	spin_lock_bh(&mfc_unres_lock);
1521 	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1522 		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1523 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1524 			list_del(&uc->list);
1525 			atomic_dec(&mrt->cache_resolve_queue_len);
1526 			found = true;
1527 			break;
1528 		}
1529 	}
1530 	if (list_empty(&mrt->mfc6_unres_queue))
1531 		del_timer(&mrt->ipmr_expire_timer);
1532 	spin_unlock_bh(&mfc_unres_lock);
1533 
1534 	if (found) {
1535 		ip6mr_cache_resolve(net, mrt, uc, c);
1536 		ip6mr_cache_free(uc);
1537 	}
1538 	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1539 	return 0;
1540 }
1541 
1542 /*
1543  *	Close the multicast socket, and clear the vif tables etc.
1544  */
1545 
1546 static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1547 {
1548 	int i;
1549 	LIST_HEAD(list);
1550 	struct mfc6_cache *c, *next;
1551 
1552 	/*
1553 	 *	Shut down all active vif entries
1554 	 */
1555 	for (i = 0; i < mrt->maxvif; i++) {
1556 		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1557 			continue;
1558 		mif6_delete(mrt, i, 0, &list);
1559 	}
1560 	unregister_netdevice_many(&list);
1561 
1562 	/*
1563 	 *	Wipe the cache
1564 	 */
1565 	for (i = 0; i < MFC6_LINES; i++) {
1566 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1567 			if (!all && (c->mfc_flags & MFC_STATIC))
1568 				continue;
1569 			write_lock_bh(&mrt_lock);
1570 			list_del(&c->list);
1571 			write_unlock_bh(&mrt_lock);
1572 
1573 			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1574 			ip6mr_cache_free(c);
1575 		}
1576 	}
1577 
1578 	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1579 		spin_lock_bh(&mfc_unres_lock);
1580 		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1581 			list_del(&c->list);
1582 			mr6_netlink_event(mrt, c, RTM_DELROUTE);
1583 			ip6mr_destroy_unres(mrt, c);
1584 		}
1585 		spin_unlock_bh(&mfc_unres_lock);
1586 	}
1587 }
1588 
1589 static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1590 {
1591 	int err = 0;
1592 	struct net *net = sock_net(sk);
1593 
1594 	rtnl_lock();
1595 	write_lock_bh(&mrt_lock);
1596 	if (likely(mrt->mroute6_sk == NULL)) {
1597 		mrt->mroute6_sk = sk;
1598 		net->ipv6.devconf_all->mc_forwarding++;
1599 	} else {
1600 		err = -EADDRINUSE;
1601 	}
1602 	write_unlock_bh(&mrt_lock);
1603 
1604 	if (!err)
1605 		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1606 					     NETCONFA_MC_FORWARDING,
1607 					     NETCONFA_IFINDEX_ALL,
1608 					     net->ipv6.devconf_all);
1609 	rtnl_unlock();
1610 
1611 	return err;
1612 }
1613 
1614 int ip6mr_sk_done(struct sock *sk)
1615 {
1616 	int err = -EACCES;
1617 	struct net *net = sock_net(sk);
1618 	struct mr6_table *mrt;
1619 
1620 	if (sk->sk_type != SOCK_RAW ||
1621 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1622 		return err;
1623 
1624 	rtnl_lock();
1625 	ip6mr_for_each_table(mrt, net) {
1626 		if (sk == mrt->mroute6_sk) {
1627 			write_lock_bh(&mrt_lock);
1628 			mrt->mroute6_sk = NULL;
1629 			net->ipv6.devconf_all->mc_forwarding--;
1630 			write_unlock_bh(&mrt_lock);
1631 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1632 						     NETCONFA_MC_FORWARDING,
1633 						     NETCONFA_IFINDEX_ALL,
1634 						     net->ipv6.devconf_all);
1635 
1636 			mroute_clean_tables(mrt, false);
1637 			err = 0;
1638 			break;
1639 		}
1640 	}
1641 	rtnl_unlock();
1642 
1643 	return err;
1644 }
1645 
1646 struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1647 {
1648 	struct mr6_table *mrt;
1649 	struct flowi6 fl6 = {
1650 		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
1651 		.flowi6_oif	= skb->dev->ifindex,
1652 		.flowi6_mark	= skb->mark,
1653 	};
1654 
1655 	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1656 		return NULL;
1657 
1658 	return mrt->mroute6_sk;
1659 }
1660 
1661 /*
1662  *	Socket options and virtual interface manipulation. The whole
1663  *	virtual interface system is a complete heap, but unfortunately
1664  *	that's how BSD mrouted happens to think. Maybe one day with a proper
1665  *	MOSPF/PIM router set up we can clean this up.
1666  */
1667 
1668 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1669 {
1670 	int ret, parent = 0;
1671 	struct mif6ctl vif;
1672 	struct mf6cctl mfc;
1673 	mifi_t mifi;
1674 	struct net *net = sock_net(sk);
1675 	struct mr6_table *mrt;
1676 
1677 	if (sk->sk_type != SOCK_RAW ||
1678 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1679 		return -EOPNOTSUPP;
1680 
1681 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1682 	if (!mrt)
1683 		return -ENOENT;
1684 
1685 	if (optname != MRT6_INIT) {
1686 		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1687 			return -EACCES;
1688 	}
1689 
1690 	switch (optname) {
1691 	case MRT6_INIT:
1692 		if (optlen < sizeof(int))
1693 			return -EINVAL;
1694 
1695 		return ip6mr_sk_init(mrt, sk);
1696 
1697 	case MRT6_DONE:
1698 		return ip6mr_sk_done(sk);
1699 
1700 	case MRT6_ADD_MIF:
1701 		if (optlen < sizeof(vif))
1702 			return -EINVAL;
1703 		if (copy_from_user(&vif, optval, sizeof(vif)))
1704 			return -EFAULT;
1705 		if (vif.mif6c_mifi >= MAXMIFS)
1706 			return -ENFILE;
1707 		rtnl_lock();
1708 		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1709 		rtnl_unlock();
1710 		return ret;
1711 
1712 	case MRT6_DEL_MIF:
1713 		if (optlen < sizeof(mifi_t))
1714 			return -EINVAL;
1715 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1716 			return -EFAULT;
1717 		rtnl_lock();
1718 		ret = mif6_delete(mrt, mifi, 0, NULL);
1719 		rtnl_unlock();
1720 		return ret;
1721 
1722 	/*
1723 	 *	Manipulate the forwarding caches. These live
1724 	 *	in a sort of kernel/user symbiosis.
1725 	 */
1726 	case MRT6_ADD_MFC:
1727 	case MRT6_DEL_MFC:
1728 		parent = -1;
1729 		/* fall through */
1730 	case MRT6_ADD_MFC_PROXY:
1731 	case MRT6_DEL_MFC_PROXY:
1732 		if (optlen < sizeof(mfc))
1733 			return -EINVAL;
1734 		if (copy_from_user(&mfc, optval, sizeof(mfc)))
1735 			return -EFAULT;
1736 		if (parent == 0)
1737 			parent = mfc.mf6cc_parent;
1738 		rtnl_lock();
1739 		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1740 			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1741 		else
1742 			ret = ip6mr_mfc_add(net, mrt, &mfc,
1743 					    sk == mrt->mroute6_sk, parent);
1744 		rtnl_unlock();
1745 		return ret;
1746 
1747 	/*
1748 	 *	Control PIM assert (activating PIM also activates assert)
1749 	 */
1750 	case MRT6_ASSERT:
1751 	{
1752 		int v;
1753 
1754 		if (optlen != sizeof(v))
1755 			return -EINVAL;
1756 		if (get_user(v, (int __user *)optval))
1757 			return -EFAULT;
1758 		mrt->mroute_do_assert = v;
1759 		return 0;
1760 	}
1761 
1762 #ifdef CONFIG_IPV6_PIMSM_V2
1763 	case MRT6_PIM:
1764 	{
1765 		int v;
1766 
1767 		if (optlen != sizeof(v))
1768 			return -EINVAL;
1769 		if (get_user(v, (int __user *)optval))
1770 			return -EFAULT;
1771 		v = !!v;
1772 		rtnl_lock();
1773 		ret = 0;
1774 		if (v != mrt->mroute_do_pim) {
1775 			mrt->mroute_do_pim = v;
1776 			mrt->mroute_do_assert = v;
1777 		}
1778 		rtnl_unlock();
1779 		return ret;
1780 	}
1781 
1782 #endif
1783 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1784 	case MRT6_TABLE:
1785 	{
1786 		u32 v;
1787 
1788 		if (optlen != sizeof(u32))
1789 			return -EINVAL;
1790 		if (get_user(v, (u32 __user *)optval))
1791 			return -EFAULT;
1792 		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1793 		if (v != RT_TABLE_DEFAULT && v >= 100000000)
1794 			return -EINVAL;
1795 		if (sk == mrt->mroute6_sk)
1796 			return -EBUSY;
1797 
1798 		rtnl_lock();
1799 		ret = 0;
1800 		if (!ip6mr_new_table(net, v))
1801 			ret = -ENOMEM;
1802 		raw6_sk(sk)->ip6mr_table = v;
1803 		rtnl_unlock();
1804 		return ret;
1805 	}
1806 #endif
1807 	/*
1808 	 *	Spurious command, or MRT6_VERSION which you cannot
1809 	 *	set.
1810 	 */
1811 	default:
1812 		return -ENOPROTOOPT;
1813 	}
1814 }
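
/*
 * Editor's sketch (added): how a daemon drives the setsockopt() interface
 * above. It assumes the socket already did MRT6_INIT (see the earlier
 * sketch); interface indexes and addresses are placeholders. Note that
 * MIF indexes (mifi_t) are small numbers chosen by the daemon, distinct
 * from kernel ifindexes.
 */
#if 0	/* illustrative user-space code, compile separately */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>

static int add_mif(int s, mifi_t mifi, int ifindex)
{
	struct mif6ctl mc;

	memset(&mc, 0, sizeof(mc));
	mc.mif6c_mifi = mifi;		/* slot in vif6_table */
	mc.mif6c_pifi = ifindex;	/* physical interface index */
	return setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
}

static int add_sg_route(int s, const char *src, const char *grp,
			mifi_t iif, mifi_t oif)
{
	struct mf6cctl mf;

	memset(&mf, 0, sizeof(mf));
	mf.mf6cc_origin.sin6_family = AF_INET6;
	inet_pton(AF_INET6, src, &mf.mf6cc_origin.sin6_addr);
	mf.mf6cc_mcastgrp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, grp, &mf.mf6cc_mcastgrp.sin6_addr);
	mf.mf6cc_parent = iif;		/* expected incoming MIF */
	IF_SET(oif, &mf.mf6cc_ifset);	/* forward out of this MIF */
	return setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mf, sizeof(mf));
}
#endif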
1815 
1816 /*
1817  *	getsockopt() support for the multicast routing system.
1818  */
1819 
1820 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1821 			  int __user *optlen)
1822 {
1823 	int olr;
1824 	int val;
1825 	struct net *net = sock_net(sk);
1826 	struct mr6_table *mrt;
1827 
1828 	if (sk->sk_type != SOCK_RAW ||
1829 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1830 		return -EOPNOTSUPP;
1831 
1832 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1833 	if (!mrt)
1834 		return -ENOENT;
1835 
1836 	switch (optname) {
1837 	case MRT6_VERSION:
1838 		val = 0x0305;
1839 		break;
1840 #ifdef CONFIG_IPV6_PIMSM_V2
1841 	case MRT6_PIM:
1842 		val = mrt->mroute_do_pim;
1843 		break;
1844 #endif
1845 	case MRT6_ASSERT:
1846 		val = mrt->mroute_do_assert;
1847 		break;
1848 	default:
1849 		return -ENOPROTOOPT;
1850 	}
1851 
1852 	if (get_user(olr, optlen))
1853 		return -EFAULT;
1854 
1855 	olr = min_t(int, olr, sizeof(int));
1856 	if (olr < 0)
1857 		return -EINVAL;
1858 
1859 	if (put_user(olr, optlen))
1860 		return -EFAULT;
1861 	if (copy_to_user(optval, &val, olr))
1862 		return -EFAULT;
1863 	return 0;
1864 }
1865 
1866 /*
1867  *	The IPv6 multicast ioctl support routines.
1868  */
1869 
1870 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1871 {
1872 	struct sioc_sg_req6 sr;
1873 	struct sioc_mif_req6 vr;
1874 	struct mif_device *vif;
1875 	struct mfc6_cache *c;
1876 	struct net *net = sock_net(sk);
1877 	struct mr6_table *mrt;
1878 
1879 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1880 	if (!mrt)
1881 		return -ENOENT;
1882 
1883 	switch (cmd) {
1884 	case SIOCGETMIFCNT_IN6:
1885 		if (copy_from_user(&vr, arg, sizeof(vr)))
1886 			return -EFAULT;
1887 		if (vr.mifi >= mrt->maxvif)
1888 			return -EINVAL;
1889 		read_lock(&mrt_lock);
1890 		vif = &mrt->vif6_table[vr.mifi];
1891 		if (MIF_EXISTS(mrt, vr.mifi)) {
1892 			vr.icount = vif->pkt_in;
1893 			vr.ocount = vif->pkt_out;
1894 			vr.ibytes = vif->bytes_in;
1895 			vr.obytes = vif->bytes_out;
1896 			read_unlock(&mrt_lock);
1897 
1898 			if (copy_to_user(arg, &vr, sizeof(vr)))
1899 				return -EFAULT;
1900 			return 0;
1901 		}
1902 		read_unlock(&mrt_lock);
1903 		return -EADDRNOTAVAIL;
1904 	case SIOCGETSGCNT_IN6:
1905 		if (copy_from_user(&sr, arg, sizeof(sr)))
1906 			return -EFAULT;
1907 
1908 		read_lock(&mrt_lock);
1909 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1910 		if (c) {
1911 			sr.pktcnt = c->mfc_un.res.pkt;
1912 			sr.bytecnt = c->mfc_un.res.bytes;
1913 			sr.wrong_if = c->mfc_un.res.wrong_if;
1914 			read_unlock(&mrt_lock);
1915 
1916 			if (copy_to_user(arg, &sr, sizeof(sr)))
1917 				return -EFAULT;
1918 			return 0;
1919 		}
1920 		read_unlock(&mrt_lock);
1921 		return -EADDRNOTAVAIL;
1922 	default:
1923 		return -ENOIOCTLCMD;
1924 	}
1925 }
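
/*
 * Editor's sketch (added): polling the per-(S,G) counters through the
 * ioctl above, on the mrouter (raw ICMPv6) socket. Addresses are
 * placeholders; the ioctl fails with EADDRNOTAVAIL if no matching cache
 * entry exists.
 */
#if 0	/* illustrative user-space code, compile separately */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>

static int dump_sg_counters(int s, const char *src, const char *grp)
{
	struct sioc_sg_req6 sr;

	memset(&sr, 0, sizeof(sr));
	inet_pton(AF_INET6, src, &sr.src.sin6_addr);
	inet_pton(AF_INET6, grp, &sr.grp.sin6_addr);
	if (ioctl(s, SIOCGETSGCNT_IN6, &sr) < 0)
		return -1;
	printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
	       sr.pktcnt, sr.bytecnt, sr.wrong_if);
	return 0;
}
#endif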
1926 
1927 #ifdef CONFIG_COMPAT
1928 struct compat_sioc_sg_req6 {
1929 	struct sockaddr_in6 src;
1930 	struct sockaddr_in6 grp;
1931 	compat_ulong_t pktcnt;
1932 	compat_ulong_t bytecnt;
1933 	compat_ulong_t wrong_if;
1934 };
1935 
1936 struct compat_sioc_mif_req6 {
1937 	mifi_t	mifi;
1938 	compat_ulong_t icount;
1939 	compat_ulong_t ocount;
1940 	compat_ulong_t ibytes;
1941 	compat_ulong_t obytes;
1942 };
1943 
1944 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1945 {
1946 	struct compat_sioc_sg_req6 sr;
1947 	struct compat_sioc_mif_req6 vr;
1948 	struct mif_device *vif;
1949 	struct mfc6_cache *c;
1950 	struct net *net = sock_net(sk);
1951 	struct mr6_table *mrt;
1952 
1953 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1954 	if (!mrt)
1955 		return -ENOENT;
1956 
1957 	switch (cmd) {
1958 	case SIOCGETMIFCNT_IN6:
1959 		if (copy_from_user(&vr, arg, sizeof(vr)))
1960 			return -EFAULT;
1961 		if (vr.mifi >= mrt->maxvif)
1962 			return -EINVAL;
1963 		read_lock(&mrt_lock);
1964 		vif = &mrt->vif6_table[vr.mifi];
1965 		if (MIF_EXISTS(mrt, vr.mifi)) {
1966 			vr.icount = vif->pkt_in;
1967 			vr.ocount = vif->pkt_out;
1968 			vr.ibytes = vif->bytes_in;
1969 			vr.obytes = vif->bytes_out;
1970 			read_unlock(&mrt_lock);
1971 
1972 			if (copy_to_user(arg, &vr, sizeof(vr)))
1973 				return -EFAULT;
1974 			return 0;
1975 		}
1976 		read_unlock(&mrt_lock);
1977 		return -EADDRNOTAVAIL;
1978 	case SIOCGETSGCNT_IN6:
1979 		if (copy_from_user(&sr, arg, sizeof(sr)))
1980 			return -EFAULT;
1981 
1982 		read_lock(&mrt_lock);
1983 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1984 		if (c) {
1985 			sr.pktcnt = c->mfc_un.res.pkt;
1986 			sr.bytecnt = c->mfc_un.res.bytes;
1987 			sr.wrong_if = c->mfc_un.res.wrong_if;
1988 			read_unlock(&mrt_lock);
1989 
1990 			if (copy_to_user(arg, &sr, sizeof(sr)))
1991 				return -EFAULT;
1992 			return 0;
1993 		}
1994 		read_unlock(&mrt_lock);
1995 		return -EADDRNOTAVAIL;
1996 	default:
1997 		return -ENOIOCTLCMD;
1998 	}
1999 }
2000 #endif
2001 
2002 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2003 {
2004 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2005 			IPSTATS_MIB_OUTFORWDATAGRAMS);
2006 	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
2007 			IPSTATS_MIB_OUTOCTETS, skb->len);
2008 	return dst_output(net, sk, skb);
2009 }
2010 
2011 /*
2012  *	Processing handlers for ip6mr_forward
2013  */
2014 
2015 static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
2016 			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
2017 {
2018 	struct ipv6hdr *ipv6h;
2019 	struct mif_device *vif = &mrt->vif6_table[vifi];
2020 	struct net_device *dev;
2021 	struct dst_entry *dst;
2022 	struct flowi6 fl6;
2023 
2024 	if (!vif->dev)
2025 		goto out_free;
2026 
2027 #ifdef CONFIG_IPV6_PIMSM_V2
2028 	if (vif->flags & MIFF_REGISTER) {
2029 		vif->pkt_out++;
2030 		vif->bytes_out += skb->len;
2031 		vif->dev->stats.tx_bytes += skb->len;
2032 		vif->dev->stats.tx_packets++;
2033 		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2034 		goto out_free;
2035 	}
2036 #endif
2037 
2038 	ipv6h = ipv6_hdr(skb);
2039 
2040 	fl6 = (struct flowi6) {
2041 		.flowi6_oif = vif->link,
2042 		.daddr = ipv6h->daddr,
2043 	};
2044 
2045 	dst = ip6_route_output(net, NULL, &fl6);
2046 	if (dst->error) {
2047 		dst_release(dst);
2048 		goto out_free;
2049 	}
2050 
2051 	skb_dst_drop(skb);
2052 	skb_dst_set(skb, dst);
2053 
2054 	/*
2055 	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
2056 	 * locally not only before forwarding, but also after forwarding on
2057 	 * all output interfaces. Clearly, if the mrouter runs a multicast
2058 	 * program, that program should receive packets regardless of which
2059 	 * interface it joined on.
2060 	 * If we did not do this, the program would have to join on all
2061 	 * interfaces. On the other hand, a multihomed host (or router, but
2062 	 * not mrouter) cannot join on more than one interface - that would
2063 	 * result in receiving multiple copies of each packet.
2064 	 */
2065 	dev = vif->dev;
2066 	skb->dev = dev;
2067 	vif->pkt_out++;
2068 	vif->bytes_out += skb->len;
2069 
	/* We are about to modify the header, so make sure the skb is
	 * writable and has enough headroom for the output device.
	 */
	/* XXX: extension headers? */
2072 	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2073 		goto out_free;
2074 
2075 	ipv6h = ipv6_hdr(skb);
2076 	ipv6h->hop_limit--;
2077 
2078 	IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2079 
2080 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2081 		       net, NULL, skb, skb->dev, dev,
2082 		       ip6mr_forward2_finish);
2083 
2084 out_free:
2085 	kfree_skb(skb);
2086 	return 0;
2087 }
2088 
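/*
 * Map a net_device back to its slot in the vif table; returns -1 if the
 * device is not a configured MIF.
 */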
2089 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2090 {
2091 	int ct;
2092 
2093 	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2094 		if (mrt->vif6_table[ct].dev == dev)
2095 			break;
2096 	}
2097 	return ct;
2098 }
2099 
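/*
 * Forward a packet that resolved to @cache: update the entry's packet,
 * byte and last-use counters, handle wrong-interface arrivals (possibly
 * reporting MRT6MSG_WRONGMIF so the daemon can send a PIM assert), and
 * clone the skb to every outgoing interface whose TTL threshold is
 * exceeded by the packet's hop limit.  The last eligible interface gets
 * the original skb instead of a clone.
 */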
2100 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2101 			   struct sk_buff *skb, struct mfc6_cache *cache)
2102 {
2103 	int psend = -1;
2104 	int vif, ct;
2105 	int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2106 
2107 	vif = cache->mf6c_parent;
2108 	cache->mfc_un.res.pkt++;
2109 	cache->mfc_un.res.bytes += skb->len;
2110 	cache->mfc_un.res.lastuse = jiffies;
2111 
2112 	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2113 		struct mfc6_cache *cache_proxy;
2114 
2115 		/* For an (*,G) entry, we only check that the incoming
2116 		 * interface is part of the static tree.
2117 		 */
2118 		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2119 		if (cache_proxy &&
2120 		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2121 			goto forward;
2122 	}
2123 
2124 	/*
2125 	 * Wrong interface: drop packet and (maybe) send PIM assert.
2126 	 */
2127 	if (mrt->vif6_table[vif].dev != skb->dev) {
2128 		cache->mfc_un.res.wrong_if++;
2129 
2130 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to
		       the SPT, so we cannot check that the packet arrived
		       on an oif.  That is unfortunate, but the alternative
		       would be moving a pretty large chunk of pimd into
		       the kernel. --ANK
		     */
2136 		    (mrt->mroute_do_pim ||
2137 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
2138 		    time_after(jiffies,
2139 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2140 			cache->mfc_un.res.last_assert = jiffies;
2141 			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2142 		}
2143 		goto dont_forward;
2144 	}
2145 
2146 forward:
2147 	mrt->vif6_table[vif].pkt_in++;
2148 	mrt->vif6_table[vif].bytes_in += skb->len;
2149 
2150 	/*
2151 	 *	Forward the frame
2152 	 */
2153 	if (ipv6_addr_any(&cache->mf6c_origin) &&
2154 	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2155 		if (true_vifi >= 0 &&
2156 		    true_vifi != cache->mf6c_parent &&
2157 		    ipv6_hdr(skb)->hop_limit >
2158 				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2159 			/* It's an (*,*) entry and the packet is not coming from
2160 			 * the upstream: forward the packet to the upstream
2161 			 * only.
2162 			 */
2163 			psend = cache->mf6c_parent;
2164 			goto last_forward;
2165 		}
2166 		goto dont_forward;
2167 	}
2168 	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2169 		/* For (*,G) entry, don't forward to the incoming interface */
2170 		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2171 		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2172 			if (psend != -1) {
2173 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2174 				if (skb2)
2175 					ip6mr_forward2(net, mrt, skb2, cache, psend);
2176 			}
2177 			psend = ct;
2178 		}
2179 	}
2180 last_forward:
2181 	if (psend != -1) {
2182 		ip6mr_forward2(net, mrt, skb, cache, psend);
2183 		return;
2184 	}
2185 
2186 dont_forward:
2187 	kfree_skb(skb);
2188 }
2189 
2190 
2191 /*
2192  *	Multicast packets for forwarding arrive here
2193  */
2194 
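/*
 * Look up the MFC cache under mrt_lock, trying an exact (S,G) match
 * first and falling back to any-source (*,G) entries.  Packets that
 * miss the cache entirely are queued as unresolved so that the routing
 * daemon can be asked to resolve them.
 */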
2195 int ip6_mr_input(struct sk_buff *skb)
2196 {
2197 	struct mfc6_cache *cache;
2198 	struct net *net = dev_net(skb->dev);
2199 	struct mr6_table *mrt;
2200 	struct flowi6 fl6 = {
2201 		.flowi6_iif	= skb->dev->ifindex,
2202 		.flowi6_mark	= skb->mark,
2203 	};
2204 	int err;
2205 
2206 	err = ip6mr_fib_lookup(net, &fl6, &mrt);
2207 	if (err < 0) {
2208 		kfree_skb(skb);
2209 		return err;
2210 	}
2211 
2212 	read_lock(&mrt_lock);
2213 	cache = ip6mr_cache_find(mrt,
2214 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2215 	if (!cache) {
2216 		int vif = ip6mr_find_vif(mrt, skb->dev);
2217 
2218 		if (vif >= 0)
2219 			cache = ip6mr_cache_find_any(mrt,
2220 						     &ipv6_hdr(skb)->daddr,
2221 						     vif);
2222 	}
2223 
2224 	/*
2225 	 *	No usable cache entry
2226 	 */
2227 	if (!cache) {
2228 		int vif;
2229 
2230 		vif = ip6mr_find_vif(mrt, skb->dev);
2231 		if (vif >= 0) {
2232 			int err = ip6mr_cache_unresolved(mrt, vif, skb);
2233 			read_unlock(&mrt_lock);
2234 
2235 			return err;
2236 		}
2237 		read_unlock(&mrt_lock);
2238 		kfree_skb(skb);
2239 		return -ENODEV;
2240 	}
2241 
2242 	ip6_mr_forward(net, mrt, skb, cache);
2243 
2244 	read_unlock(&mrt_lock);
2245 
2246 	return 0;
2247 }
2248 
2249 
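/*
 * Fill the forwarding-related attributes of a route message: RTA_IIF
 * for the parent vif, an RTA_MULTIPATH nest holding one rtnexthop per
 * outgoing interface, plus RTA_MFC_STATS and RTA_EXPIRES.  For an
 * unresolved entry only RTNH_F_UNRESOLVED is set and -ENOENT returned.
 */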
2250 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2251 			       struct mfc6_cache *c, struct rtmsg *rtm)
2252 {
2253 	struct rta_mfc_stats mfcs;
2254 	struct nlattr *mp_attr;
2255 	struct rtnexthop *nhp;
2256 	unsigned long lastuse;
2257 	int ct;
2258 
2259 	/* If cache is unresolved, don't try to parse IIF and OIF */
2260 	if (c->mf6c_parent >= MAXMIFS) {
2261 		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
2262 		return -ENOENT;
2263 	}
2264 
2265 	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2266 	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2267 		return -EMSGSIZE;
2268 	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2269 	if (!mp_attr)
2270 		return -EMSGSIZE;
2271 
2272 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2273 		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2274 			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2275 			if (!nhp) {
2276 				nla_nest_cancel(skb, mp_attr);
2277 				return -EMSGSIZE;
2278 			}
2279 
2280 			nhp->rtnh_flags = 0;
2281 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2282 			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2283 			nhp->rtnh_len = sizeof(*nhp);
2284 		}
2285 	}
2286 
2287 	nla_nest_end(skb, mp_attr);
2288 
2289 	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2290 	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2291 
2292 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
2293 	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2294 	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2295 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2296 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2297 			      RTA_PAD))
2298 		return -EMSGSIZE;
2299 
2300 	rtm->rtm_type = RTN_MULTICAST;
2301 	return 1;
2302 }
2303 
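/*
 * Resolve an RTM_GETROUTE request against the multicast cache.  When no
 * cache entry exists, a minimal IPv6 header carrying the route's source
 * and destination is queued as an unresolved entry, so the routing
 * daemon is asked to resolve it and the answer arrives asynchronously.
 */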
2304 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2305 		    u32 portid)
2306 {
2307 	int err;
2308 	struct mr6_table *mrt;
2309 	struct mfc6_cache *cache;
2310 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2311 
2312 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2313 	if (!mrt)
2314 		return -ENOENT;
2315 
2316 	read_lock(&mrt_lock);
2317 	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2318 	if (!cache && skb->dev) {
2319 		int vif = ip6mr_find_vif(mrt, skb->dev);
2320 
2321 		if (vif >= 0)
2322 			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2323 						     vif);
2324 	}
2325 
2326 	if (!cache) {
2327 		struct sk_buff *skb2;
2328 		struct ipv6hdr *iph;
2329 		struct net_device *dev;
2330 		int vif;
2331 
2332 		dev = skb->dev;
2333 		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2334 			read_unlock(&mrt_lock);
2335 			return -ENODEV;
2336 		}
2337 
2338 		/* really correct? */
2339 		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2340 		if (!skb2) {
2341 			read_unlock(&mrt_lock);
2342 			return -ENOMEM;
2343 		}
2344 
2345 		NETLINK_CB(skb2).portid = portid;
2346 		skb_reset_transport_header(skb2);
2347 
2348 		skb_put(skb2, sizeof(struct ipv6hdr));
2349 		skb_reset_network_header(skb2);
2350 
2351 		iph = ipv6_hdr(skb2);
2352 		iph->version = 0;
2353 		iph->priority = 0;
2354 		iph->flow_lbl[0] = 0;
2355 		iph->flow_lbl[1] = 0;
2356 		iph->flow_lbl[2] = 0;
2357 		iph->payload_len = 0;
2358 		iph->nexthdr = IPPROTO_NONE;
2359 		iph->hop_limit = 0;
2360 		iph->saddr = rt->rt6i_src.addr;
2361 		iph->daddr = rt->rt6i_dst.addr;
2362 
2363 		err = ip6mr_cache_unresolved(mrt, vif, skb2);
2364 		read_unlock(&mrt_lock);
2365 
2366 		return err;
2367 	}
2368 
2369 	if (rtm->rtm_flags & RTM_F_NOTIFY)
2370 		cache->mfc_flags |= MFC_NOTIFY;
2371 
2372 	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2373 	read_unlock(&mrt_lock);
2374 	return err;
2375 }
2376 
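/*
 * Build a complete rtnetlink message (family RTNL_FAMILY_IP6MR, /128
 * source and group prefixes) for one MFC entry; used both by the dump
 * path below and by mr6_netlink_event() notifications.
 */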
2377 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2378 			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2379 			     int flags)
2380 {
2381 	struct nlmsghdr *nlh;
2382 	struct rtmsg *rtm;
2383 	int err;
2384 
2385 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2386 	if (!nlh)
2387 		return -EMSGSIZE;
2388 
2389 	rtm = nlmsg_data(nlh);
2390 	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
2391 	rtm->rtm_dst_len  = 128;
2392 	rtm->rtm_src_len  = 128;
2393 	rtm->rtm_tos      = 0;
2394 	rtm->rtm_table    = mrt->id;
2395 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2396 		goto nla_put_failure;
2397 	rtm->rtm_type = RTN_MULTICAST;
2398 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2399 	if (c->mfc_flags & MFC_STATIC)
2400 		rtm->rtm_protocol = RTPROT_STATIC;
2401 	else
2402 		rtm->rtm_protocol = RTPROT_MROUTED;
2403 	rtm->rtm_flags    = 0;
2404 
2405 	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2406 	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2407 		goto nla_put_failure;
2408 	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2409 	/* do not break the dump if cache is unresolved */
2410 	if (err < 0 && err != -ENOENT)
2411 		goto nla_put_failure;
2412 
2413 	nlmsg_end(skb, nlh);
2414 	return 0;
2415 
2416 nla_put_failure:
2417 	nlmsg_cancel(skb, nlh);
2418 	return -EMSGSIZE;
2419 }
2420 
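/*
 * Worst-case message size for an MFC notification; must be kept in sync
 * with the attributes emitted by ip6mr_fill_mroute().
 */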
2421 static int mr6_msgsize(bool unresolved, int maxvif)
2422 {
2423 	size_t len =
2424 		NLMSG_ALIGN(sizeof(struct rtmsg))
2425 		+ nla_total_size(4)	/* RTA_TABLE */
2426 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
2427 		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
2428 		;
2429 
2430 	if (!unresolved)
2431 		len = len
2432 		      + nla_total_size(4)	/* RTA_IIF */
2433 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2434 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2435 						/* RTA_MFC_STATS */
2436 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2437 		;
2438 
2439 	return len;
2440 }
2441 
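/*
 * Notify RTNLGRP_IPV6_MROUTE listeners about an added or deleted MFC
 * entry; failures are reported to the group via rtnl_set_sk_err().
 */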
2442 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2443 			      int cmd)
2444 {
2445 	struct net *net = read_pnet(&mrt->net);
2446 	struct sk_buff *skb;
2447 	int err = -ENOBUFS;
2448 
2449 	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2450 			GFP_ATOMIC);
2451 	if (!skb)
2452 		goto errout;
2453 
2454 	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2455 	if (err < 0)
2456 		goto errout;
2457 
2458 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2459 	return;
2460 
2461 errout:
2462 	kfree_skb(skb);
2463 	if (err < 0)
2464 		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2465 }
2466 
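/* Message size for a cache report carrying payloadlen bytes of packet data. */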
2467 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2468 {
2469 	size_t len =
2470 		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2471 		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
2472 		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
2473 					/* IP6MRA_CREPORT_SRC_ADDR */
2474 		+ nla_total_size(sizeof(struct in6_addr))
2475 					/* IP6MRA_CREPORT_DST_ADDR */
2476 		+ nla_total_size(sizeof(struct in6_addr))
2477 					/* IP6MRA_CREPORT_PKT */
2478 		+ nla_total_size(payloadlen)
2479 		;
2480 
2481 	return len;
2482 }
2483 
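/*
 * Mirror an mrt6msg cache report to RTNLGRP_IPV6_MROUTE_R listeners as
 * an RTM_NEWCACHEREPORT message, so the report can be consumed over
 * netlink in addition to the mroute6 socket upcall.
 */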
2484 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
2485 {
2486 	struct net *net = read_pnet(&mrt->net);
2487 	struct nlmsghdr *nlh;
2488 	struct rtgenmsg *rtgenm;
2489 	struct mrt6msg *msg;
2490 	struct sk_buff *skb;
2491 	struct nlattr *nla;
2492 	int payloadlen;
2493 
2494 	payloadlen = pkt->len - sizeof(struct mrt6msg);
2495 	msg = (struct mrt6msg *)skb_transport_header(pkt);
2496 
2497 	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2498 	if (!skb)
2499 		goto errout;
2500 
2501 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2502 			sizeof(struct rtgenmsg), 0);
2503 	if (!nlh)
2504 		goto errout;
2505 	rtgenm = nlmsg_data(nlh);
2506 	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2507 	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2508 	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2509 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2510 			     &msg->im6_src) ||
2511 	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2512 			     &msg->im6_dst))
2513 		goto nla_put_failure;
2514 
2515 	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2516 	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2517 				  nla_data(nla), payloadlen))
2518 		goto nla_put_failure;
2519 
2520 	nlmsg_end(skb, nlh);
2521 
2522 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2523 	return;
2524 
2525 nla_put_failure:
2526 	nlmsg_cancel(skb, nlh);
2527 errout:
2528 	kfree_skb(skb);
2529 	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2530 }
2531 
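/*
 * Netlink dump callback: walk every table, each resolved cache line and
 * then the unresolved queue, using cb->args[0..2] (table, line, entry)
 * to resume an interrupted dump.
 */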
2532 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2533 {
2534 	struct net *net = sock_net(skb->sk);
2535 	struct mr6_table *mrt;
2536 	struct mfc6_cache *mfc;
2537 	unsigned int t = 0, s_t;
2538 	unsigned int h = 0, s_h;
2539 	unsigned int e = 0, s_e;
2540 
2541 	s_t = cb->args[0];
2542 	s_h = cb->args[1];
2543 	s_e = cb->args[2];
2544 
2545 	read_lock(&mrt_lock);
2546 	ip6mr_for_each_table(mrt, net) {
2547 		if (t < s_t)
2548 			goto next_table;
2549 		if (t > s_t)
2550 			s_h = 0;
2551 		for (h = s_h; h < MFC6_LINES; h++) {
2552 			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2553 				if (e < s_e)
2554 					goto next_entry;
2555 				if (ip6mr_fill_mroute(mrt, skb,
2556 						      NETLINK_CB(cb->skb).portid,
2557 						      cb->nlh->nlmsg_seq,
2558 						      mfc, RTM_NEWROUTE,
2559 						      NLM_F_MULTI) < 0)
2560 					goto done;
2561 next_entry:
2562 				e++;
2563 			}
2564 			e = s_e = 0;
2565 		}
2566 		spin_lock_bh(&mfc_unres_lock);
2567 		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2568 			if (e < s_e)
2569 				goto next_entry2;
2570 			if (ip6mr_fill_mroute(mrt, skb,
2571 					      NETLINK_CB(cb->skb).portid,
2572 					      cb->nlh->nlmsg_seq,
2573 					      mfc, RTM_NEWROUTE,
2574 					      NLM_F_MULTI) < 0) {
2575 				spin_unlock_bh(&mfc_unres_lock);
2576 				goto done;
2577 			}
2578 next_entry2:
2579 			e++;
2580 		}
2581 		spin_unlock_bh(&mfc_unres_lock);
2582 		e = s_e = 0;
2583 		s_h = 0;
2584 next_table:
2585 		t++;
2586 	}
2587 done:
2588 	read_unlock(&mrt_lock);
2589 
2590 	cb->args[2] = e;
2591 	cb->args[1] = h;
2592 	cb->args[0] = t;
2593 
2594 	return skb->len;
2595 }
2596