// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/flow.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/rtnh.h>
#include <net/inet_dscp.h>

#include <linux/nospec.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_SPINLOCK(mrt_lock);

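/* Fetch the device bound to a VIF; the returned pointer is only valid
 * under rcu_read_lock().
 */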
static struct net_device *vif_dev_read(const struct vif_device *vif)
{
	return rcu_dereference(vif->dev);
}

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __ro_after_init;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt,
			    struct list_head *dev_kill_list);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(const struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags,
				struct list_head *dev_kill_list);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net)					\
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,	\
				lockdep_rtnl_is_held() ||		\
				list_empty(&net->ipv4.mr_tables))

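/* Walk the list of multicast routing tables: with mrt == NULL return the
 * first table, otherwise the one following mrt, or NULL at the end of the
 * list. Must be called under RCU.
 */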
static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv4.mr_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv4.mr_tables)
		return NULL;
	return ret;
}

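/* Find a multicast routing table by id; the caller must hold RCU or RTNL. */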
static struct mr_table *__ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = __ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

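/* ipmr rules carry no selector of their own; every packet matches and the
 * table is picked by the rule action.
 */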
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb,
			       struct netlink_ext_ack *extack)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	LIST_HEAD(dev_kill_list);
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt, &dev_kill_list);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}

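/* Unlink and free all tables; runs during netns teardown under RTNL. */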
static void __net_exit ipmr_rules_exit_rtnl(struct net *net,
					    struct list_head *dev_kill_list)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del_rcu(&mrt->list);
		ipmr_free_table(mrt, dev_kill_list);
	}
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
			   struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
}

static unsigned int ipmr_rules_seq_read(const struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	if (!mrt)
		return rcu_dereference(net->ipv4.mrt);
	return NULL;
}

static struct mr_table *__ipmr_get_table(struct net *net, u32 id)
{
	return rcu_dereference_check(net->ipv4.mrt,
				     lockdep_rtnl_is_held() ||
				     !rcu_access_pointer(net->ipv4.mrt));
}

#define ipmr_for_each_table(mrt, net)				\
	for (mrt = __ipmr_get_table(net, 0); mrt; mrt = NULL)

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = rcu_dereference(net->ipv4.mrt);
	if (!*mrt)
		return -EAGAIN;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);

	rcu_assign_pointer(net->ipv4.mrt, mrt);
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
}

static void __net_exit ipmr_rules_exit_rtnl(struct net *net,
					    struct list_head *dev_kill_list)
{
	struct mr_table *mrt = rcu_dereference_protected(net->ipv4.mrt, 1);

	RCU_INIT_POINTER(net->ipv4.mrt, NULL);
	ipmr_free_table(mrt, dev_kill_list);
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
			   struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ipmr_rules_seq_read(const struct net *net)
{
	return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	rcu_read_lock();
	mrt = __ipmr_get_table(net, id);
	rcu_read_unlock();

	return mrt;
}

static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	const struct mfc_cache *c = ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};

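/* Publish a freshly allocated table on the per-netns list (multiple-table
 * builds only; single-table builds keep the table in net->ipv4.mrt).
 */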
static void ipmr_new_table_set(struct mr_table *mrt,
			       struct net *net)
{
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
}

static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
	.mfc_mcastgrp = htonl(INADDR_ANY),
	.mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops ipmr_mr_table_ops = {
	.rht_params = &ipmr_rht_params,
	.cmparg_any = &ipmr_mr_table_ops_cmparg_any,
};

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = __ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ipmr_mr_table_ops,
			      ipmr_expire_process, ipmr_new_table_set);
}

static void ipmr_free_table(struct mr_table *mrt, struct list_head *dev_kill_list)
{
	struct net *net = read_pnet(&mrt->net);
	LIST_HEAD(ipmr_dev_kill_list);

	WARN_ON_ONCE(!mr_can_free_table(net));

	mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
			    MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC,
			    &ipmr_dev_kill_list);
	timer_shutdown_sync(&mrt->ipmr_expire_timer);
	mr_table_free(mrt);

	WARN_ON_ONCE(!net_initialized(net) && !list_empty(&ipmr_dev_kill_list));
	list_splice(&ipmr_dev_kill_list, dev_kill_list);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *tunnel_dev, *new_dev;
	struct ip_tunnel_parm_kern p = { };
	int err;

	tunnel_dev = __dev_get_by_name(net, "tunl0");
	if (!tunnel_dev)
		goto out;

	p.iph.daddr = v->vifc_rmt_addr.s_addr;
	p.iph.saddr = v->vifc_lcl_addr.s_addr;
	p.iph.version = 4;
	p.iph.ihl = 5;
	p.iph.protocol = IPPROTO_IPIP;
	sprintf(p.name, "dvmrp%d", v->vifc_vifi);

	if (!tunnel_dev->netdev_ops->ndo_tunnel_ctl)
		goto out;
	err = tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
			SIOCADDTUNNEL);
	if (err)
		goto out;

	new_dev = __dev_get_by_name(net, p.name);
	if (!new_dev)
		goto out;

	new_dev->flags |= IFF_MULTICAST;
	if (!ipmr_init_vif_indev(new_dev))
		goto out_unregister;
	if (dev_open(new_dev, NULL))
		goto out_unregister;
	dev_hold(new_dev);
	err = dev_set_allmulti(new_dev, 1);
	if (err) {
		dev_close(new_dev);
		tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
				SIOCDELTUNNEL);
		dev_put(new_dev);
		new_dev = ERR_PTR(err);
	}
	return new_dev;

out_unregister:
	unregister_netdevice(new_dev);
out:
	return ERR_PTR(-ENOBUFS);
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	DEV_STATS_ADD(dev, tx_bytes, skb->len);
	DEV_STATS_INC(dev, tx_packets);
	rcu_read_lock();

	/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
	ipmr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
			  IGMPMSG_WHOLEPKT);

	rcu_read_unlock();
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

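/* The pimreg device is purely virtual and has no underlying link. */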
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->netns_immutable	= true;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;
	int vif_num;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	/* Pairs with WRITE_ONCE() in vif_add()/vif_delete() */
	vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
	if (vif_num >= 0)
		reg_dev = vif_dev_read(&mrt->vif_table[vif_num]);
	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

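/* Propagate VIF changes to FIB notifier chain listeners (e.g. switchdev
 * offload drivers), passing along the per-netns ipmr sequence counter.
 */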
static int call_ipmr_vif_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct vif_device *vif,
					 struct net_device *vif_dev,
					 vifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     vif, vif_dev, vif_index, tb_id,
				     &net->ipv4.ipmr_seq);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct mfc_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}

/**
 *	vif_delete - Delete a VIF entry
 *	@mrt: Table to delete from
 *	@vifi: VIF identifier to delete
 *	@notify: Set to 1 if the caller is a notifier_call
 *	@head: if unregistering the VIF, place it on this queue
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct net *net = read_pnet(&mrt->net);
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	dev = rtnl_dereference(v->dev);
	if (!dev)
		return -EADDRNOTAVAIL;

	spin_lock(&mrt_lock);
	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, dev,
				      vifi, mrt->id);
	RCU_INIT_POINTER(v->dev, NULL);

	if (vifi == mrt->mroute_reg_vif_num) {
		/* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
		WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
	}
	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		WRITE_ONCE(mrt->maxvif, tmp + 1);
	}

	spin_unlock(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	netdev_put(dev, &v->dev_tracker);
	return 0;
}

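/* RCU callback that returns an MFC cache entry to the slab cache. */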
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}

static void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = timer_container_of(mrt, t, ipmr_expire_timer);
	struct mr_mfc *c, *next;
	unsigned long expires;
	unsigned long now;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10 * HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;

			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill the oifs list. Called with mrt_lock held. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
}

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	struct netdev_phys_item_id ppid = { };
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit,
			vifc->vifc_threshold,
			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
			(VIFF_TUNNEL | VIFF_REGISTER));

	err = netif_get_port_parent_id(dev, &ppid, true);
	if (err == 0) {
		memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
		v->dev_parent_id.id_len = ppid.id_len;
	} else {
		v->dev_parent_id.id_len = 0;
	}

	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;

	/* And finish update writing critical data */
	spin_lock(&mrt_lock);
	rcu_assign_pointer(v->dev, dev);
	netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
	if (v->flags & VIFF_REGISTER) {
		/* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
		WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
	}
	if (vifi + 1 > mrt->maxvif)
		WRITE_ONCE(mrt->maxvif, vifi + 1);
	spin_unlock(&mrt_lock);
	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, dev,
				      vifi, mrt->id);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = htonl(INADDR_ANY)
	};

	if (mcastgrp == htonl(INADDR_ANY))
		return mr_mfc_find_any_parent(mrt, vifi);
	return mr_mfc_find_any(mrt, vifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->_c.mfc_un.res.minvif = MAXVIFS;
		c->_c.free = ipmr_cache_free_rcu;
		refcount_set(&c->_c.mfc_un.res.refcount, 1);
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
		c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			rcu_read_lock();
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
			rcu_read_unlock();
		}
	}
}

/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under rcu_read_lock().
 */
static int ipmr_cache_report(const struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk)
		return -EINVAL;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = assert;
		msg->im_mbz = 0;
		if (assert == IGMPMSG_WRVIFWHOLE) {
			msg->im_vif = vifi;
			msg->im_vif_hi = vifi >> 8;
		} else {
			/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
			int vif_num = READ_ONCE(mrt->mroute_reg_vif_num);

			msg->im_vif = vif_num;
			msg->im_vif_hi = vif_num >> 8;
		}
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		msg->im_vif_hi = vifi >> 8;
		ipv4_pktinfo_prepare(mroute_sk, pkt, false);
		memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);

	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution. It gets a locked cache entry!
 * Called under rcu_read_lock().
 */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = read_pnet(&mrt->net);
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c = NULL;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);

	if (!check_net(net)) {
		err = -EINVAL;
		goto err;
	}

	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		c = ipmr_cache_alloc_unres();
		if (!c) {
			err = -ENOBUFS;
			goto err;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0)
			goto err;

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer,
				  c->_c.mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		c = NULL;
		err = -ENOBUFS;
		goto err;
	}

	if (dev) {
		skb->dev = dev;
		skb->skb_iif = dev->ifindex;
	}

	skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);

	spin_unlock_bh(&mfc_unres_lock);
	return 0;

err:
	spin_unlock_bh(&mfc_unres_lock);
	if (c)
		ipmr_cache_free(c);
	kfree_skb(skb);
	return err;
}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct net *net = read_pnet(&mrt->net);
	struct mfc_cache *c;

	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
	list_del_rcu(&c->_c.list);
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);

	return 0;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		spin_lock(&mrt_lock);
		c->_c.mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		spin_unlock(&mrt_lock);
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
					      mrt->id);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->_c.mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc_cache *)_uc;
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		timer_delete(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, int flags,
				struct list_head *dev_kill_list)
{
	struct net *net = read_pnet(&mrt->net);
	struct mfc_cache *cache;
	struct mr_mfc *c, *tmp;
	int i;

	/* Shut down all active vif entries */
	if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
		for (i = 0; i < mrt->maxvif; i++) {
			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
			     !(flags & MRT_FLUSH_VIFS_STATIC)) ||
			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
				continue;
			vif_delete(mrt, i, 0, dev_kill_list);
		}
	}

	/* Wipe the cache */
	if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
		mutex_lock(&net->ipv4.mfc_mutex);

		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) ||
			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC)))
				continue;
			rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
			list_del_rcu(&c->list);
			cache = (struct mfc_cache *)c;
			call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
						      mrt->id);
			mroute_netlink_event(mrt, cache, RTM_DELROUTE);
			mr_cache_put(c);
		}

		mutex_unlock(&net->ipv4.mfc_mutex);
	}

	if (flags & MRT_FLUSH_MFC) {
		if (atomic_read(&mrt->cache_resolve_queue_len) != 0 || !check_net(net)) {
			spin_lock_bh(&mfc_unres_lock);
			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
				list_del(&c->list);
				cache = (struct mfc_cache *)c;
				mroute_netlink_event(mrt, cache, RTM_DELROUTE);
				ipmr_destroy_unres(mrt, cache);
			}
			spin_unlock_bh(&mfc_unres_lock);
		}
	}
}

/* Called from ip_ra_control() before an RCU grace period;
 * we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	LIST_HEAD(dev_kill_list);
	struct mr_table *mrt;

	rtnl_lock();

	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC,
					    &dev_kill_list);
		}
	}

	unregister_netdevice_many(&dev_kill_list);

	rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	bool do_wrvifwhole;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = __ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		fallthrough;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;

		mutex_lock(&net->ipv4.mfc_mutex);

		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);

		mutex_unlock(&net->ipv4.mfc_mutex);
		break;
	case MRT_FLUSH: {
		LIST_HEAD(dev_kill_list);

		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(val))) {
			ret = -EFAULT;
			break;
		}

		mroute_clean_tables(mrt, val, &dev_kill_list);
		unregister_netdevice_many(&dev_kill_list);
		break;
	}
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(val))) {
			ret = -EFAULT;
			break;
		}
		WRITE_ONCE(mrt->mroute_do_assert, val);
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(val))) {
			ret = -EFAULT;
			break;
		}

		do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
		val = !!val;
		if (val != mrt->mroute_do_pim) {
			WRITE_ONCE(mrt->mroute_do_pim, val);
			WRITE_ONCE(mrt->mroute_do_assert, val);
			WRITE_ONCE(mrt->mroute_do_wrvifwhole, do_wrvifwhole);
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&uval, optval, sizeof(uval))) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}

/* Execute if this ioctl is a special mroute ioctl */
int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	/* These userspace buffers will be consumed by ipmr_ioctl() */
	case SIOCGETVIFCNT: {
		struct sioc_vif_req buffer;

		return sock_ioctl_inout(sk, cmd, arg, &buffer,
					sizeof(buffer));
		}
	case SIOCGETSGCNT: {
		struct sioc_sg_req buffer;

		return sock_ioctl_inout(sk, cmd, arg, &buffer,
					sizeof(buffer));
		}
	}
	/* return code > 0 means that the ioctl was not executed */
	return 1;
}

/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
			 sockptr_t optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = READ_ONCE(mrt->mroute_do_pim);
		break;
	case MRT_ASSERT:
		val = READ_ONCE(mrt->mroute_do_assert);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (copy_from_sockptr(&olr, optlen, sizeof(int)))
		return -EFAULT;
	if (olr < 0)
		return -EINVAL;

	olr = min_t(unsigned int, olr, sizeof(int));

	if (copy_to_sockptr(optlen, &olr, sizeof(int)))
		return -EFAULT;
	if (copy_to_sockptr(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
{
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct sioc_vif_req *vr;
	struct sioc_sg_req *sr;
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		vr = (struct sioc_vif_req *)arg;
		if (vr->vifi >= mrt->maxvif)
			return -EINVAL;
		vr->vifi = array_index_nospec(vr->vifi, mrt->maxvif);
		rcu_read_lock();
		vif = &mrt->vif_table[vr->vifi];
		if (VIF_EXISTS(mrt, vr->vifi)) {
			vr->icount = READ_ONCE(vif->pkt_in);
			vr->ocount = READ_ONCE(vif->pkt_out);
			vr->ibytes = READ_ONCE(vif->bytes_in);
			vr->obytes = READ_ONCE(vif->bytes_out);
			rcu_read_unlock();

			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		sr = (struct sioc_sg_req *)arg;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr);
		if (c) {
			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
			rcu_read_unlock();
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		rcu_read_lock();
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = READ_ONCE(vif->pkt_in);
			vr.ocount = READ_ONCE(vif->pkt_out);
			vr.ibytes = READ_ONCE(vif->bytes_in);
			vr.obytes = READ_ONCE(vif->bytes_out);
			rcu_read_unlock();

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

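/* Netdevice notifier: when a device is unregistered, delete every VIF
 * (in all tables) that still references it.
 */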
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (rcu_access_pointer(v->dev) == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->tos	=	old_iph->tos;
	iph->ttl	=	old_iph->ttl;
	iph->frag_off	=	0;
	iph->daddr	=	daddr;
	iph->saddr	=	saddr;
	iph->protocol	=	IPPROTO_IPIP;
	iph->ihl	=	5;
	iph->tot_len	=	htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset_ct(skb);
}

static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	struct vif_device *out_vif = &mrt->vif_table[out_vifi];
	struct vif_device *in_vif = &mrt->vif_table[in_vifi];

	if (!skb->offload_l3_fwd_mark)
		return false;
	if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
		return false;
	return netdev_phys_item_id_same(&out_vif->dev_parent_id,
					&in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	return false;
}
#endif

/* Processing handlers for ipmr_forward, under rcu_read_lock() */

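/* Resolve the output route for @vifi, account the packet on the VIF and
 * prepare the skb for transmission. Returns 0 when the skb is ready to be
 * sent, -1 on error or after bouncing the packet to the daemon; on -1 the
 * caller still owns and frees the skb.
 */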
1897 static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt,
1898 			     struct sk_buff *skb, int vifi)
1899 {
1900 	const struct iphdr *iph = ip_hdr(skb);
1901 	struct vif_device *vif = &mrt->vif_table[vifi];
1902 	struct net_device *vif_dev;
1903 	struct rtable *rt;
1904 	struct flowi4 fl4;
1905 	int    encap = 0;
1906 
1907 	vif_dev = vif_dev_read(vif);
1908 	if (!vif_dev)
1909 		return -1;
1910 
1911 	if (vif->flags & VIFF_REGISTER) {
1912 		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
1913 		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
1914 		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
1915 		DEV_STATS_INC(vif_dev, tx_packets);
1916 		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1917 		return -1;
1918 	}
1919 
1920 	if (vif->flags & VIFF_TUNNEL) {
1921 		rt = ip_route_output_ports(net, &fl4, NULL,
1922 					   vif->remote, vif->local,
1923 					   0, 0,
1924 					   IPPROTO_IPIP,
1925 					   iph->tos & INET_DSCP_MASK, vif->link);
1926 		if (IS_ERR(rt))
1927 			return -1;
1928 		encap = sizeof(struct iphdr);
1929 	} else {
1930 		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1931 					   0, 0,
1932 					   IPPROTO_IPIP,
1933 					   iph->tos & INET_DSCP_MASK, vif->link);
1934 		if (IS_ERR(rt))
1935 			return -1;
1936 	}
1937 
1938 	if (skb->len+encap > dst4_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1939 		/* Do not fragment multicasts. Alas, IPv4 offers no way
1940 		 * to send ICMP here, so such packets simply disappear
1941 		 * into a black hole.
1942 		 */
1943 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1944 		ip_rt_put(rt);
1945 		return -1;
1946 	}
1947 
1948 	encap += LL_RESERVED_SPACE(dst_dev_rcu(&rt->dst)) + rt->dst.header_len;
1949 
1950 	if (skb_cow(skb, encap)) {
1951 		ip_rt_put(rt);
1952 		return -1;
1953 	}
1954 
1955 	WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
1956 	WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
1957 
1958 	skb_dst_drop(skb);
1959 	skb_dst_set(skb, &rt->dst);
1960 	ip_decrease_ttl(ip_hdr(skb));
1961 
1962 	/* FIXME: forward and output firewalls used to be called here.
1963 	 * What do we do with netfilter? -- RR
1964 	 */
1965 	if (vif->flags & VIFF_TUNNEL) {
1966 		ip_encap(net, skb, vif->local, vif->remote);
1967 		/* FIXME: extra output firewall step used to be here. --RR */
1968 		DEV_STATS_INC(vif_dev, tx_packets);
1969 		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
1970 	}
1971 
1972 	return 0;
1973 }
1974 
1975 static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt,
1976 				int in_vifi, struct sk_buff *skb, int vifi)
1977 {
1978 	struct rtable *rt;
1979 
1980 	if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
1981 		goto out_free;
1982 
1983 	if (ipmr_prepare_xmit(net, mrt, skb, vifi))
1984 		goto out_free;
1985 
1986 	rt = skb_rtable(skb);
1987 
1988 	IPCB(skb)->flags |= IPSKB_FORWARDED;
1989 
1990 	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
1991 	 * locally not only before forwarding, but also after forwarding on
1992 	 * all output interfaces. Clearly, if the mrouter runs a multicast
1993 	 * application, that application should receive packets regardless
1994 	 * of which interface it joined on.
1995 	 * If we did not do this, the application would have to join on all
1996 	 * interfaces. On the other hand, a multihomed host (or router, but
1997 	 * not an mrouter) cannot join on more than one interface - that
1998 	 * would result in receiving duplicate packets.
1999 	 */
2000 	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
2001 		net, NULL, skb, skb->dev, dst_dev_rcu(&rt->dst),
2002 		ipmr_forward_finish);
2003 	return;
2004 
2005 out_free:
2006 	kfree_skb(skb);
2007 }
2008 
2009 static void ipmr_queue_output_xmit(struct net *net, struct mr_table *mrt,
2010 				   struct sk_buff *skb, int vifi)
2011 {
2012 	if (ipmr_prepare_xmit(net, mrt, skb, vifi))
2013 		goto out_free;
2014 
2015 	ip_mc_output(net, NULL, skb);
2016 	return;
2017 
2018 out_free:
2019 	kfree_skb(skb);
2020 }
2021 
2022 /* Called with mrt_lock or rcu_read_lock() */
2023 static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
2024 {
2025 	int ct;
2026 	/* Pairs with WRITE_ONCE() in vif_delete()/vif_add() */
2027 	for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
2028 		if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
2029 			break;
2030 	}
2031 	return ct;
2032 }
2033 
2034 /* "local" means that we should preserve one skb (for local delivery) */
2035 /* Called under rcu_read_lock() */
2036 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
2037 			  struct net_device *dev, struct sk_buff *skb,
2038 			  struct mfc_cache *c, int local)
2039 {
2040 	int true_vifi = ipmr_find_vif(mrt, dev);
2041 	int psend = -1;
2042 	int vif, ct;
2043 
2044 	vif = c->_c.mfc_parent;
2045 	atomic_long_inc(&c->_c.mfc_un.res.pkt);
2046 	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
2047 	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
2048 
2049 	if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
2050 		struct mfc_cache *cache_proxy;
2051 
2052 		/* For an (*,G) entry, we only check that the incoming
2053 		 * interface is part of the static tree.
2054 		 */
2055 		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2056 		if (cache_proxy &&
2057 		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
2058 			goto forward;
2059 	}
2060 
2061 	/* Wrong interface: drop packet and (maybe) send PIM assert. */
2062 	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
2063 		if (rt_is_output_route(skb_rtable(skb))) {
2064 			/* It is our own packet, looped back.
2065 			 * Very complicated situation...
2066 			 *
2067 			 * The best workaround until the routing daemons are
2068 			 * fixed is not to redistribute a packet if it was
2069 			 * sent through the wrong interface. It means that
2070 			 * multicast applications WILL NOT work for
2071 			 * (S,G) entries whose default multicast route points
2072 			 * to the wrong oif. In any case, it is not a good
2073 			 * idea to run multicast applications on a router.
2074 			 */
2075 			goto dont_forward;
2076 		}
2077 
2078 		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
2079 
2080 		if (true_vifi >= 0 && READ_ONCE(mrt->mroute_do_assert) &&
2081 		    /* PIM-SM uses asserts when switching from the RPT to the
2082 		     * SPT, so we cannot insist that the packet arrived on an
2083 		     * oif. That is bad, but otherwise we would need to move a
2084 		     * pretty large chunk of pimd into the kernel. Ugh... --ANK
2085 		     */
2086 		    (READ_ONCE(mrt->mroute_do_pim) ||
2087 		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2088 		    time_after(jiffies,
2089 			       c->_c.mfc_un.res.last_assert +
2090 			       MFC_ASSERT_THRESH)) {
2091 			c->_c.mfc_un.res.last_assert = jiffies;
2092 			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
2093 			if (READ_ONCE(mrt->mroute_do_wrvifwhole))
2094 				ipmr_cache_report(mrt, skb, true_vifi,
2095 						  IGMPMSG_WRVIFWHOLE);
2096 		}
2097 		goto dont_forward;
2098 	}
2099 
2100 forward:
2101 	WRITE_ONCE(mrt->vif_table[vif].pkt_in,
2102 		   mrt->vif_table[vif].pkt_in + 1);
2103 	WRITE_ONCE(mrt->vif_table[vif].bytes_in,
2104 		   mrt->vif_table[vif].bytes_in + skb->len);
2105 
2106 	/* Forward the frame */
2107 	if (c->mfc_origin == htonl(INADDR_ANY) &&
2108 	    c->mfc_mcastgrp == htonl(INADDR_ANY)) {
2109 		if (true_vifi >= 0 &&
2110 		    true_vifi != c->_c.mfc_parent &&
2111 		    ip_hdr(skb)->ttl >
2112 				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2113 			/* It's an (*,*) entry and the packet is not coming from
2114 			 * the upstream: forward the packet to the upstream
2115 			 * only.
2116 			 */
2117 			psend = c->_c.mfc_parent;
2118 			goto last_forward;
2119 		}
2120 		goto dont_forward;
2121 	}
2122 	for (ct = c->_c.mfc_un.res.maxvif - 1;
2123 	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2124 		/* For (*,G) entry, don't forward to the incoming interface */
2125 		if ((c->mfc_origin != htonl(INADDR_ANY) ||
2126 		     ct != true_vifi) &&
2127 		    ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
2128 			if (psend != -1) {
2129 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2130 
2131 				if (skb2)
2132 					ipmr_queue_fwd_xmit(net, mrt, true_vifi,
2133 							    skb2, psend);
2134 			}
2135 			psend = ct;
2136 		}
2137 	}
2138 last_forward:
2139 	if (psend != -1) {
2140 		if (local) {
2141 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2142 
2143 			if (skb2)
2144 				ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb2,
2145 						    psend);
2146 		} else {
2147 			ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb, psend);
2148 			return;
2149 		}
2150 	}
2151 
2152 dont_forward:
2153 	if (!local)
2154 		kfree_skb(skb);
2155 }
2156 
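/* Select the multicast routing table for @skb: build a flowi4 from the
 * IP header (for output routes the oif is the sending device and the iif
 * is loopback) and run it through the multicast routing rules.
 */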
2157 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
2158 {
2159 	struct rtable *rt = skb_rtable(skb);
2160 	struct iphdr *iph = ip_hdr(skb);
2161 	struct flowi4 fl4 = {
2162 		.daddr = iph->daddr,
2163 		.saddr = iph->saddr,
2164 		.flowi4_dscp = ip4h_dscp(iph),
2165 		.flowi4_oif = (rt_is_output_route(rt) ?
2166 			       skb->dev->ifindex : 0),
2167 		.flowi4_iif = (rt_is_output_route(rt) ?
2168 			       LOOPBACK_IFINDEX :
2169 			       skb->dev->ifindex),
2170 		.flowi4_mark = skb->mark,
2171 	};
2172 	struct mr_table *mrt;
2173 	int err;
2174 
2175 	err = ipmr_fib_lookup(net, &fl4, &mrt);
2176 	if (err)
2177 		return ERR_PTR(err);
2178 	return mrt;
2179 }
2180 
2181 /* Multicast packets for forwarding arrive here.
2182  * Called under rcu_read_lock().
2183  */
2184 int ip_mr_input(struct sk_buff *skb)
2185 {
2186 	struct mfc_cache *cache;
2187 	struct net *net = dev_net(skb->dev);
2188 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
2189 	struct mr_table *mrt;
2190 	struct net_device *dev;
2191 
2192 	/* The skb->dev passed in is the l3mdev master device for VRFs.
2193 	 * As there are no vifs associated with loopback devices,
2194 	 * get the proper interface that does have a vif associated with it.
2195 	 */
2196 	dev = skb->dev;
2197 	if (netif_is_l3_master(skb->dev)) {
2198 		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2199 		if (!dev) {
2200 			kfree_skb(skb);
2201 			return -ENODEV;
2202 		}
2203 	}
2204 
2205 	/* The packet was looped back after forwarding; it must not be
2206 	 * forwarded a second time, but it can still be delivered locally.
2207 	 */
2208 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
2209 		goto dont_forward;
2210 
2211 	mrt = ipmr_rt_fib_lookup(net, skb);
2212 	if (IS_ERR(mrt)) {
2213 		kfree_skb(skb);
2214 		return PTR_ERR(mrt);
2215 	}
2216 	if (!local) {
2217 		if (IPCB(skb)->opt.router_alert) {
2218 			if (ip_call_ra_chain(skb))
2219 				return 0;
2220 		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
2221 			/* IGMPv1 (and broken IGMPv2 implementations such as
2222 			 * Cisco IOS <= 11.2(8)) do not put the router alert
2223 			 * option into IGMP packets destined for routable
2224 			 * groups. That is very bad, because it means we
2225 			 * would be able to forward NO IGMP messages.
2226 			 */
2227 			struct sock *mroute_sk;
2228 
2229 			mroute_sk = rcu_dereference(mrt->mroute_sk);
2230 			if (mroute_sk) {
2231 				nf_reset_ct(skb);
2232 				raw_rcv(mroute_sk, skb);
2233 				return 0;
2234 			}
2235 		}
2236 	}
2237 
2238 	/* already under rcu_read_lock() */
2239 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2240 	if (!cache) {
2241 		int vif = ipmr_find_vif(mrt, dev);
2242 
2243 		if (vif >= 0)
2244 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2245 						    vif);
2246 	}
2247 
2248 	/* No usable cache entry */
2249 	if (!cache) {
2250 		int vif;
2251 
2252 		if (local) {
2253 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2254 			ip_local_deliver(skb);
2255 			if (!skb2)
2256 				return -ENOBUFS;
2257 			skb = skb2;
2258 		}
2259 
2260 		vif = ipmr_find_vif(mrt, dev);
2261 		if (vif >= 0)
2262 			return ipmr_cache_unresolved(mrt, vif, skb, dev);
2263 		kfree_skb(skb);
2264 		return -ENODEV;
2265 	}
2266 
2267 	ip_mr_forward(net, mrt, dev, skb, cache, local);
2268 
2269 	if (local)
2270 		return ip_local_deliver(skb);
2271 
2272 	return 0;
2273 
2274 dont_forward:
2275 	if (local)
2276 		return ip_local_deliver(skb);
2277 	kfree_skb(skb);
2278 	return 0;
2279 }
2280 
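/* Output-path counterpart of ip_mr_forward() for locally generated
 * packets: update the entry's statistics and replicate @skb to every vif
 * whose TTL threshold the packet clears. The last eligible vif consumes
 * the original skb; clones serve the others.
 */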
2281 static void ip_mr_output_finish(struct net *net, struct mr_table *mrt,
2282 				struct net_device *dev, struct sk_buff *skb,
2283 				struct mfc_cache *c)
2284 {
2285 	int psend = -1;
2286 	int ct;
2287 
2288 	atomic_long_inc(&c->_c.mfc_un.res.pkt);
2289 	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
2290 	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
2291 
2292 	/* Forward the frame */
2293 	if (c->mfc_origin == htonl(INADDR_ANY) &&
2294 	    c->mfc_mcastgrp == htonl(INADDR_ANY)) {
2295 		if (ip_hdr(skb)->ttl >
2296 		    c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2297 			/* It's an (*,*) entry; a locally generated packet
2298 			 * cannot have come from the upstream, so forward it
2299 			 * to the upstream only.
2300 			 */
2301 			psend = c->_c.mfc_parent;
2302 			goto last_xmit;
2303 		}
2304 		goto dont_xmit;
2305 	}
2306 
2307 	for (ct = c->_c.mfc_un.res.maxvif - 1;
2308 	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2309 		if (ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
2310 			if (psend != -1) {
2311 				struct sk_buff *skb2;
2312 
2313 				skb2 = skb_clone(skb, GFP_ATOMIC);
2314 				if (skb2)
2315 					ipmr_queue_output_xmit(net, mrt,
2316 							       skb2, psend);
2317 			}
2318 			psend = ct;
2319 		}
2320 	}
2321 
2322 last_xmit:
2323 	if (psend != -1) {
2324 		ipmr_queue_output_xmit(net, mrt, skb, psend);
2325 		return;
2326 	}
2327 
2328 dont_xmit:
2329 	kfree_skb(skb);
2330 }
2331 
2332 /* Locally generated multicast packets to be forwarded arrive here.
2333  * Takes rcu_read_lock() itself via guard(rcu)().
2334  */
2335 int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2336 {
2337 	struct rtable *rt = skb_rtable(skb);
2338 	struct mfc_cache *cache;
2339 	struct net_device *dev;
2340 	struct mr_table *mrt;
2341 	int vif;
2342 
2343 	guard(rcu)();
2344 
2345 	dev = dst_dev_rcu(&rt->dst);
2346 
2347 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
2348 		goto mc_output;
2349 	if (!(IPCB(skb)->flags & IPSKB_MCROUTE))
2350 		goto mc_output;
2351 
2352 	skb->dev = dev;
2353 
2354 	mrt = ipmr_rt_fib_lookup(net, skb);
2355 	if (IS_ERR(mrt))
2356 		goto mc_output;
2357 
2358 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2359 	if (!cache) {
2360 		vif = ipmr_find_vif(mrt, dev);
2361 		if (vif >= 0)
2362 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2363 						    vif);
2364 	}
2365 
2366 	/* No usable cache entry */
2367 	if (!cache) {
2368 		vif = ipmr_find_vif(mrt, dev);
2369 		if (vif >= 0)
2370 			return ipmr_cache_unresolved(mrt, vif, skb, dev);
2371 		goto mc_output;
2372 	}
2373 
2374 	vif = cache->_c.mfc_parent;
2375 	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev)
2376 		goto mc_output;
2377 
2378 	ip_mr_output_finish(net, mrt, dev, skb, cache);
2379 	return 0;
2380 
2381 mc_output:
2382 	return ip_mc_output(net, sk, skb);
2383 }
2384 
2385 #ifdef CONFIG_IP_PIMSM_V1
2386 /* Handle PIMv1 messages, which arrive encapsulated in IGMP */
2387 int pim_rcv_v1(struct sk_buff *skb)
2388 {
2389 	struct igmphdr *pim;
2390 	struct net *net = dev_net(skb->dev);
2391 	struct mr_table *mrt;
2392 
2393 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2394 		goto drop;
2395 
2396 	pim = igmp_hdr(skb);
2397 
2398 	mrt = ipmr_rt_fib_lookup(net, skb);
2399 	if (IS_ERR(mrt))
2400 		goto drop;
2401 	if (!READ_ONCE(mrt->mroute_do_pim) ||
2402 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2403 		goto drop;
2404 
2405 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2406 drop:
2407 		kfree_skb(skb);
2408 	}
2409 	return 0;
2410 }
2411 #endif
2412 
2413 #ifdef CONFIG_IP_PIMSM_V2
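/* PIMv2 Register handler: accept only Register messages that are not
 * NULL-Registers and whose checksum is valid over either the PIM header
 * alone or the whole packet, then let __pim_rcv() deliver the
 * encapsulated datagram.
 */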
2414 static int pim_rcv(struct sk_buff *skb)
2415 {
2416 	struct pimreghdr *pim;
2417 	struct net *net = dev_net(skb->dev);
2418 	struct mr_table *mrt;
2419 
2420 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2421 		goto drop;
2422 
2423 	pim = (struct pimreghdr *)skb_transport_header(skb);
2424 	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
2425 	    (pim->flags & PIM_NULL_REGISTER) ||
2426 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2427 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2428 		goto drop;
2429 
2430 	mrt = ipmr_rt_fib_lookup(net, skb);
2431 	if (IS_ERR(mrt))
2432 		goto drop;
2433 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2434 drop:
2435 		kfree_skb(skb);
2436 	}
2437 	return 0;
2438 }
2439 #endif
2440 
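/* Back-end for RTM_GETROUTE on an (S,G) pair: fill @rtm from the
 * matching cache entry. If none exists, queue a synthetic packet that
 * carries only a fake IP header (iph->version == 0 marks it as such) on
 * the unresolved queue, so that userspace is asked to resolve the flow.
 */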
2441 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2442 		   __be32 saddr, __be32 daddr,
2443 		   struct rtmsg *rtm, u32 portid)
2444 {
2445 	struct mfc_cache *cache;
2446 	struct mr_table *mrt;
2447 	int err;
2448 
2449 	rcu_read_lock();
2450 	mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
2451 	if (!mrt) {
2452 		rcu_read_unlock();
2453 		return -ENOENT;
2454 	}
2455 
2456 	cache = ipmr_cache_find(mrt, saddr, daddr);
2457 	if (!cache && skb->dev) {
2458 		int vif = ipmr_find_vif(mrt, skb->dev);
2459 
2460 		if (vif >= 0)
2461 			cache = ipmr_cache_find_any(mrt, daddr, vif);
2462 	}
2463 	if (!cache) {
2464 		struct sk_buff *skb2;
2465 		struct iphdr *iph;
2466 		struct net_device *dev;
2467 		int vif = -1;
2468 
2469 		dev = skb->dev;
2470 		if (dev)
2471 			vif = ipmr_find_vif(mrt, dev);
2472 		if (vif < 0) {
2473 			rcu_read_unlock();
2474 			return -ENODEV;
2475 		}
2476 
2477 		skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
2478 		if (!skb2) {
2479 			rcu_read_unlock();
2480 			return -ENOMEM;
2481 		}
2482 
2483 		NETLINK_CB(skb2).portid = portid;
2484 		skb_push(skb2, sizeof(struct iphdr));
2485 		skb_reset_network_header(skb2);
2486 		iph = ip_hdr(skb2);
2487 		iph->ihl = sizeof(struct iphdr) >> 2;
2488 		iph->saddr = saddr;
2489 		iph->daddr = daddr;
2490 		iph->version = 0;
2491 		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2492 		rcu_read_unlock();
2493 		return err;
2494 	}
2495 
2496 	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2497 	rcu_read_unlock();
2498 	return err;
2499 }
2500 
2501 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2502 			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2503 			    int flags)
2504 {
2505 	struct nlmsghdr *nlh;
2506 	struct rtmsg *rtm;
2507 	int err;
2508 
2509 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2510 	if (!nlh)
2511 		return -EMSGSIZE;
2512 
2513 	rtm = nlmsg_data(nlh);
2514 	rtm->rtm_family   = RTNL_FAMILY_IPMR;
2515 	rtm->rtm_dst_len  = 32;
2516 	rtm->rtm_src_len  = 32;
2517 	rtm->rtm_tos      = 0;
2518 	rtm->rtm_table    = mrt->id;
2519 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2520 		goto nla_put_failure;
2521 	rtm->rtm_type     = RTN_MULTICAST;
2522 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2523 	if (c->_c.mfc_flags & MFC_STATIC)
2524 		rtm->rtm_protocol = RTPROT_STATIC;
2525 	else
2526 		rtm->rtm_protocol = RTPROT_MROUTED;
2527 	rtm->rtm_flags    = 0;
2528 
2529 	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2530 	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2531 		goto nla_put_failure;
2532 	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2533 	/* do not break the dump if cache is unresolved */
2534 	if (err < 0 && err != -ENOENT)
2535 		goto nla_put_failure;
2536 
2537 	nlmsg_end(skb, nlh);
2538 	return 0;
2539 
2540 nla_put_failure:
2541 	nlmsg_cancel(skb, nlh);
2542 	return -EMSGSIZE;
2543 }
2544 
2545 static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2546 			     u32 portid, u32 seq, struct mr_mfc *c, int cmd,
2547 			     int flags)
2548 {
2549 	return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
2550 				cmd, flags);
2551 }
2552 
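/* Worst-case size of an RTM_NEWROUTE notification; unresolved entries
 * carry only the table id, source and destination.
 */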
2553 static size_t mroute_msgsize(bool unresolved)
2554 {
2555 	size_t len =
2556 		NLMSG_ALIGN(sizeof(struct rtmsg))
2557 		+ nla_total_size(4)	/* RTA_TABLE */
2558 		+ nla_total_size(4)	/* RTA_SRC */
2559 		+ nla_total_size(4)	/* RTA_DST */
2560 		;
2561 
2562 	if (!unresolved)
2563 		len = len
2564 		      + nla_total_size(4)	/* RTA_IIF */
2565 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2566 		      + MAXVIFS * NLA_ALIGN(sizeof(struct rtnexthop))
2567 						/* RTA_MFC_STATS */
2568 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2569 		;
2570 
2571 	return len;
2572 }
2573 
2574 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2575 				 int cmd)
2576 {
2577 	struct net *net = read_pnet(&mrt->net);
2578 	struct sk_buff *skb;
2579 	int err = -ENOBUFS;
2580 
2581 	skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS),
2582 			GFP_ATOMIC);
2583 	if (!skb)
2584 		goto errout;
2585 
2586 	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2587 	if (err < 0)
2588 		goto errout;
2589 
2590 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2591 	return;
2592 
2593 errout:
2594 	kfree_skb(skb);
2595 	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2596 }
2597 
2598 static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
2599 {
2600 	size_t len =
2601 		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2602 		+ nla_total_size(1)	/* IPMRA_CREPORT_MSGTYPE */
2603 		+ nla_total_size(4)	/* IPMRA_CREPORT_VIF_ID */
2604 		+ nla_total_size(4)	/* IPMRA_CREPORT_SRC_ADDR */
2605 		+ nla_total_size(4)	/* IPMRA_CREPORT_DST_ADDR */
2606 		+ nla_total_size(4)	/* IPMRA_CREPORT_TABLE */
2607 					/* IPMRA_CREPORT_PKT */
2608 		+ nla_total_size(payloadlen)
2609 		;
2610 
2611 	return len;
2612 }
2613 
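/* Mirror an igmpmsg upcall to RTNLGRP_IPV4_MROUTE_R listeners as an
 * RTM_NEWCACHEREPORT message, copying the payload that follows the
 * igmpmsg header into IPMRA_CREPORT_PKT.
 */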
2614 static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
2615 {
2616 	struct net *net = read_pnet(&mrt->net);
2617 	struct nlmsghdr *nlh;
2618 	struct rtgenmsg *rtgenm;
2619 	struct igmpmsg *msg;
2620 	struct sk_buff *skb;
2621 	struct nlattr *nla;
2622 	int payloadlen;
2623 
2624 	payloadlen = pkt->len - sizeof(struct igmpmsg);
2625 	msg = (struct igmpmsg *)skb_network_header(pkt);
2626 
2627 	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2628 	if (!skb)
2629 		goto errout;
2630 
2631 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2632 			sizeof(struct rtgenmsg), 0);
2633 	if (!nlh)
2634 		goto errout;
2635 	rtgenm = nlmsg_data(nlh);
2636 	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
2637 	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
2638 	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif | (msg->im_vif_hi << 8)) ||
2639 	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
2640 			    msg->im_src.s_addr) ||
2641 	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
2642 			    msg->im_dst.s_addr) ||
2643 	    nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id))
2644 		goto nla_put_failure;
2645 
2646 	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
2647 	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
2648 				  nla_data(nla), payloadlen))
2649 		goto nla_put_failure;
2650 
2651 	nlmsg_end(skb, nlh);
2652 
2653 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
2654 	return;
2655 
2656 nla_put_failure:
2657 	nlmsg_cancel(skb, nlh);
2658 errout:
2659 	kfree_skb(skb);
2660 	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
2661 }
2662 
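/* Strict validation of an RTM_GETROUTE request: only RTA_SRC, RTA_DST
 * and RTA_TABLE are accepted, and the rtmsg header must be zero apart
 * from 32-bit source/destination prefix lengths.
 */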
2663 static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
2664 				       const struct nlmsghdr *nlh,
2665 				       struct nlattr **tb,
2666 				       struct netlink_ext_ack *extack)
2667 {
2668 	struct rtmsg *rtm;
2669 	int i, err;
2670 
2671 	rtm = nlmsg_payload(nlh, sizeof(*rtm));
2672 	if (!rtm) {
2673 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
2674 		return -EINVAL;
2675 	}
2676 
2677 	if (!netlink_strict_get_check(skb))
2678 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
2679 					      rtm_ipv4_policy, extack);
2680 
2681 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
2682 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
2683 	    rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
2684 	    rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
2685 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
2686 		return -EINVAL;
2687 	}
2688 
2689 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2690 					    rtm_ipv4_policy, extack);
2691 	if (err)
2692 		return err;
2693 
2694 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
2695 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
2696 		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
2697 		return -EINVAL;
2698 	}
2699 
2700 	for (i = 0; i <= RTA_MAX; i++) {
2701 		if (!tb[i])
2702 			continue;
2703 
2704 		switch (i) {
2705 		case RTA_SRC:
2706 		case RTA_DST:
2707 		case RTA_TABLE:
2708 			break;
2709 		default:
2710 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
2711 			return -EINVAL;
2712 		}
2713 	}
2714 
2715 	return 0;
2716 }
2717 
2718 static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2719 			     struct netlink_ext_ack *extack)
2720 {
2721 	struct net *net = sock_net(in_skb->sk);
2722 	struct nlattr *tb[RTA_MAX + 1];
2723 	struct mfc_cache *cache;
2724 	struct mr_table *mrt;
2725 	struct sk_buff *skb;
2726 	__be32 src, grp;
2727 	u32 tableid;
2728 	int err;
2729 
2730 	err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
2731 	if (err < 0)
2732 		goto errout;
2733 
2734 	src = nla_get_in_addr_default(tb[RTA_SRC], 0);
2735 	grp = nla_get_in_addr_default(tb[RTA_DST], 0);
2736 	tableid = nla_get_u32_default(tb[RTA_TABLE], 0);
2737 
2738 	skb = nlmsg_new(mroute_msgsize(false), GFP_KERNEL);
2739 	if (!skb) {
2740 		err = -ENOBUFS;
2741 		goto errout;
2742 	}
2743 
2744 	rcu_read_lock();
2745 
2746 	mrt = __ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2747 	if (!mrt) {
2748 		err = -ENOENT;
2749 		goto errout_unlock;
2750 	}
2751 
2752 	cache = ipmr_cache_find(mrt, src, grp);
2753 	if (!cache) {
2754 		err = -ENOENT;
2755 		goto errout_unlock;
2756 	}
2757 
2758 	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2759 			       nlh->nlmsg_seq, cache,
2760 			       RTM_NEWROUTE, 0);
2761 	if (err < 0)
2762 		goto errout_unlock;
2763 
2764 	rcu_read_unlock();
2765 
2766 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2767 errout:
2768 	return err;
2769 
2770 errout_unlock:
2771 	rcu_read_unlock();
2772 	kfree_skb(skb);
2773 	goto errout;
2774 }
2775 
2776 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2777 {
2778 	struct fib_dump_filter filter = {
2779 		.rtnl_held = false,
2780 	};
2781 	int err;
2782 
2783 	rcu_read_lock();
2784 
2785 	if (cb->strict_check) {
2786 		err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
2787 					    &filter, cb);
2788 		if (err < 0)
2789 			goto out;
2790 	}
2791 
2792 	if (filter.table_id) {
2793 		struct mr_table *mrt;
2794 
2795 		mrt = __ipmr_get_table(sock_net(skb->sk), filter.table_id);
2796 		if (!mrt) {
2797 			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) {
2798 				err = skb->len;
2799 				goto out;
2800 			}
2801 
2802 			NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
2803 			err = -ENOENT;
2804 			goto out;
2805 		}
2806 
2807 		err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
2808 				    &mfc_unres_lock, &filter);
2809 		err = skb->len ? : err;
2810 		goto out;
2811 	}
2812 
2813 	err = mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
2814 			       _ipmr_fill_mroute, &mfc_unres_lock, &filter);
2815 out:
2816 	rcu_read_unlock();
2817 
2818 	return err;
2819 }
2820 
2821 static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
2822 	[RTA_SRC]	= { .type = NLA_U32 },
2823 	[RTA_DST]	= { .type = NLA_U32 },
2824 	[RTA_IIF]	= { .type = NLA_U32 },
2825 	[RTA_TABLE]	= { .type = NLA_U32 },
2826 	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
2827 };
2828 
2829 static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
2830 {
2831 	switch (rtm_protocol) {
2832 	case RTPROT_STATIC:
2833 	case RTPROT_MROUTED:
2834 		return true;
2835 	}
2836 	return false;
2837 }
2838 
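/* Extract per-vif TTL thresholds from an RTA_MULTIPATH attribute: the
 * i-th rtnexthop supplies the threshold for vif i via rtnh_hops, e.g.
 * thresholds 1 and 3 for vifs 0 and 1 are encoded as two rtnexthops
 * with rtnh_hops = 1 and rtnh_hops = 3. Returns the number of
 * thresholds parsed, or -EINVAL when the attribute is not fully
 * consumed (it is malformed or names more than MAXVIFS hops).
 */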
2839 static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
2840 {
2841 	struct rtnexthop *rtnh = nla_data(nla);
2842 	int remaining = nla_len(nla), vifi = 0;
2843 
2844 	while (rtnh_ok(rtnh, remaining)) {
2845 		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
2846 		if (++vifi == MAXVIFS)
2847 			break;
2848 		rtnh = rtnh_next(rtnh, &remaining);
2849 	}
2850 
2851 	return remaining > 0 ? -EINVAL : vifi;
2852 }
2853 
2854 /* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2855 static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2856 			    struct mfcctl *mfcc, int *mrtsock,
2857 			    struct mr_table **mrtret,
2858 			    struct netlink_ext_ack *extack)
2859 {
2860 	struct net_device *dev = NULL;
2861 	u32 tblid = RT_TABLE_DEFAULT;
2862 	int ret, rem, iif = 0;
2863 	struct mr_table *mrt;
2864 	struct nlattr *attr;
2865 	struct rtmsg *rtm;
2866 
2867 	ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
2868 					rtm_ipmr_policy, extack);
2869 	if (ret < 0)
2870 		goto out;
2871 	rtm = nlmsg_data(nlh);
2872 
2873 	ret = -EINVAL;
2874 	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
2875 	    rtm->rtm_type != RTN_MULTICAST ||
2876 	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
2877 	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
2878 		goto out;
2879 
2880 	memset(mfcc, 0, sizeof(*mfcc));
2881 	mfcc->mfcc_parent = -1;
2882 	ret = 0;
2883 	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
2884 		switch (nla_type(attr)) {
2885 		case RTA_SRC:
2886 			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
2887 			break;
2888 		case RTA_DST:
2889 			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
2890 			break;
2891 		case RTA_IIF:
2892 			iif = nla_get_u32(attr);
2893 			break;
2894 		case RTA_MULTIPATH:
2895 			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
2896 				ret = -EINVAL;
2897 				goto out;
2898 			}
2899 			break;
2900 		case RTA_PREFSRC:
2901 			ret = 1;
2902 			break;
2903 		case RTA_TABLE:
2904 			tblid = nla_get_u32(attr);
2905 			break;
2906 		}
2907 	}
2908 
2909 	rcu_read_lock();
2910 
2911 	mrt = __ipmr_get_table(net, tblid);
2912 	if (!mrt) {
2913 		ret = -ENOENT;
2914 		goto unlock;
2915 	}
2916 
2917 	if (iif) {
2918 		dev = dev_get_by_index_rcu(net, iif);
2919 		if (!dev) {
2920 			ret = -ENODEV;
2921 			goto unlock;
2922 		}
2923 
2924 		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2925 	}
2926 
2927 	*mrtret = mrt;
2928 	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
2929 
2930 unlock:
2931 	rcu_read_unlock();
2932 out:
2933 	return ret;
2934 }
2935 
2936 /* takes care of both newroute and delroute */
2937 static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
2938 			  struct netlink_ext_ack *extack)
2939 {
2940 	struct net *net = sock_net(skb->sk);
2941 	int ret, mrtsock = 0, parent;
2942 	struct mr_table *tbl = NULL;
2943 	struct mfcctl mfcc;
2944 
2945 	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
2946 	if (ret < 0)
2947 		return ret;
2948 
2949 	parent = ret ? mfcc.mfcc_parent : -1;
2950 
2951 	mutex_lock(&net->ipv4.mfc_mutex);
2952 
2953 	if (nlh->nlmsg_type == RTM_NEWROUTE)
2954 		ret = ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
2955 	else
2956 		ret = ipmr_mfc_delete(tbl, &mfcc, parent);
2957 
2958 	mutex_unlock(&net->ipv4.mfc_mutex);
2959 
2960 	return ret;
2961 }
2962 
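/* Per-table and per-vif attribute fillers for the RTM_GETLINK dump
 * below; both return false when the attributes do not fit, so that the
 * dump can stop and resume at the current offset.
 */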
2963 static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
2964 {
2965 	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
2966 
2967 	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
2968 	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
2969 	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
2970 			READ_ONCE(mrt->mroute_reg_vif_num)) ||
2971 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
2972 		       READ_ONCE(mrt->mroute_do_assert)) ||
2973 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM,
2974 		       READ_ONCE(mrt->mroute_do_pim)) ||
2975 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
2976 		       READ_ONCE(mrt->mroute_do_wrvifwhole)))
2977 		return false;
2978 
2979 	return true;
2980 }
2981 
2982 static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
2983 {
2984 	struct net_device *vif_dev;
2985 	struct nlattr *vif_nest;
2986 	struct vif_device *vif;
2987 
2988 	vif = &mrt->vif_table[vifid];
2989 	vif_dev = vif_dev_read(vif);
2990 	/* if the VIF doesn't exist just continue */
2991 	/* if the VIF doesn't exist, just continue */
2992 		return true;
2993 
2994 	vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
2995 	if (!vif_nest)
2996 		return false;
2997 
2998 	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, READ_ONCE(vif_dev->ifindex)) ||
2999 	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
3000 	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
3001 	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, READ_ONCE(vif->bytes_in),
3002 			      IPMRA_VIFA_PAD) ||
3003 	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, READ_ONCE(vif->bytes_out),
3004 			      IPMRA_VIFA_PAD) ||
3005 	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, READ_ONCE(vif->pkt_in),
3006 			      IPMRA_VIFA_PAD) ||
3007 	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, READ_ONCE(vif->pkt_out),
3008 			      IPMRA_VIFA_PAD) ||
3009 	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
3010 	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
3011 		nla_nest_cancel(skb, vif_nest);
3012 		return false;
3013 	}
3014 	nla_nest_end(skb, vif_nest);
3015 
3016 	return true;
3017 }
3018 
3019 static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
3020 			       struct netlink_ext_ack *extack)
3021 {
3022 	struct ifinfomsg *ifm;
3023 
3024 	ifm = nlmsg_payload(nlh, sizeof(*ifm));
3025 	if (!ifm) {
3026 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
3027 		return -EINVAL;
3028 	}
3029 
3030 	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
3031 		NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
3032 		return -EINVAL;
3033 	}
3034 
3035 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3036 	    ifm->ifi_change || ifm->ifi_index) {
3037 		NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
3038 		return -EINVAL;
3039 	}
3040 
3041 	return 0;
3042 }
3043 
3044 static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
3045 {
3046 	struct net *net = sock_net(skb->sk);
3047 	struct nlmsghdr *nlh = NULL;
3048 	unsigned int t = 0, s_t;
3049 	unsigned int e = 0, s_e;
3050 	struct mr_table *mrt;
3051 
3052 	if (cb->strict_check) {
3053 		int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
3054 
3055 		if (err < 0)
3056 			return err;
3057 	}
3058 
3059 	s_t = cb->args[0];
3060 	s_e = cb->args[1];
3061 
3062 	rcu_read_lock();
3063 
3064 	ipmr_for_each_table(mrt, net) {
3065 		struct nlattr *vifs, *af;
3066 		struct ifinfomsg *hdr;
3067 		u32 i;
3068 
3069 		if (t < s_t)
3070 			goto skip_table;
3071 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3072 				cb->nlh->nlmsg_seq, RTM_NEWLINK,
3073 				sizeof(*hdr), NLM_F_MULTI);
3074 		if (!nlh)
3075 			break;
3076 
3077 		hdr = nlmsg_data(nlh);
3078 		memset(hdr, 0, sizeof(*hdr));
3079 		hdr->ifi_family = RTNL_FAMILY_IPMR;
3080 
3081 		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
3082 		if (!af) {
3083 			nlmsg_cancel(skb, nlh);
3084 			goto out;
3085 		}
3086 
3087 		if (!ipmr_fill_table(mrt, skb)) {
3088 			nlmsg_cancel(skb, nlh);
3089 			goto out;
3090 		}
3091 
3092 		vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
3093 		if (!vifs) {
3094 			nla_nest_end(skb, af);
3095 			nlmsg_end(skb, nlh);
3096 			goto out;
3097 		}
3098 		for (i = 0; i < READ_ONCE(mrt->maxvif); i++) {
3099 			if (e < s_e)
3100 				goto skip_entry;
3101 			if (!ipmr_fill_vif(mrt, i, skb)) {
3102 				nla_nest_end(skb, vifs);
3103 				nla_nest_end(skb, af);
3104 				nlmsg_end(skb, nlh);
3105 				goto out;
3106 			}
3107 skip_entry:
3108 			e++;
3109 		}
3110 		s_e = 0;
3111 		e = 0;
3112 		nla_nest_end(skb, vifs);
3113 		nla_nest_end(skb, af);
3114 		nlmsg_end(skb, nlh);
3115 skip_table:
3116 		t++;
3117 	}
3118 
3119 out:
3120 	rcu_read_unlock();
3121 
3122 	cb->args[1] = e;
3123 	cb->args[0] = t;
3124 
3125 	return skb->len;
3126 }
3127 
3128 #ifdef CONFIG_PROC_FS
3129 /* The /proc interfaces to multicast routing:
3130  * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
3131  */
3132 
3133 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
3134 	__acquires(RCU)
3135 {
3136 	struct mr_vif_iter *iter = seq->private;
3137 	struct net *net = seq_file_net(seq);
3138 	struct mr_table *mrt;
3139 
3140 	rcu_read_lock();
3141 	mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
3142 	if (!mrt) {
3143 		rcu_read_unlock();
3144 		return ERR_PTR(-ENOENT);
3145 	}
3146 
3147 	iter->mrt = mrt;
3148 
3149 	return mr_vif_seq_start(seq, pos);
3150 }
3151 
3152 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
3153 	__releases(RCU)
3154 {
3155 	rcu_read_unlock();
3156 }
3157 
3158 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
3159 {
3160 	struct mr_vif_iter *iter = seq->private;
3161 	struct mr_table *mrt = iter->mrt;
3162 
3163 	if (v == SEQ_START_TOKEN) {
3164 		seq_puts(seq,
3165 			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
3166 	} else {
3167 		const struct vif_device *vif = v;
3168 		const struct net_device *vif_dev;
3169 		const char *name;
3170 
3171 		vif_dev = vif_dev_read(vif);
3172 		name = vif_dev ? vif_dev->name : "none";
3173 		seq_printf(seq,
3174 			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
3175 			   vif - mrt->vif_table,
3176 			   name, vif->bytes_in, vif->pkt_in,
3177 			   vif->bytes_out, vif->pkt_out,
3178 			   vif->flags, vif->local, vif->remote);
3179 	}
3180 	return 0;
3181 }
3182 
3183 static const struct seq_operations ipmr_vif_seq_ops = {
3184 	.start = ipmr_vif_seq_start,
3185 	.next  = mr_vif_seq_next,
3186 	.stop  = ipmr_vif_seq_stop,
3187 	.show  = ipmr_vif_seq_show,
3188 };
3189 
3190 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
3191 {
3192 	struct net *net = seq_file_net(seq);
3193 	struct mr_table *mrt;
3194 
3195 	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
3196 	if (!mrt)
3197 		return ERR_PTR(-ENOENT);
3198 
3199 	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
3200 }
3201 
3202 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
3203 {
3204 	int n;
3205 
3206 	if (v == SEQ_START_TOKEN) {
3207 		seq_puts(seq,
3208 		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
3209 	} else {
3210 		const struct mfc_cache *mfc = v;
3211 		const struct mr_mfc_iter *it = seq->private;
3212 		const struct mr_table *mrt = it->mrt;
3213 
3214 		seq_printf(seq, "%08X %08X %-3hd",
3215 			   (__force u32) mfc->mfc_mcastgrp,
3216 			   (__force u32) mfc->mfc_origin,
3217 			   mfc->_c.mfc_parent);
3218 
3219 		if (it->cache != &mrt->mfc_unres_queue) {
3220 			seq_printf(seq, " %8lu %8lu %8lu",
3221 				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
3222 				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
3223 				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
3224 			for (n = mfc->_c.mfc_un.res.minvif;
3225 			     n < mfc->_c.mfc_un.res.maxvif; n++) {
3226 				if (VIF_EXISTS(mrt, n) &&
3227 				    mfc->_c.mfc_un.res.ttls[n] < 255)
3228 					seq_printf(seq,
3229 					   " %2d:%-3d",
3230 					   n, mfc->_c.mfc_un.res.ttls[n]);
3231 			}
3232 		} else {
3233 			/* unresolved mfc_caches don't contain
3234 			 * pkt, bytes and wrong_if values
3235 			 */
3236 			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
3237 		}
3238 		seq_putc(seq, '\n');
3239 	}
3240 	return 0;
3241 }
3242 
3243 static const struct seq_operations ipmr_mfc_seq_ops = {
3244 	.start = ipmr_mfc_seq_start,
3245 	.next  = mr_mfc_seq_next,
3246 	.stop  = mr_mfc_seq_stop,
3247 	.show  = ipmr_mfc_seq_show,
3248 };
3249 #endif
3250 
3251 #ifdef CONFIG_IP_PIMSM_V2
3252 static const struct net_protocol pim_protocol = {
3253 	.handler	=	pim_rcv,
3254 };
3255 #endif
3256 
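/* FIB notifier plumbing: the sequence counter combines the table
 * generation with the policy-rules generation, so a registering
 * subscriber can detect updates it may have raced with during its
 * initial dump.
 */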
3257 static unsigned int ipmr_seq_read(const struct net *net)
3258 {
3259 	return atomic_read(&net->ipv4.ipmr_seq) + ipmr_rules_seq_read(net);
3260 }
3261 
3262 static int ipmr_dump(struct net *net, struct notifier_block *nb,
3263 		     struct netlink_ext_ack *extack)
3264 {
3265 	return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
3266 		       ipmr_mr_table_iter, extack);
3267 }
3268 
3269 static const struct fib_notifier_ops ipmr_notifier_ops_template = {
3270 	.family		= RTNL_FAMILY_IPMR,
3271 	.fib_seq_read	= ipmr_seq_read,
3272 	.fib_dump	= ipmr_dump,
3273 	.owner		= THIS_MODULE,
3274 };
3275 
3276 static int __net_init ipmr_notifier_init(struct net *net)
3277 {
3278 	struct fib_notifier_ops *ops;
3279 
3280 	atomic_set(&net->ipv4.ipmr_seq, 0);
3281 
3282 	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
3283 	if (IS_ERR(ops))
3284 		return PTR_ERR(ops);
3285 	net->ipv4.ipmr_notifier_ops = ops;
3286 
3287 	return 0;
3288 }
3289 
3290 static void __net_exit ipmr_notifier_exit(struct net *net)
3291 {
3292 	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
3293 	net->ipv4.ipmr_notifier_ops = NULL;
3294 }
3295 
3296 /* Setup for IP multicast routing */
3297 static int __net_init ipmr_net_init(struct net *net)
3298 {
3299 	LIST_HEAD(dev_kill_list);
3300 	int err;
3301 
3302 	mutex_init(&net->ipv4.mfc_mutex);
3303 
3304 	err = ipmr_notifier_init(net);
3305 	if (err)
3306 		goto ipmr_notifier_fail;
3307 
3308 	err = ipmr_rules_init(net);
3309 	if (err < 0)
3310 		goto ipmr_rules_fail;
3311 
3312 #ifdef CONFIG_PROC_FS
3313 	err = -ENOMEM;
3314 	if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
3315 			sizeof(struct mr_vif_iter)))
3316 		goto proc_vif_fail;
3317 	if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
3318 			sizeof(struct mr_mfc_iter)))
3319 		goto proc_cache_fail;
3320 #endif
3321 	return 0;
3322 
3323 #ifdef CONFIG_PROC_FS
3324 proc_cache_fail:
3325 	remove_proc_entry("ip_mr_vif", net->proc_net);
3326 proc_vif_fail:
3327 	ipmr_rules_exit_rtnl(net, &dev_kill_list);
3328 	ipmr_rules_exit(net);
3329 #endif
3330 ipmr_rules_fail:
3331 	ipmr_notifier_exit(net);
3332 ipmr_notifier_fail:
3333 	return err;
3334 }
3335 
3336 static void __net_exit ipmr_net_exit(struct net *net)
3337 {
3338 #ifdef CONFIG_PROC_FS
3339 	remove_proc_entry("ip_mr_cache", net->proc_net);
3340 	remove_proc_entry("ip_mr_vif", net->proc_net);
3341 #endif
3342 	ipmr_rules_exit(net);
3343 	ipmr_notifier_exit(net);
3344 }
3345 
3346 static void __net_exit ipmr_net_exit_rtnl(struct net *net,
3347 					  struct list_head *dev_kill_list)
3348 {
3349 	ipmr_rules_exit_rtnl(net, dev_kill_list);
3350 }
3351 
3352 static struct pernet_operations ipmr_net_ops = {
3353 	.init = ipmr_net_init,
3354 	.exit = ipmr_net_exit,
3355 	.exit_rtnl = ipmr_net_exit_rtnl,
3356 };
3357 
3358 static const struct rtnl_msg_handler ipmr_rtnl_msg_handlers[] __initconst = {
3359 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETLINK,
3360 	 .dumpit = ipmr_rtm_dumplink, .flags = RTNL_FLAG_DUMP_UNLOCKED},
3361 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_NEWROUTE,
3362 	 .doit = ipmr_rtm_route, .flags = RTNL_FLAG_DOIT_UNLOCKED},
3363 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_DELROUTE,
3364 	 .doit = ipmr_rtm_route, .flags = RTNL_FLAG_DOIT_UNLOCKED},
3365 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETROUTE,
3366 	 .doit = ipmr_rtm_getroute, .dumpit = ipmr_rtm_dumproute,
3367 	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
3368 };
3369 
3370 int __init ip_mr_init(void)
3371 {
3372 	int err;
3373 
3374 	mrt_cachep = KMEM_CACHE(mfc_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3375 
3376 	err = register_pernet_subsys(&ipmr_net_ops);
3377 	if (err)
3378 		goto reg_pernet_fail;
3379 
3380 	err = register_netdevice_notifier(&ip_mr_notifier);
3381 	if (err)
3382 		goto reg_notif_fail;
3383 #ifdef CONFIG_IP_PIMSM_V2
3384 	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
3385 		pr_err("%s: can't add PIM protocol\n", __func__);
3386 		err = -EAGAIN;
3387 		goto add_proto_fail;
3388 	}
3389 #endif
3390 	rtnl_register_many(ipmr_rtnl_msg_handlers);
3391 
3392 	return 0;
3393 
3394 #ifdef CONFIG_IP_PIMSM_V2
3395 add_proto_fail:
3396 	unregister_netdevice_notifier(&ip_mr_notifier);
3397 #endif
3398 reg_notif_fail:
3399 	unregister_pernet_subsys(&ipmr_net_ops);
3400 reg_pernet_fail:
3401 	kmem_cache_destroy(mrt_cachep);
3402 	return err;
3403 }
3404