xref: /linux/net/ipv4/ipmr.c (revision 05e352444b2430de4b183b4a988085381e5fd6ad)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IP multicast routing support for mrouted 3.6/3.8
4  *
5  *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *	  Linux Consultancy and Custom Driver Development
7  *
8  *	Fixes:
9  *	Michael Chastain	:	Incorrect size of copying.
10  *	Alan Cox		:	Added the cache manager code
11  *	Alan Cox		:	Fixed the clone/copy bug and device race.
12  *	Mike McLagan		:	Routing by source
13  *	Malcolm Beattie		:	Buffer handling fixes.
14  *	Alexey Kuznetsov	:	Double buffer free and other fixes.
15  *	SVR Anand		:	Fixed several multicast bugs and problems.
16  *	Alexey Kuznetsov	:	Status, optimisations and more.
17  *	Brad Parker		:	Better behaviour on mrouted upcall
18  *					overflow.
19  *      Carlos Picoto           :       PIMv1 Support
20  *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
21  *					Relax this requirement to work with older peers.
22  */
23 
24 #include <linux/uaccess.h>
25 #include <linux/types.h>
26 #include <linux/cache.h>
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/mm.h>
30 #include <linux/kernel.h>
31 #include <linux/fcntl.h>
32 #include <linux/stat.h>
33 #include <linux/socket.h>
34 #include <linux/in.h>
35 #include <linux/inet.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/igmp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/mroute.h>
42 #include <linux/init.h>
43 #include <linux/if_ether.h>
44 #include <linux/slab.h>
45 #include <net/flow.h>
46 #include <net/net_namespace.h>
47 #include <net/ip.h>
48 #include <net/protocol.h>
49 #include <linux/skbuff.h>
50 #include <net/route.h>
51 #include <net/icmp.h>
52 #include <net/udp.h>
53 #include <net/raw.h>
54 #include <linux/notifier.h>
55 #include <linux/if_arp.h>
56 #include <linux/netfilter_ipv4.h>
57 #include <linux/compat.h>
58 #include <linux/export.h>
59 #include <linux/rhashtable.h>
60 #include <net/ip_tunnels.h>
61 #include <net/checksum.h>
62 #include <net/netlink.h>
63 #include <net/fib_rules.h>
64 #include <linux/netconf.h>
65 #include <net/rtnh.h>
66 #include <net/inet_dscp.h>
67 
68 #include <linux/nospec.h>
69 
70 struct ipmr_rule {
71 	struct fib_rule		common;
72 };
73 
74 struct ipmr_result {
75 	struct mr_table		*mrt;
76 };
77 
78 /* Big lock, protecting vif table, mrt cache and mroute socket state.
79  * Note that the changes are serialized via rtnl_lock.
80  */
81 
82 static DEFINE_SPINLOCK(mrt_lock);
83 
84 static struct net_device *vif_dev_read(const struct vif_device *vif)
85 {
86 	return rcu_dereference(vif->dev);
87 }
88 
89 /* Multicast router control variables */
90 
91 /* Special spinlock for queue of unresolved entries */
92 static DEFINE_SPINLOCK(mfc_unres_lock);
93 
94 /* We return to Alan's original scheme. The hash table of resolved
95  * entries is changed only in process context and protected by the
96  * weak lock mrt_lock. The queue of unresolved entries is protected
97  * by the strong spinlock mfc_unres_lock.
98  *
99  * As a result, the data path is entirely free of exclusive locks.
100  */
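/* A minimal sketch of the resulting reader-side pattern (illustrative,
 * not a formal API contract): lookups run under RCU alone, e.g.
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, iph->saddr, iph->daddr);
 *	if (c)
 *		... c stays valid until the matching rcu_read_unlock() ...
 *	rcu_read_unlock();
 *
 * while writers serialize on rtnl_lock/mrt_lock and free entries only
 * after a grace period via call_rcu() (see ipmr_cache_free()).
 */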
101 
102 static struct kmem_cache *mrt_cachep __ro_after_init;
103 
104 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
105 static void ipmr_free_table(struct mr_table *mrt,
106 			    struct list_head *dev_kill_list);
107 
108 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
109 			  struct net_device *dev, struct sk_buff *skb,
110 			  struct mfc_cache *cache, int local);
111 static int ipmr_cache_report(const struct mr_table *mrt,
112 			     struct sk_buff *pkt, vifi_t vifi, int assert);
113 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
114 				 int cmd);
115 static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
116 static void mroute_clean_tables(struct mr_table *mrt, int flags,
117 				struct list_head *dev_kill_list);
118 static void ipmr_expire_process(struct timer_list *t);
119 
120 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
121 #define ipmr_for_each_table(mrt, net)					\
122 	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,	\
123 				lockdep_rtnl_is_held() ||		\
124 				list_empty(&net->ipv4.mr_tables))
125 
126 static struct mr_table *ipmr_mr_table_iter(struct net *net,
127 					   struct mr_table *mrt)
128 {
129 	struct mr_table *ret;
130 
131 	if (!mrt)
132 		ret = list_entry_rcu(net->ipv4.mr_tables.next,
133 				     struct mr_table, list);
134 	else
135 		ret = list_entry_rcu(mrt->list.next,
136 				     struct mr_table, list);
137 
138 	if (&ret->list == &net->ipv4.mr_tables)
139 		return NULL;
140 	return ret;
141 }
142 
143 static struct mr_table *__ipmr_get_table(struct net *net, u32 id)
144 {
145 	struct mr_table *mrt;
146 
147 	ipmr_for_each_table(mrt, net) {
148 		if (mrt->id == id)
149 			return mrt;
150 	}
151 	return NULL;
152 }
153 
154 static struct mr_table *ipmr_get_table(struct net *net, u32 id)
155 {
156 	struct mr_table *mrt;
157 
158 	rcu_read_lock();
159 	mrt = __ipmr_get_table(net, id);
160 	rcu_read_unlock();
161 	return mrt;
162 }
163 
164 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
165 			   struct mr_table **mrt)
166 {
167 	int err;
168 	struct ipmr_result res;
169 	struct fib_lookup_arg arg = {
170 		.result = &res,
171 		.flags = FIB_LOOKUP_NOREF,
172 	};
173 
174 	/* update flow if oif or iif point to device enslaved to l3mdev */
175 	l3mdev_update_flow(net, flowi4_to_flowi(flp4));
176 
177 	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
178 			       flowi4_to_flowi(flp4), 0, &arg);
179 	if (err < 0)
180 		return err;
181 	*mrt = res.mrt;
182 	return 0;
183 }
184 
185 static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
186 			    int flags, struct fib_lookup_arg *arg)
187 {
188 	struct ipmr_result *res = arg->result;
189 	struct mr_table *mrt;
190 
191 	switch (rule->action) {
192 	case FR_ACT_TO_TBL:
193 		break;
194 	case FR_ACT_UNREACHABLE:
195 		return -ENETUNREACH;
196 	case FR_ACT_PROHIBIT:
197 		return -EACCES;
198 	case FR_ACT_BLACKHOLE:
199 	default:
200 		return -EINVAL;
201 	}
202 
203 	arg->table = fib_rule_get_table(rule, arg);
204 
205 	mrt = __ipmr_get_table(rule->fr_net, arg->table);
206 	if (!mrt)
207 		return -EAGAIN;
208 	res->mrt = mrt;
209 	return 0;
210 }
211 
212 static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
213 {
214 	return 1;
215 }
216 
217 static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
218 			       struct fib_rule_hdr *frh, struct nlattr **tb,
219 			       struct netlink_ext_ack *extack)
220 {
221 	return 0;
222 }
223 
224 static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
225 			     struct nlattr **tb)
226 {
227 	return 1;
228 }
229 
230 static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
231 			  struct fib_rule_hdr *frh)
232 {
233 	frh->dst_len = 0;
234 	frh->src_len = 0;
235 	frh->tos     = 0;
236 	return 0;
237 }
238 
239 static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
240 	.family		= RTNL_FAMILY_IPMR,
241 	.rule_size	= sizeof(struct ipmr_rule),
242 	.addr_size	= sizeof(u32),
243 	.action		= ipmr_rule_action,
244 	.match		= ipmr_rule_match,
245 	.configure	= ipmr_rule_configure,
246 	.compare	= ipmr_rule_compare,
247 	.fill		= ipmr_rule_fill,
248 	.nlgroup	= RTNLGRP_IPV4_RULE,
249 	.owner		= THIS_MODULE,
250 };
251 
252 static int __net_init ipmr_rules_init(struct net *net)
253 {
254 	struct fib_rules_ops *ops;
255 	LIST_HEAD(dev_kill_list);
256 	struct mr_table *mrt;
257 	int err;
258 
259 	ops = fib_rules_register(&ipmr_rules_ops_template, net);
260 	if (IS_ERR(ops))
261 		return PTR_ERR(ops);
262 
263 	INIT_LIST_HEAD(&net->ipv4.mr_tables);
264 
265 	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
266 	if (IS_ERR(mrt)) {
267 		err = PTR_ERR(mrt);
268 		goto err1;
269 	}
270 
271 	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT);
272 	if (err < 0)
273 		goto err2;
274 
275 	net->ipv4.mr_rules_ops = ops;
276 	return 0;
277 
278 err2:
279 	ipmr_free_table(mrt, &dev_kill_list);
280 err1:
281 	fib_rules_unregister(ops);
282 	return err;
283 }
284 
285 static void __net_exit ipmr_rules_exit(struct net *net)
286 {
287 	fib_rules_unregister(net->ipv4.mr_rules_ops);
288 }
289 
290 static void __net_exit ipmr_rules_exit_rtnl(struct net *net,
291 					    struct list_head *dev_kill_list)
292 {
293 	struct mr_table *mrt, *next;
294 
295 	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
296 		list_del(&mrt->list);
297 		ipmr_free_table(mrt, dev_kill_list);
298 	}
299 }
300 
301 static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
302 			   struct netlink_ext_ack *extack)
303 {
304 	return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
305 }
306 
307 static unsigned int ipmr_rules_seq_read(const struct net *net)
308 {
309 	return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
310 }
311 
312 bool ipmr_rule_default(const struct fib_rule *rule)
313 {
314 	return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
315 }
316 EXPORT_SYMBOL(ipmr_rule_default);
317 #else
318 #define ipmr_for_each_table(mrt, net) \
319 	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
320 
321 static struct mr_table *ipmr_mr_table_iter(struct net *net,
322 					   struct mr_table *mrt)
323 {
324 	if (!mrt)
325 		return net->ipv4.mrt;
326 	return NULL;
327 }
328 
329 static struct mr_table *ipmr_get_table(struct net *net, u32 id)
330 {
331 	return net->ipv4.mrt;
332 }
333 
334 #define __ipmr_get_table ipmr_get_table
335 
336 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
337 			   struct mr_table **mrt)
338 {
339 	*mrt = net->ipv4.mrt;
340 	return 0;
341 }
342 
343 static int __net_init ipmr_rules_init(struct net *net)
344 {
345 	struct mr_table *mrt;
346 
347 	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
348 	if (IS_ERR(mrt))
349 		return PTR_ERR(mrt);
350 	net->ipv4.mrt = mrt;
351 	return 0;
352 }
353 
354 static void __net_exit ipmr_rules_exit(struct net *net)
355 {
356 }
357 
358 static void __net_exit ipmr_rules_exit_rtnl(struct net *net,
359 					    struct list_head *dev_kill_list)
360 {
361 	ipmr_free_table(net->ipv4.mrt, dev_kill_list);
362 
363 	net->ipv4.mrt = NULL;
364 }
365 
366 static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
367 			   struct netlink_ext_ack *extack)
368 {
369 	return 0;
370 }
371 
372 static unsigned int ipmr_rules_seq_read(const struct net *net)
373 {
374 	return 0;
375 }
376 
377 bool ipmr_rule_default(const struct fib_rule *rule)
378 {
379 	return true;
380 }
381 EXPORT_SYMBOL(ipmr_rule_default);
382 #endif
383 
384 static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
385 				const void *ptr)
386 {
387 	const struct mfc_cache_cmp_arg *cmparg = arg->key;
388 	const struct mfc_cache *c = ptr;
389 
390 	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
391 	       cmparg->mfc_origin != c->mfc_origin;
392 }
393 
394 static const struct rhashtable_params ipmr_rht_params = {
395 	.head_offset = offsetof(struct mr_mfc, mnode),
396 	.key_offset = offsetof(struct mfc_cache, cmparg),
397 	.key_len = sizeof(struct mfc_cache_cmp_arg),
398 	.nelem_hint = 3,
399 	.obj_cmpfn = ipmr_hash_cmp,
400 	.automatic_shrinking = true,
401 };
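/* A sketch of how these parameters are exercised (illustrative): a
 * lookup builds the key on the stack and rhltable compares it against
 * the cmparg embedded in each entry via ipmr_hash_cmp() above:
 *
 *	struct mfc_cache_cmp_arg arg = {
 *		.mfc_mcastgrp = mcastgrp,
 *		.mfc_origin = origin,
 *	};
 *	struct rhlist_head *list;
 *
 *	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
 *
 * This is what the mr_mfc_find*() helpers do on our behalf; see
 * ipmr_cache_find() below.
 */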
402 
403 static void ipmr_new_table_set(struct mr_table *mrt,
404 			       struct net *net)
405 {
406 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
407 	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
408 #endif
409 }
410 
411 static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
412 	.mfc_mcastgrp = htonl(INADDR_ANY),
413 	.mfc_origin = htonl(INADDR_ANY),
414 };
415 
416 static struct mr_table_ops ipmr_mr_table_ops = {
417 	.rht_params = &ipmr_rht_params,
418 	.cmparg_any = &ipmr_mr_table_ops_cmparg_any,
419 };
420 
421 static struct mr_table *ipmr_new_table(struct net *net, u32 id)
422 {
423 	struct mr_table *mrt;
424 
425 	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
426 	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
427 		return ERR_PTR(-EINVAL);
428 
429 	mrt = __ipmr_get_table(net, id);
430 	if (mrt)
431 		return mrt;
432 
433 	return mr_table_alloc(net, id, &ipmr_mr_table_ops,
434 			      ipmr_expire_process, ipmr_new_table_set);
435 }
436 
437 static void ipmr_free_table(struct mr_table *mrt, struct list_head *dev_kill_list)
438 {
439 	struct net *net = read_pnet(&mrt->net);
440 	LIST_HEAD(ipmr_dev_kill_list);
441 
442 	WARN_ON_ONCE(!mr_can_free_table(net));
443 
444 	timer_shutdown_sync(&mrt->ipmr_expire_timer);
445 	mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
446 			    MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC,
447 			    &ipmr_dev_kill_list);
448 	rhltable_destroy(&mrt->mfc_hash);
449 	kfree(mrt);
450 
451 	WARN_ON_ONCE(!net_initialized(net) && !list_empty(&ipmr_dev_kill_list));
452 	list_splice(&ipmr_dev_kill_list, dev_kill_list);
453 }
454 
455 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
456 
457 /* Initialize ipmr pimreg/tunnel in_device */
458 static bool ipmr_init_vif_indev(const struct net_device *dev)
459 {
460 	struct in_device *in_dev;
461 
462 	ASSERT_RTNL();
463 
464 	in_dev = __in_dev_get_rtnl(dev);
465 	if (!in_dev)
466 		return false;
467 	ipv4_devconf_setall(in_dev);
468 	neigh_parms_data_state_setall(in_dev->arp_parms);
469 	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
470 
471 	return true;
472 }
473 
474 static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
475 {
476 	struct net_device *tunnel_dev, *new_dev;
477 	struct ip_tunnel_parm_kern p = { };
478 	int err;
479 
480 	tunnel_dev = __dev_get_by_name(net, "tunl0");
481 	if (!tunnel_dev)
482 		goto out;
483 
484 	p.iph.daddr = v->vifc_rmt_addr.s_addr;
485 	p.iph.saddr = v->vifc_lcl_addr.s_addr;
486 	p.iph.version = 4;
487 	p.iph.ihl = 5;
488 	p.iph.protocol = IPPROTO_IPIP;
489 	sprintf(p.name, "dvmrp%d", v->vifc_vifi);
490 
491 	if (!tunnel_dev->netdev_ops->ndo_tunnel_ctl)
492 		goto out;
493 	err = tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
494 			SIOCADDTUNNEL);
495 	if (err)
496 		goto out;
497 
498 	new_dev = __dev_get_by_name(net, p.name);
499 	if (!new_dev)
500 		goto out;
501 
502 	new_dev->flags |= IFF_MULTICAST;
503 	if (!ipmr_init_vif_indev(new_dev))
504 		goto out_unregister;
505 	if (dev_open(new_dev, NULL))
506 		goto out_unregister;
507 	dev_hold(new_dev);
508 	err = dev_set_allmulti(new_dev, 1);
509 	if (err) {
510 		dev_close(new_dev);
511 		tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
512 				SIOCDELTUNNEL);
513 		dev_put(new_dev);
514 		new_dev = ERR_PTR(err);
515 	}
516 	return new_dev;
517 
518 out_unregister:
519 	unregister_netdevice(new_dev);
520 out:
521 	return ERR_PTR(-ENOBUFS);
522 }
523 
524 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
525 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
526 {
527 	struct net *net = dev_net(dev);
528 	struct mr_table *mrt;
529 	struct flowi4 fl4 = {
530 		.flowi4_oif	= dev->ifindex,
531 		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
532 		.flowi4_mark	= skb->mark,
533 	};
534 	int err;
535 
536 	err = ipmr_fib_lookup(net, &fl4, &mrt);
537 	if (err < 0) {
538 		kfree_skb(skb);
539 		return err;
540 	}
541 
542 	DEV_STATS_ADD(dev, tx_bytes, skb->len);
543 	DEV_STATS_INC(dev, tx_packets);
544 	rcu_read_lock();
545 
546 	/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
547 	ipmr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
548 			  IGMPMSG_WHOLEPKT);
549 
550 	rcu_read_unlock();
551 	kfree_skb(skb);
552 	return NETDEV_TX_OK;
553 }
554 
555 static int reg_vif_get_iflink(const struct net_device *dev)
556 {
557 	return 0;
558 }
559 
560 static const struct net_device_ops reg_vif_netdev_ops = {
561 	.ndo_start_xmit	= reg_vif_xmit,
562 	.ndo_get_iflink = reg_vif_get_iflink,
563 };
564 
565 static void reg_vif_setup(struct net_device *dev)
566 {
567 	dev->type		= ARPHRD_PIMREG;
568 	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
569 	dev->flags		= IFF_NOARP;
570 	dev->netdev_ops		= &reg_vif_netdev_ops;
571 	dev->needs_free_netdev	= true;
572 	dev->netns_immutable	= true;
573 }
574 
575 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
576 {
577 	struct net_device *dev;
578 	char name[IFNAMSIZ];
579 
580 	if (mrt->id == RT_TABLE_DEFAULT)
581 		sprintf(name, "pimreg");
582 	else
583 		sprintf(name, "pimreg%u", mrt->id);
584 
585 	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
586 
587 	if (!dev)
588 		return NULL;
589 
590 	dev_net_set(dev, net);
591 
592 	if (register_netdevice(dev)) {
593 		free_netdev(dev);
594 		return NULL;
595 	}
596 
597 	if (!ipmr_init_vif_indev(dev))
598 		goto failure;
599 	if (dev_open(dev, NULL))
600 		goto failure;
601 
602 	dev_hold(dev);
603 
604 	return dev;
605 
606 failure:
607 	unregister_netdevice(dev);
608 	return NULL;
609 }
610 
611 /* called with rcu_read_lock() */
612 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
613 		     unsigned int pimlen)
614 {
615 	struct net_device *reg_dev = NULL;
616 	struct iphdr *encap;
617 	int vif_num;
618 
619 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
620 	/* Check that:
621 	 * a. packet is really sent to a multicast group
622 	 * b. packet is not a NULL-REGISTER
623 	 * c. packet is not truncated
624 	 */
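	/* Layout at this point (a sketch; see the PIM register format in
	 * RFC 7761): the transport header marks the start of the PIM
	 * header, and the encapsulated multicast packet begins pimlen
	 * bytes after it:
	 *
	 *   | outer iph | PIM register header (pimlen) | inner iph ... |
	 *               ^ skb_transport_header(skb)     ^ encap
	 */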
625 	if (!ipv4_is_multicast(encap->daddr) ||
626 	    encap->tot_len == 0 ||
627 	    ntohs(encap->tot_len) + pimlen > skb->len)
628 		return 1;
629 
630 	/* Pairs with WRITE_ONCE() in vif_add()/vif_delete() */
631 	vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
632 	if (vif_num >= 0)
633 		reg_dev = vif_dev_read(&mrt->vif_table[vif_num]);
634 	if (!reg_dev)
635 		return 1;
636 
637 	skb->mac_header = skb->network_header;
638 	skb_pull(skb, (u8 *)encap - skb->data);
639 	skb_reset_network_header(skb);
640 	skb->protocol = htons(ETH_P_IP);
641 	skb->ip_summed = CHECKSUM_NONE;
642 
643 	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
644 
645 	netif_rx(skb);
646 
647 	return NET_RX_SUCCESS;
648 }
649 #else
650 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
651 {
652 	return NULL;
653 }
654 #endif
655 
656 static int call_ipmr_vif_entry_notifiers(struct net *net,
657 					 enum fib_event_type event_type,
658 					 struct vif_device *vif,
659 					 struct net_device *vif_dev,
660 					 vifi_t vif_index, u32 tb_id)
661 {
662 	return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
663 				     vif, vif_dev, vif_index, tb_id,
664 				     &net->ipv4.ipmr_seq);
665 }
666 
667 static int call_ipmr_mfc_entry_notifiers(struct net *net,
668 					 enum fib_event_type event_type,
669 					 struct mfc_cache *mfc, u32 tb_id)
670 {
671 	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
672 				     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
673 }
674 
675 /**
676  *	vif_delete - Delete a VIF entry
677  *	@mrt: Table to delete from
678  *	@vifi: VIF identifier to delete
679  *	@notify: Set to 1 if the caller is a notifier_call
680  *	@head: if unregistering the VIF, place it on this queue
681  */
682 static int vif_delete(struct mr_table *mrt, int vifi, int notify,
683 		      struct list_head *head)
684 {
685 	struct net *net = read_pnet(&mrt->net);
686 	struct vif_device *v;
687 	struct net_device *dev;
688 	struct in_device *in_dev;
689 
690 	if (vifi < 0 || vifi >= mrt->maxvif)
691 		return -EADDRNOTAVAIL;
692 
693 	v = &mrt->vif_table[vifi];
694 
695 	dev = rtnl_dereference(v->dev);
696 	if (!dev)
697 		return -EADDRNOTAVAIL;
698 
699 	spin_lock(&mrt_lock);
700 	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, dev,
701 				      vifi, mrt->id);
702 	RCU_INIT_POINTER(v->dev, NULL);
703 
704 	if (vifi == mrt->mroute_reg_vif_num) {
705 		/* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
706 		WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
707 	}
708 	if (vifi + 1 == mrt->maxvif) {
709 		int tmp;
710 
711 		for (tmp = vifi - 1; tmp >= 0; tmp--) {
712 			if (VIF_EXISTS(mrt, tmp))
713 				break;
714 		}
715 		WRITE_ONCE(mrt->maxvif, tmp + 1);
716 	}
717 
718 	spin_unlock(&mrt_lock);
719 
720 	dev_set_allmulti(dev, -1);
721 
722 	in_dev = __in_dev_get_rtnl(dev);
723 	if (in_dev) {
724 		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
725 		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
726 					    NETCONFA_MC_FORWARDING,
727 					    dev->ifindex, &in_dev->cnf);
728 		ip_rt_multicast_event(in_dev);
729 	}
730 
731 	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
732 		unregister_netdevice_queue(dev, head);
733 
734 	netdev_put(dev, &v->dev_tracker);
735 	return 0;
736 }
737 
738 static void ipmr_cache_free_rcu(struct rcu_head *head)
739 {
740 	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
741 
742 	kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
743 }
744 
745 static void ipmr_cache_free(struct mfc_cache *c)
746 {
747 	call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
748 }
749 
750 /* Destroy an unresolved cache entry, killing queued skbs
751  * and reporting an error to netlink readers.
752  */
753 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
754 {
755 	struct net *net = read_pnet(&mrt->net);
756 	struct sk_buff *skb;
757 	struct nlmsgerr *e;
758 
759 	atomic_dec(&mrt->cache_resolve_queue_len);
760 
761 	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
762 		if (ip_hdr(skb)->version == 0) {
763 			struct nlmsghdr *nlh = skb_pull(skb,
764 							sizeof(struct iphdr));
765 			nlh->nlmsg_type = NLMSG_ERROR;
766 			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
767 			skb_trim(skb, nlh->nlmsg_len);
768 			e = nlmsg_data(nlh);
769 			e->error = -ETIMEDOUT;
770 			memset(&e->msg, 0, sizeof(e->msg));
771 
772 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
773 		} else {
774 			kfree_skb(skb);
775 		}
776 	}
777 
778 	ipmr_cache_free(c);
779 }
780 
781 /* Timer process for the unresolved queue. */
782 static void ipmr_expire_process(struct timer_list *t)
783 {
784 	struct mr_table *mrt = timer_container_of(mrt, t, ipmr_expire_timer);
785 	struct mr_mfc *c, *next;
786 	unsigned long expires;
787 	unsigned long now;
788 
789 	if (!spin_trylock(&mfc_unres_lock)) {
790 		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
791 		return;
792 	}
793 
794 	if (list_empty(&mrt->mfc_unres_queue))
795 		goto out;
796 
797 	now = jiffies;
798 	expires = 10*HZ;
799 
800 	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
801 		if (time_after(c->mfc_un.unres.expires, now)) {
802 			unsigned long interval = c->mfc_un.unres.expires - now;
803 			if (interval < expires)
804 				expires = interval;
805 			continue;
806 		}
807 
808 		list_del(&c->list);
809 		mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
810 		ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
811 	}
812 
813 	if (!list_empty(&mrt->mfc_unres_queue))
814 		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
815 
816 out:
817 	spin_unlock(&mfc_unres_lock);
818 }
819 
820 /* Fill the oifs list. Called with mrt_lock held. */
821 static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
822 				   unsigned char *ttls)
823 {
824 	int vifi;
825 
826 	cache->mfc_un.res.minvif = MAXVIFS;
827 	cache->mfc_un.res.maxvif = 0;
828 	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
829 
830 	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
831 		if (VIF_EXISTS(mrt, vifi) &&
832 		    ttls[vifi] && ttls[vifi] < 255) {
833 			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
834 			if (cache->mfc_un.res.minvif > vifi)
835 				cache->mfc_un.res.minvif = vifi;
836 			if (cache->mfc_un.res.maxvif <= vifi)
837 				cache->mfc_un.res.maxvif = vifi + 1;
838 		}
839 	}
840 	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
841 }
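/* A worked example (illustrative): with ttls = { 0, 3, 255, 64 } the
 * loop above programs vifs 1 and 3 only (0 and 255 both mean "do not
 * forward"), leaving minvif == 1 and maxvif == 4, so the forwarding
 * path scans exactly the half-open range [minvif, maxvif).
 */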
842 
843 static int vif_add(struct net *net, struct mr_table *mrt,
844 		   struct vifctl *vifc, int mrtsock)
845 {
846 	struct netdev_phys_item_id ppid = { };
847 	int vifi = vifc->vifc_vifi;
848 	struct vif_device *v = &mrt->vif_table[vifi];
849 	struct net_device *dev;
850 	struct in_device *in_dev;
851 	int err;
852 
853 	/* Is the vif busy? */
854 	if (VIF_EXISTS(mrt, vifi))
855 		return -EADDRINUSE;
856 
857 	switch (vifc->vifc_flags) {
858 	case VIFF_REGISTER:
859 		if (!ipmr_pimsm_enabled())
860 			return -EINVAL;
861 		/* Special Purpose VIF in PIM
862 		 * All the packets will be sent to the daemon
863 		 */
864 		if (mrt->mroute_reg_vif_num >= 0)
865 			return -EADDRINUSE;
866 		dev = ipmr_reg_vif(net, mrt);
867 		if (!dev)
868 			return -ENOBUFS;
869 		err = dev_set_allmulti(dev, 1);
870 		if (err) {
871 			unregister_netdevice(dev);
872 			dev_put(dev);
873 			return err;
874 		}
875 		break;
876 	case VIFF_TUNNEL:
877 		dev = ipmr_new_tunnel(net, vifc);
878 		if (IS_ERR(dev))
879 			return PTR_ERR(dev);
880 		break;
881 	case VIFF_USE_IFINDEX:
882 	case 0:
883 		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
884 			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
885 			if (dev && !__in_dev_get_rtnl(dev)) {
886 				dev_put(dev);
887 				return -EADDRNOTAVAIL;
888 			}
889 		} else {
890 			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
891 		}
892 		if (!dev)
893 			return -EADDRNOTAVAIL;
894 		err = dev_set_allmulti(dev, 1);
895 		if (err) {
896 			dev_put(dev);
897 			return err;
898 		}
899 		break;
900 	default:
901 		return -EINVAL;
902 	}
903 
904 	in_dev = __in_dev_get_rtnl(dev);
905 	if (!in_dev) {
906 		dev_put(dev);
907 		return -EADDRNOTAVAIL;
908 	}
909 	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
910 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
911 				    dev->ifindex, &in_dev->cnf);
912 	ip_rt_multicast_event(in_dev);
913 
914 	/* Fill in the VIF structures */
915 	vif_device_init(v, dev, vifc->vifc_rate_limit,
916 			vifc->vifc_threshold,
917 			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
918 			(VIFF_TUNNEL | VIFF_REGISTER));
919 
920 	err = netif_get_port_parent_id(dev, &ppid, true);
921 	if (err == 0) {
922 		memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
923 		v->dev_parent_id.id_len = ppid.id_len;
924 	} else {
925 		v->dev_parent_id.id_len = 0;
926 	}
927 
928 	v->local = vifc->vifc_lcl_addr.s_addr;
929 	v->remote = vifc->vifc_rmt_addr.s_addr;
930 
931 	/* And finish the update by writing the critical data */
932 	spin_lock(&mrt_lock);
933 	rcu_assign_pointer(v->dev, dev);
934 	netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
935 	if (v->flags & VIFF_REGISTER) {
936 		/* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
937 		WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
938 	}
939 	if (vifi+1 > mrt->maxvif)
940 		WRITE_ONCE(mrt->maxvif, vifi + 1);
941 	spin_unlock(&mrt_lock);
942 	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, dev,
943 				      vifi, mrt->id);
944 	return 0;
945 }
946 
947 /* called with rcu_read_lock() */
948 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
949 					 __be32 origin,
950 					 __be32 mcastgrp)
951 {
952 	struct mfc_cache_cmp_arg arg = {
953 			.mfc_mcastgrp = mcastgrp,
954 			.mfc_origin = origin
955 	};
956 
957 	return mr_mfc_find(mrt, &arg);
958 }
959 
960 /* Look for a (*,G) entry */
961 static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
962 					     __be32 mcastgrp, int vifi)
963 {
964 	struct mfc_cache_cmp_arg arg = {
965 			.mfc_mcastgrp = mcastgrp,
966 			.mfc_origin = htonl(INADDR_ANY)
967 	};
968 
969 	if (mcastgrp == htonl(INADDR_ANY))
970 		return mr_mfc_find_any_parent(mrt, vifi);
971 	return mr_mfc_find_any(mrt, vifi, &arg);
972 }
973 
974 /* Look for a (S,G,iif) entry if parent != -1 */
975 static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
976 						__be32 origin, __be32 mcastgrp,
977 						int parent)
978 {
979 	struct mfc_cache_cmp_arg arg = {
980 			.mfc_mcastgrp = mcastgrp,
981 			.mfc_origin = origin,
982 	};
983 
984 	return mr_mfc_find_parent(mrt, &arg, parent);
985 }
986 
987 /* Allocate a multicast cache entry */
988 static struct mfc_cache *ipmr_cache_alloc(void)
989 {
990 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
991 
992 	if (c) {
993 		c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
994 		c->_c.mfc_un.res.minvif = MAXVIFS;
995 		c->_c.free = ipmr_cache_free_rcu;
996 		refcount_set(&c->_c.mfc_un.res.refcount, 1);
997 	}
998 	return c;
999 }
1000 
1001 static struct mfc_cache *ipmr_cache_alloc_unres(void)
1002 {
1003 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1004 
1005 	if (c) {
1006 		skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
1007 		c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
1008 	}
1009 	return c;
1010 }
1011 
1012 /* A cache entry has gone from the unresolved queue into a resolved state */
1013 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
1014 			       struct mfc_cache *uc, struct mfc_cache *c)
1015 {
1016 	struct sk_buff *skb;
1017 	struct nlmsgerr *e;
1018 
1019 	/* Play the pending entries through our router */
1020 	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1021 		if (ip_hdr(skb)->version == 0) {
1022 			struct nlmsghdr *nlh = skb_pull(skb,
1023 							sizeof(struct iphdr));
1024 
1025 			if (mr_fill_mroute(mrt, skb, &c->_c,
1026 					   nlmsg_data(nlh)) > 0) {
1027 				nlh->nlmsg_len = skb_tail_pointer(skb) -
1028 						 (u8 *)nlh;
1029 			} else {
1030 				nlh->nlmsg_type = NLMSG_ERROR;
1031 				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1032 				skb_trim(skb, nlh->nlmsg_len);
1033 				e = nlmsg_data(nlh);
1034 				e->error = -EMSGSIZE;
1035 				memset(&e->msg, 0, sizeof(e->msg));
1036 			}
1037 
1038 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1039 		} else {
1040 			rcu_read_lock();
1041 			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
1042 			rcu_read_unlock();
1043 		}
1044 	}
1045 }
1046 
1047 /* Bounce a cache query up to mrouted and netlink.
1048  *
1049  * Called under rcu_read_lock().
1050  */
1051 static int ipmr_cache_report(const struct mr_table *mrt,
1052 			     struct sk_buff *pkt, vifi_t vifi, int assert)
1053 {
1054 	const int ihl = ip_hdrlen(pkt);
1055 	struct sock *mroute_sk;
1056 	struct igmphdr *igmp;
1057 	struct igmpmsg *msg;
1058 	struct sk_buff *skb;
1059 	int ret;
1060 
1061 	mroute_sk = rcu_dereference(mrt->mroute_sk);
1062 	if (!mroute_sk)
1063 		return -EINVAL;
1064 
1065 	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
1066 		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
1067 	else
1068 		skb = alloc_skb(128, GFP_ATOMIC);
1069 
1070 	if (!skb)
1071 		return -ENOBUFS;
1072 
1073 	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
1074 		/* Ugly, but we have no choice with this interface.
1075 		 * Duplicate old header, fix ihl, length etc.
1076 		 * And all this only to mangle msg->im_msgtype and
1077 		 * to set msg->im_mbz to "mbz" :-)
1078 		 */
1079 		skb_push(skb, sizeof(struct iphdr));
1080 		skb_reset_network_header(skb);
1081 		skb_reset_transport_header(skb);
1082 		msg = (struct igmpmsg *)skb_network_header(skb);
1083 		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
1084 		msg->im_msgtype = assert;
1085 		msg->im_mbz = 0;
1086 		if (assert == IGMPMSG_WRVIFWHOLE) {
1087 			msg->im_vif = vifi;
1088 			msg->im_vif_hi = vifi >> 8;
1089 		} else {
1090 			/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
1091 			int vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
1092 
1093 			msg->im_vif = vif_num;
1094 			msg->im_vif_hi = vif_num >> 8;
1095 		}
1096 		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
1097 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
1098 					     sizeof(struct iphdr));
1099 	} else {
1100 		/* Copy the IP header */
1101 		skb_set_network_header(skb, skb->len);
1102 		skb_put(skb, ihl);
1103 		skb_copy_to_linear_data(skb, pkt->data, ihl);
1104 		/* Flag to the kernel this is a route add */
1105 		ip_hdr(skb)->protocol = 0;
1106 		msg = (struct igmpmsg *)skb_network_header(skb);
1107 		msg->im_vif = vifi;
1108 		msg->im_vif_hi = vifi >> 8;
1109 		ipv4_pktinfo_prepare(mroute_sk, pkt, false);
1110 		memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
1111 		/* Add our header */
1112 		igmp = skb_put(skb, sizeof(struct igmphdr));
1113 		igmp->type = assert;
1114 		msg->im_msgtype = assert;
1115 		igmp->code = 0;
1116 		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
1117 		skb->transport_header = skb->network_header;
1118 	}
1119 
1120 	igmpmsg_netlink_event(mrt, skb);
1121 
1122 	/* Deliver to mrouted */
1123 	ret = sock_queue_rcv_skb(mroute_sk, skb);
1124 
1125 	if (ret < 0) {
1126 		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
1127 		kfree_skb(skb);
1128 	}
1129 
1130 	return ret;
1131 }
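/* A minimal sketch of the receiving side (illustrative userspace code,
 * not part of the kernel; "fd" is the daemon's IPPROTO_IGMP raw socket
 * on which it issued MRT_INIT):
 *
 *	char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	struct igmpmsg *msg = (struct igmpmsg *)buf;
 *
 *	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0) {
 *		switch (msg->im_msgtype) {
 *		case IGMPMSG_NOCACHE:	... resolve im_src/im_dst,
 *					    then MRT_ADD_MFC ...	break;
 *		case IGMPMSG_WRONGVIF:	... PIM assert handling ...	break;
 *		case IGMPMSG_WHOLEPKT:	... send a PIM register ...	break;
 *		}
 *	}
 *
 * im_mbz overlays the IP protocol byte: the kernel stores 0 there for
 * upcalls, whereas genuine IGMP arrives with protocol 2, which is how
 * the daemon tells the two apart on the same socket.
 */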
1132 
1133 /* Queue a packet for resolution. It gets a locked cache entry! */
1134 /* Called under rcu_read_lock() */
1135 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1136 				 struct sk_buff *skb, struct net_device *dev)
1137 {
1138 	const struct iphdr *iph = ip_hdr(skb);
1139 	struct mfc_cache *c;
1140 	bool found = false;
1141 	int err;
1142 
1143 	spin_lock_bh(&mfc_unres_lock);
1144 	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1145 		if (c->mfc_mcastgrp == iph->daddr &&
1146 		    c->mfc_origin == iph->saddr) {
1147 			found = true;
1148 			break;
1149 		}
1150 	}
1151 
1152 	if (!found) {
1153 		/* Create a new entry if allowable */
1154 		c = ipmr_cache_alloc_unres();
1155 		if (!c) {
1156 			spin_unlock_bh(&mfc_unres_lock);
1157 
1158 			kfree_skb(skb);
1159 			return -ENOBUFS;
1160 		}
1161 
1162 		/* Fill in the new cache entry */
1163 		c->_c.mfc_parent = -1;
1164 		c->mfc_origin	= iph->saddr;
1165 		c->mfc_mcastgrp	= iph->daddr;
1166 
1167 		/* Reflect first query at mrouted. */
1168 		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1169 
1170 		if (err < 0) {
1171 			/* If the report failed, throw the cache entry
1172 			 * out - Brad Parker
1173 			 */
1174 			spin_unlock_bh(&mfc_unres_lock);
1175 
1176 			ipmr_cache_free(c);
1177 			kfree_skb(skb);
1178 			return err;
1179 		}
1180 
1181 		atomic_inc(&mrt->cache_resolve_queue_len);
1182 		list_add(&c->_c.list, &mrt->mfc_unres_queue);
1183 		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1184 
1185 		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1186 			mod_timer(&mrt->ipmr_expire_timer,
1187 				  c->_c.mfc_un.unres.expires);
1188 	}
1189 
1190 	/* See if we can append the packet */
1191 	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1192 		kfree_skb(skb);
1193 		err = -ENOBUFS;
1194 	} else {
1195 		if (dev) {
1196 			skb->dev = dev;
1197 			skb->skb_iif = dev->ifindex;
1198 		}
1199 		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1200 		err = 0;
1201 	}
1202 
1203 	spin_unlock_bh(&mfc_unres_lock);
1204 	return err;
1205 }
1206 
1207 /* MFC cache manipulation by the user-space mroute daemon */
1208 
1209 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1210 {
1211 	struct net *net = read_pnet(&mrt->net);
1212 	struct mfc_cache *c;
1213 
1214 	rcu_read_lock();
1215 	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1216 				   mfc->mfcc_mcastgrp.s_addr, parent);
1217 	rcu_read_unlock();
1218 	if (!c)
1219 		return -ENOENT;
1220 	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
1221 	list_del_rcu(&c->_c.list);
1222 	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
1223 	mroute_netlink_event(mrt, c, RTM_DELROUTE);
1224 	mr_cache_put(&c->_c);
1225 
1226 	return 0;
1227 }
1228 
1229 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1230 			struct mfcctl *mfc, int mrtsock, int parent)
1231 {
1232 	struct mfc_cache *uc, *c;
1233 	struct mr_mfc *_uc;
1234 	bool found;
1235 	int ret;
1236 
1237 	if (mfc->mfcc_parent >= MAXVIFS)
1238 		return -ENFILE;
1239 
1240 	rcu_read_lock();
1241 	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1242 				   mfc->mfcc_mcastgrp.s_addr, parent);
1243 	rcu_read_unlock();
1244 	if (c) {
1245 		spin_lock(&mrt_lock);
1246 		c->_c.mfc_parent = mfc->mfcc_parent;
1247 		ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1248 		if (!mrtsock)
1249 			c->_c.mfc_flags |= MFC_STATIC;
1250 		spin_unlock(&mrt_lock);
1251 		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
1252 					      mrt->id);
1253 		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1254 		return 0;
1255 	}
1256 
1257 	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
1258 	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
1259 		return -EINVAL;
1260 
1261 	c = ipmr_cache_alloc();
1262 	if (!c)
1263 		return -ENOMEM;
1264 
1265 	c->mfc_origin = mfc->mfcc_origin.s_addr;
1266 	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
1267 	c->_c.mfc_parent = mfc->mfcc_parent;
1268 	ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1269 	if (!mrtsock)
1270 		c->_c.mfc_flags |= MFC_STATIC;
1271 
1272 	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1273 				  ipmr_rht_params);
1274 	if (ret) {
1275 		pr_err("ipmr: rhtable insert error %d\n", ret);
1276 		ipmr_cache_free(c);
1277 		return ret;
1278 	}
1279 	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1280 	/* Check to see if we resolved a queued (unresolved) entry. If so,
1281 	 * we need to send out the queued frames and tidy up.
1282 	 */
1283 	found = false;
1284 	spin_lock_bh(&mfc_unres_lock);
1285 	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1286 		uc = (struct mfc_cache *)_uc;
1287 		if (uc->mfc_origin == c->mfc_origin &&
1288 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
1289 			list_del(&_uc->list);
1290 			atomic_dec(&mrt->cache_resolve_queue_len);
1291 			found = true;
1292 			break;
1293 		}
1294 	}
1295 	if (list_empty(&mrt->mfc_unres_queue))
1296 		timer_delete(&mrt->ipmr_expire_timer);
1297 	spin_unlock_bh(&mfc_unres_lock);
1298 
1299 	if (found) {
1300 		ipmr_cache_resolve(net, mrt, uc, c);
1301 		ipmr_cache_free(uc);
1302 	}
1303 	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
1304 	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1305 	return 0;
1306 }
1307 
1308 /* Close the multicast socket, and clear the vif tables etc */
1309 static void mroute_clean_tables(struct mr_table *mrt, int flags,
1310 				struct list_head *dev_kill_list)
1311 {
1312 	struct net *net = read_pnet(&mrt->net);
1313 	struct mfc_cache *cache;
1314 	struct mr_mfc *c, *tmp;
1315 	int i;
1316 
1317 	/* Shut down all active vif entries */
1318 	if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
1319 		for (i = 0; i < mrt->maxvif; i++) {
1320 			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1321 			     !(flags & MRT_FLUSH_VIFS_STATIC)) ||
1322 			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
1323 				continue;
1324 			vif_delete(mrt, i, 0, dev_kill_list);
1325 		}
1326 	}
1327 
1328 	/* Wipe the cache */
1329 	if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
1330 		mutex_lock(&net->ipv4.mfc_mutex);
1331 
1332 		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1333 			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) ||
1334 			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC)))
1335 				continue;
1336 			rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
1337 			list_del_rcu(&c->list);
1338 			cache = (struct mfc_cache *)c;
1339 			call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
1340 						      mrt->id);
1341 			mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1342 			mr_cache_put(c);
1343 		}
1344 
1345 		mutex_unlock(&net->ipv4.mfc_mutex);
1346 	}
1347 
1348 	if (flags & MRT_FLUSH_MFC) {
1349 		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1350 			spin_lock_bh(&mfc_unres_lock);
1351 			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1352 				list_del(&c->list);
1353 				cache = (struct mfc_cache *)c;
1354 				mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1355 				ipmr_destroy_unres(mrt, cache);
1356 			}
1357 			spin_unlock_bh(&mfc_unres_lock);
1358 		}
1359 	}
1360 }
1361 
1362 /* Called from ip_ra_control() before an RCU grace period;
1363  * we don't need to call synchronize_rcu() here.
1364  */
1365 static void mrtsock_destruct(struct sock *sk)
1366 {
1367 	struct net *net = sock_net(sk);
1368 	LIST_HEAD(dev_kill_list);
1369 	struct mr_table *mrt;
1370 
1371 	rtnl_lock();
1372 
1373 	ipmr_for_each_table(mrt, net) {
1374 		if (sk == rtnl_dereference(mrt->mroute_sk)) {
1375 			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1376 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
1377 						    NETCONFA_MC_FORWARDING,
1378 						    NETCONFA_IFINDEX_ALL,
1379 						    net->ipv4.devconf_all);
1380 			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1381 			mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC,
1382 					    &dev_kill_list);
1383 		}
1384 	}
1385 
1386 	unregister_netdevice_many(&dev_kill_list);
1387 
1388 	rtnl_unlock();
1389 }
1390 
1391 /* Socket options and virtual interface manipulation. The whole
1392  * virtual interface system is a complete heap, but unfortunately
1393  * that's how BSD mrouted happens to think. Maybe one day with a proper
1394  * MOSPF/PIM router set up we can clean this up.
1395  */
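/* A minimal sketch of the user-space side (illustrative; error handling
 * omitted, and my_if_addr/src/grp are placeholders):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1,
 *			     .vifc_lcl_addr.s_addr = my_if_addr };
 *	struct mfcctl mc = { .mfcc_origin.s_addr = src,
 *			     .mfcc_mcastgrp.s_addr = grp,
 *			     .mfcc_parent = 0,
 *			     .mfcc_ttls = { [1] = 1 } };
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * Closing fd (or MRT_DONE) tears everything down again via
 * mrtsock_destruct().
 */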
1396 
1397 int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
1398 			 unsigned int optlen)
1399 {
1400 	struct net *net = sock_net(sk);
1401 	int val, ret = 0, parent = 0;
1402 	struct mr_table *mrt;
1403 	struct vifctl vif;
1404 	struct mfcctl mfc;
1405 	bool do_wrvifwhole;
1406 	u32 uval;
1407 
1408 	/* There's one exception to the lock - MRT_DONE which needs to unlock */
1409 	rtnl_lock();
1410 	if (sk->sk_type != SOCK_RAW ||
1411 	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
1412 		ret = -EOPNOTSUPP;
1413 		goto out_unlock;
1414 	}
1415 
1416 	mrt = __ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1417 	if (!mrt) {
1418 		ret = -ENOENT;
1419 		goto out_unlock;
1420 	}
1421 	if (optname != MRT_INIT) {
1422 		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1423 		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1424 			ret = -EACCES;
1425 			goto out_unlock;
1426 		}
1427 	}
1428 
1429 	switch (optname) {
1430 	case MRT_INIT:
1431 		if (optlen != sizeof(int)) {
1432 			ret = -EINVAL;
1433 			break;
1434 		}
1435 		if (rtnl_dereference(mrt->mroute_sk)) {
1436 			ret = -EADDRINUSE;
1437 			break;
1438 		}
1439 
1440 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
1441 		if (ret == 0) {
1442 			rcu_assign_pointer(mrt->mroute_sk, sk);
1443 			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1444 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
1445 						    NETCONFA_MC_FORWARDING,
1446 						    NETCONFA_IFINDEX_ALL,
1447 						    net->ipv4.devconf_all);
1448 		}
1449 		break;
1450 	case MRT_DONE:
1451 		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1452 			ret = -EACCES;
1453 		} else {
1454 			/* We need to unlock here because mrtsock_destruct takes
1455 			 * care of rtnl itself and we can't change that due to
1456 			 * the IP_ROUTER_ALERT setsockopt which runs without it.
1457 			 */
1458 			rtnl_unlock();
1459 			ret = ip_ra_control(sk, 0, NULL);
1460 			goto out;
1461 		}
1462 		break;
1463 	case MRT_ADD_VIF:
1464 	case MRT_DEL_VIF:
1465 		if (optlen != sizeof(vif)) {
1466 			ret = -EINVAL;
1467 			break;
1468 		}
1469 		if (copy_from_sockptr(&vif, optval, sizeof(vif))) {
1470 			ret = -EFAULT;
1471 			break;
1472 		}
1473 		if (vif.vifc_vifi >= MAXVIFS) {
1474 			ret = -ENFILE;
1475 			break;
1476 		}
1477 		if (optname == MRT_ADD_VIF) {
1478 			ret = vif_add(net, mrt, &vif,
1479 				      sk == rtnl_dereference(mrt->mroute_sk));
1480 		} else {
1481 			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1482 		}
1483 		break;
1484 	/* Manipulate the forwarding caches. These live
1485 	 * in a sort of kernel/user symbiosis.
1486 	 */
1487 	case MRT_ADD_MFC:
1488 	case MRT_DEL_MFC:
1489 		parent = -1;
1490 		fallthrough;
1491 	case MRT_ADD_MFC_PROXY:
1492 	case MRT_DEL_MFC_PROXY:
1493 		if (optlen != sizeof(mfc)) {
1494 			ret = -EINVAL;
1495 			break;
1496 		}
1497 		if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) {
1498 			ret = -EFAULT;
1499 			break;
1500 		}
1501 		if (parent == 0)
1502 			parent = mfc.mfcc_parent;
1503 
1504 		mutex_lock(&net->ipv4.mfc_mutex);
1505 
1506 		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
1507 			ret = ipmr_mfc_delete(mrt, &mfc, parent);
1508 		else
1509 			ret = ipmr_mfc_add(net, mrt, &mfc,
1510 					   sk == rtnl_dereference(mrt->mroute_sk),
1511 					   parent);
1512 
1513 		mutex_unlock(&net->ipv4.mfc_mutex);
1514 		break;
1515 	case MRT_FLUSH: {
1516 		LIST_HEAD(dev_kill_list);
1517 
1518 		if (optlen != sizeof(val)) {
1519 			ret = -EINVAL;
1520 			break;
1521 		}
1522 		if (copy_from_sockptr(&val, optval, sizeof(val))) {
1523 			ret = -EFAULT;
1524 			break;
1525 		}
1526 
1527 		mroute_clean_tables(mrt, val, &dev_kill_list);
1528 		unregister_netdevice_many(&dev_kill_list);
1529 		break;
1530 	}
1531 	/* Control PIM assert. */
1532 	case MRT_ASSERT:
1533 		if (optlen != sizeof(val)) {
1534 			ret = -EINVAL;
1535 			break;
1536 		}
1537 		if (copy_from_sockptr(&val, optval, sizeof(val))) {
1538 			ret = -EFAULT;
1539 			break;
1540 		}
1541 		WRITE_ONCE(mrt->mroute_do_assert, val);
1542 		break;
1543 	case MRT_PIM:
1544 		if (!ipmr_pimsm_enabled()) {
1545 			ret = -ENOPROTOOPT;
1546 			break;
1547 		}
1548 		if (optlen != sizeof(val)) {
1549 			ret = -EINVAL;
1550 			break;
1551 		}
1552 		if (copy_from_sockptr(&val, optval, sizeof(val))) {
1553 			ret = -EFAULT;
1554 			break;
1555 		}
1556 
1557 		do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
1558 		val = !!val;
1559 		if (val != mrt->mroute_do_pim) {
1560 			WRITE_ONCE(mrt->mroute_do_pim, val);
1561 			WRITE_ONCE(mrt->mroute_do_assert, val);
1562 			WRITE_ONCE(mrt->mroute_do_wrvifwhole, do_wrvifwhole);
1563 		}
1564 		break;
1565 	case MRT_TABLE:
1566 		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
1567 			ret = -ENOPROTOOPT;
1568 			break;
1569 		}
1570 		if (optlen != sizeof(uval)) {
1571 			ret = -EINVAL;
1572 			break;
1573 		}
1574 		if (copy_from_sockptr(&uval, optval, sizeof(uval))) {
1575 			ret = -EFAULT;
1576 			break;
1577 		}
1578 
1579 		if (sk == rtnl_dereference(mrt->mroute_sk)) {
1580 			ret = -EBUSY;
1581 		} else {
1582 			mrt = ipmr_new_table(net, uval);
1583 			if (IS_ERR(mrt))
1584 				ret = PTR_ERR(mrt);
1585 			else
1586 				raw_sk(sk)->ipmr_table = uval;
1587 		}
1588 		break;
1589 	/* Spurious command, or MRT_VERSION which you cannot set. */
1590 	default:
1591 		ret = -ENOPROTOOPT;
1592 	}
1593 out_unlock:
1594 	rtnl_unlock();
1595 out:
1596 	return ret;
1597 }
1598 
1599 /* Execute if this ioctl is a special mroute ioctl */
1600 int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1601 {
1602 	switch (cmd) {
1603 	/* These userspace buffers will be consumed by ipmr_ioctl() */
1604 	case SIOCGETVIFCNT: {
1605 		struct sioc_vif_req buffer;
1606 
1607 		return sock_ioctl_inout(sk, cmd, arg, &buffer,
1608 				      sizeof(buffer));
1609 		}
1610 	case SIOCGETSGCNT: {
1611 		struct sioc_sg_req buffer;
1612 
1613 		return sock_ioctl_inout(sk, cmd, arg, &buffer,
1614 				      sizeof(buffer));
1615 		}
1616 	}
1617 	/* return code > 0 means that the ioctl was not executed */
1618 	return 1;
1619 }
1620 
1621 /* Getsockopt support for the multicast routing system. */
1622 int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
1623 			 sockptr_t optlen)
1624 {
1625 	int olr;
1626 	int val;
1627 	struct net *net = sock_net(sk);
1628 	struct mr_table *mrt;
1629 
1630 	if (sk->sk_type != SOCK_RAW ||
1631 	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
1632 		return -EOPNOTSUPP;
1633 
1634 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1635 	if (!mrt)
1636 		return -ENOENT;
1637 
1638 	switch (optname) {
1639 	case MRT_VERSION:
1640 		val = 0x0305;
1641 		break;
1642 	case MRT_PIM:
1643 		if (!ipmr_pimsm_enabled())
1644 			return -ENOPROTOOPT;
1645 		val = READ_ONCE(mrt->mroute_do_pim);
1646 		break;
1647 	case MRT_ASSERT:
1648 		val = READ_ONCE(mrt->mroute_do_assert);
1649 		break;
1650 	default:
1651 		return -ENOPROTOOPT;
1652 	}
1653 
1654 	if (copy_from_sockptr(&olr, optlen, sizeof(int)))
1655 		return -EFAULT;
1656 	if (olr < 0)
1657 		return -EINVAL;
1658 
1659 	olr = min_t(unsigned int, olr, sizeof(int));
1660 
1661 	if (copy_to_sockptr(optlen, &olr, sizeof(int)))
1662 		return -EFAULT;
1663 	if (copy_to_sockptr(optval, &val, olr))
1664 		return -EFAULT;
1665 	return 0;
1666 }
1667 
1668 /* The IP multicast ioctl support routines. */
1669 int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
1670 {
1671 	struct vif_device *vif;
1672 	struct mfc_cache *c;
1673 	struct net *net = sock_net(sk);
1674 	struct sioc_vif_req *vr;
1675 	struct sioc_sg_req *sr;
1676 	struct mr_table *mrt;
1677 
1678 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1679 	if (!mrt)
1680 		return -ENOENT;
1681 
1682 	switch (cmd) {
1683 	case SIOCGETVIFCNT:
1684 		vr = (struct sioc_vif_req *)arg;
1685 		if (vr->vifi >= mrt->maxvif)
1686 			return -EINVAL;
1687 		vr->vifi = array_index_nospec(vr->vifi, mrt->maxvif);
1688 		rcu_read_lock();
1689 		vif = &mrt->vif_table[vr->vifi];
1690 		if (VIF_EXISTS(mrt, vr->vifi)) {
1691 			vr->icount = READ_ONCE(vif->pkt_in);
1692 			vr->ocount = READ_ONCE(vif->pkt_out);
1693 			vr->ibytes = READ_ONCE(vif->bytes_in);
1694 			vr->obytes = READ_ONCE(vif->bytes_out);
1695 			rcu_read_unlock();
1696 
1697 			return 0;
1698 		}
1699 		rcu_read_unlock();
1700 		return -EADDRNOTAVAIL;
1701 	case SIOCGETSGCNT:
1702 		sr = (struct sioc_sg_req *)arg;
1703 
1704 		rcu_read_lock();
1705 		c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr);
1706 		if (c) {
1707 			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
1708 			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
1709 			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
1710 			rcu_read_unlock();
1711 			return 0;
1712 		}
1713 		rcu_read_unlock();
1714 		return -EADDRNOTAVAIL;
1715 	default:
1716 		return -ENOIOCTLCMD;
1717 	}
1718 }
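/* A minimal sketch of querying these counters from user space
 * (illustrative; "fd" is the daemon's mroute control socket and
 * src/grp are placeholders):
 *
 *	struct sioc_sg_req sg = {
 *		.src.s_addr = src,
 *		.grp.s_addr = grp,
 *	};
 *
 *	if (ioctl(fd, SIOCGETSGCNT, &sg) == 0)
 *		printf("pkts %lu bytes %lu wrong_if %lu\n",
 *		       sg.pktcnt, sg.bytecnt, sg.wrong_if);
 */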
1719 
1720 #ifdef CONFIG_COMPAT
1721 struct compat_sioc_sg_req {
1722 	struct in_addr src;
1723 	struct in_addr grp;
1724 	compat_ulong_t pktcnt;
1725 	compat_ulong_t bytecnt;
1726 	compat_ulong_t wrong_if;
1727 };
1728 
1729 struct compat_sioc_vif_req {
1730 	vifi_t	vifi;		/* Which iface */
1731 	compat_ulong_t icount;
1732 	compat_ulong_t ocount;
1733 	compat_ulong_t ibytes;
1734 	compat_ulong_t obytes;
1735 };
1736 
1737 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1738 {
1739 	struct compat_sioc_sg_req sr;
1740 	struct compat_sioc_vif_req vr;
1741 	struct vif_device *vif;
1742 	struct mfc_cache *c;
1743 	struct net *net = sock_net(sk);
1744 	struct mr_table *mrt;
1745 
1746 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1747 	if (!mrt)
1748 		return -ENOENT;
1749 
1750 	switch (cmd) {
1751 	case SIOCGETVIFCNT:
1752 		if (copy_from_user(&vr, arg, sizeof(vr)))
1753 			return -EFAULT;
1754 		if (vr.vifi >= mrt->maxvif)
1755 			return -EINVAL;
1756 		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1757 		rcu_read_lock();
1758 		vif = &mrt->vif_table[vr.vifi];
1759 		if (VIF_EXISTS(mrt, vr.vifi)) {
1760 			vr.icount = READ_ONCE(vif->pkt_in);
1761 			vr.ocount = READ_ONCE(vif->pkt_out);
1762 			vr.ibytes = READ_ONCE(vif->bytes_in);
1763 			vr.obytes = READ_ONCE(vif->bytes_out);
1764 			rcu_read_unlock();
1765 
1766 			if (copy_to_user(arg, &vr, sizeof(vr)))
1767 				return -EFAULT;
1768 			return 0;
1769 		}
1770 		rcu_read_unlock();
1771 		return -EADDRNOTAVAIL;
1772 	case SIOCGETSGCNT:
1773 		if (copy_from_user(&sr, arg, sizeof(sr)))
1774 			return -EFAULT;
1775 
1776 		rcu_read_lock();
1777 		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1778 		if (c) {
1779 			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
1780 			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
1781 			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
1782 			rcu_read_unlock();
1783 
1784 			if (copy_to_user(arg, &sr, sizeof(sr)))
1785 				return -EFAULT;
1786 			return 0;
1787 		}
1788 		rcu_read_unlock();
1789 		return -EADDRNOTAVAIL;
1790 	default:
1791 		return -ENOIOCTLCMD;
1792 	}
1793 }
1794 #endif
1795 
1796 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1797 {
1798 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1799 	struct net *net = dev_net(dev);
1800 	struct mr_table *mrt;
1801 	struct vif_device *v;
1802 	int ct;
1803 
1804 	if (event != NETDEV_UNREGISTER)
1805 		return NOTIFY_DONE;
1806 
1807 	ipmr_for_each_table(mrt, net) {
1808 		v = &mrt->vif_table[0];
1809 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1810 			if (rcu_access_pointer(v->dev) == dev)
1811 				vif_delete(mrt, ct, 1, NULL);
1812 		}
1813 	}
1814 	return NOTIFY_DONE;
1815 }
1816 
1817 static struct notifier_block ip_mr_notifier = {
1818 	.notifier_call = ipmr_device_event,
1819 };
1820 
1821 /* Encapsulate a packet by attaching a valid IPIP header to it.
1822  * This avoids tunnel drivers and other mess and gives us the speed so
1823  * important for multicast video.
1824  */
1825 static void ip_encap(struct net *net, struct sk_buff *skb,
1826 		     __be32 saddr, __be32 daddr)
1827 {
1828 	struct iphdr *iph;
1829 	const struct iphdr *old_iph = ip_hdr(skb);
1830 
1831 	skb_push(skb, sizeof(struct iphdr));
1832 	skb->transport_header = skb->network_header;
1833 	skb_reset_network_header(skb);
1834 	iph = ip_hdr(skb);
1835 
1836 	iph->version	=	4;
1837 	iph->tos	=	old_iph->tos;
1838 	iph->ttl	=	old_iph->ttl;
1839 	iph->frag_off	=	0;
1840 	iph->daddr	=	daddr;
1841 	iph->saddr	=	saddr;
1842 	iph->protocol	=	IPPROTO_IPIP;
1843 	iph->ihl	=	5;
1844 	iph->tot_len	=	htons(skb->len);
1845 	ip_select_ident(net, skb, NULL);
1846 	ip_send_check(iph);
1847 
1848 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1849 	nf_reset_ct(skb);
1850 }
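/* After ip_encap() the packet looks like this (a sketch):
 *
 *   | new outer iph: IPPROTO_IPIP, saddr/daddr = vif local/remote |
 *   | original iph | payload ... |
 *
 * TOS and TTL are copied from the (already decremented) inner header,
 * so the tunnel hop does not widen the packet's effective scope.
 */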
1851 
1852 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1853 				      struct sk_buff *skb)
1854 {
1855 	struct ip_options *opt = &(IPCB(skb)->opt);
1856 
1857 	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
1858 
1859 	if (unlikely(opt->optlen))
1860 		ip_forward_options(skb);
1861 
1862 	return dst_output(net, sk, skb);
1863 }
1864 
1865 #ifdef CONFIG_NET_SWITCHDEV
1866 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1867 				   int in_vifi, int out_vifi)
1868 {
1869 	struct vif_device *out_vif = &mrt->vif_table[out_vifi];
1870 	struct vif_device *in_vif = &mrt->vif_table[in_vifi];
1871 
1872 	if (!skb->offload_l3_fwd_mark)
1873 		return false;
1874 	if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
1875 		return false;
1876 	return netdev_phys_item_id_same(&out_vif->dev_parent_id,
1877 					&in_vif->dev_parent_id);
1878 }
1879 #else
1880 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1881 				   int in_vifi, int out_vifi)
1882 {
1883 	return false;
1884 }
1885 #endif
1886 
1887 /* Processing handlers for ipmr_forward, under rcu_read_lock() */
1888 
1889 static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt,
1890 			     struct sk_buff *skb, int vifi)
1891 {
1892 	const struct iphdr *iph = ip_hdr(skb);
1893 	struct vif_device *vif = &mrt->vif_table[vifi];
1894 	struct net_device *vif_dev;
1895 	struct rtable *rt;
1896 	struct flowi4 fl4;
1897 	int    encap = 0;
1898 
1899 	vif_dev = vif_dev_read(vif);
1900 	if (!vif_dev)
1901 		return -1;
1902 
1903 	if (vif->flags & VIFF_REGISTER) {
1904 		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
1905 		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
1906 		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
1907 		DEV_STATS_INC(vif_dev, tx_packets);
1908 		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1909 		return -1;
1910 	}
1911 
1912 	if (vif->flags & VIFF_TUNNEL) {
1913 		rt = ip_route_output_ports(net, &fl4, NULL,
1914 					   vif->remote, vif->local,
1915 					   0, 0,
1916 					   IPPROTO_IPIP,
1917 					   iph->tos & INET_DSCP_MASK, vif->link);
1918 		if (IS_ERR(rt))
1919 			return -1;
1920 		encap = sizeof(struct iphdr);
1921 	} else {
1922 		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1923 					   0, 0,
1924 					   IPPROTO_IPIP,
1925 					   iph->tos & INET_DSCP_MASK, vif->link);
1926 		if (IS_ERR(rt))
1927 			return -1;
1928 	}
1929 
1930 	if (skb->len+encap > dst4_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1931 		/* Do not fragment multicasts. Alas, IPv4 does not
1932 		 * allow us to send ICMP here, so the packets simply
1933 		 * disappear into a black hole.
1934 		 */
1935 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1936 		ip_rt_put(rt);
1937 		return -1;
1938 	}
1939 
1940 	encap += LL_RESERVED_SPACE(dst_dev_rcu(&rt->dst)) + rt->dst.header_len;
1941 
1942 	if (skb_cow(skb, encap)) {
1943 		ip_rt_put(rt);
1944 		return -1;
1945 	}
1946 
1947 	WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
1948 	WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
1949 
1950 	skb_dst_drop(skb);
1951 	skb_dst_set(skb, &rt->dst);
1952 	ip_decrease_ttl(ip_hdr(skb));
1953 
1954 	/* FIXME: forward and output firewalls used to be called here.
1955 	 * What do we do with netfilter? -- RR
1956 	 */
1957 	if (vif->flags & VIFF_TUNNEL) {
1958 		ip_encap(net, skb, vif->local, vif->remote);
1959 		/* FIXME: extra output firewall step used to be here. --RR */
1960 		DEV_STATS_INC(vif_dev, tx_packets);
1961 		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt,
1968 				int in_vifi, struct sk_buff *skb, int vifi)
1969 {
1970 	struct rtable *rt;
1971 
1972 	if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
1973 		goto out_free;
1974 
1975 	if (ipmr_prepare_xmit(net, mrt, skb, vifi))
1976 		goto out_free;
1977 
1978 	rt = skb_rtable(skb);
1979 
1980 	IPCB(skb)->flags |= IPSKB_FORWARDED;
1981 
1982 	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
1983 	 * locally not only before forwarding, but also after forwarding on
1984 	 * all output interfaces. Clearly, if the mrouter runs a multicast
1985 	 * program, that program should receive packets regardless of which
1986 	 * interface it has joined on.
1987 	 * If we did not do this, the program would have to join on all
1988 	 * interfaces. On the other hand, a multihomed host (or router, but
1989 	 * not mrouter) must not join on more than one interface - that would
1990 	 * result in receiving multiple copies of each packet.
1991 	 */
1992 	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1993 		net, NULL, skb, skb->dev, dst_dev_rcu(&rt->dst),
1994 		ipmr_forward_finish);
1995 	return;
1996 
1997 out_free:
1998 	kfree_skb(skb);
1999 }
2000 
2001 static void ipmr_queue_output_xmit(struct net *net, struct mr_table *mrt,
2002 				   struct sk_buff *skb, int vifi)
2003 {
2004 	if (ipmr_prepare_xmit(net, mrt, skb, vifi))
2005 		goto out_free;
2006 
2007 	ip_mc_output(net, NULL, skb);
2008 	return;
2009 
2010 out_free:
2011 	kfree_skb(skb);
2012 }
2013 
2014 /* Called with mrt_lock or rcu_read_lock() */
2015 static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
2016 {
2017 	int ct;
2018 	/* Pairs with WRITE_ONCE() in vif_delete()/vif_add() */
2019 	for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
2020 		if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
2021 			break;
2022 	}
2023 	return ct;
2024 }
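
/* For reference, a hedged userspace sketch (not kernel code; types from
 * <linux/mroute.h>, address is a documentation value) of how a vif slot
 * scanned above gets populated in the first place:
 *
 *	int on = 1;
 *	struct vifctl vc = {
 *		.vifc_vifi	= 1,	// index into vif_table[]
 *		.vifc_flags	= 0,	// or VIFF_TUNNEL, VIFF_REGISTER, ...
 *		.vifc_threshold	= 1,	// minimum TTL to forward
 *		.vifc_lcl_addr	= { .s_addr = inet_addr("192.0.2.1") },
 *	};
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */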
2025 
2026 /* "local" means that we should preserve one skb (for local delivery) */
2027 /* Called under rcu_read_lock() */
2028 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
2029 			  struct net_device *dev, struct sk_buff *skb,
2030 			  struct mfc_cache *c, int local)
2031 {
2032 	int true_vifi = ipmr_find_vif(mrt, dev);
2033 	int psend = -1;
2034 	int vif, ct;
2035 
2036 	vif = c->_c.mfc_parent;
2037 	atomic_long_inc(&c->_c.mfc_un.res.pkt);
2038 	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
2039 	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
2040 
2041 	if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
2042 		struct mfc_cache *cache_proxy;
2043 
2044 		/* For an (*,G) entry, we only check that the incoming
2045 		 * interface is part of the static tree.
2046 		 */
2047 		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2048 		if (cache_proxy &&
2049 		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
2050 			goto forward;
2051 	}
2052 
2053 	/* Wrong interface: drop packet and (maybe) send PIM assert. */
2054 	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
2055 		if (rt_is_output_route(skb_rtable(skb))) {
2056 			/* It is our own packet, looped back.
2057 			 * Very complicated situation...
2058 			 *
2059 			 * The best workaround until routing daemons are
2060 			 * fixed is not to redistribute a packet if it was
2061 			 * sent through the wrong interface. This means that
2062 			 * multicast applications WILL NOT work for
2063 			 * (S,G) entries whose default multicast route points
2064 			 * to the wrong oif. In any case, it is not a good
2065 			 * idea to run multicast applications on a router.
2066 			 */
2067 			goto dont_forward;
2068 		}
2069 
2070 		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
2071 
2072 		if (true_vifi >= 0 && READ_ONCE(mrt->mroute_do_assert) &&
2073 		    /* PIM-SM uses asserts when switching from RPT to SPT,
2074 		     * so we cannot check that the packet arrived on an oif.
2075 		     * That is bad, but otherwise we would need to move a
2076 		     * pretty large chunk of pimd into the kernel. Ough... --ANK
2077 		     */
2078 		    (READ_ONCE(mrt->mroute_do_pim) ||
2079 		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2080 		    time_after(jiffies,
2081 			       c->_c.mfc_un.res.last_assert +
2082 			       MFC_ASSERT_THRESH)) {
2083 			c->_c.mfc_un.res.last_assert = jiffies;
2084 			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
2085 			if (READ_ONCE(mrt->mroute_do_wrvifwhole))
2086 				ipmr_cache_report(mrt, skb, true_vifi,
2087 						  IGMPMSG_WRVIFWHOLE);
2088 		}
2089 		goto dont_forward;
2090 	}
2091 
2092 forward:
2093 	WRITE_ONCE(mrt->vif_table[vif].pkt_in,
2094 		   mrt->vif_table[vif].pkt_in + 1);
2095 	WRITE_ONCE(mrt->vif_table[vif].bytes_in,
2096 		   mrt->vif_table[vif].bytes_in + skb->len);
2097 
2098 	/* Forward the frame */
2099 	if (c->mfc_origin == htonl(INADDR_ANY) &&
2100 	    c->mfc_mcastgrp == htonl(INADDR_ANY)) {
2101 		if (true_vifi >= 0 &&
2102 		    true_vifi != c->_c.mfc_parent &&
2103 		    ip_hdr(skb)->ttl >
2104 				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2105 			/* It's an (*,*) entry and the packet is not coming from
2106 			 * the upstream: forward the packet to the upstream
2107 			 * only.
2108 			 */
2109 			psend = c->_c.mfc_parent;
2110 			goto last_forward;
2111 		}
2112 		goto dont_forward;
2113 	}
2114 	for (ct = c->_c.mfc_un.res.maxvif - 1;
2115 	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2116 		/* For a (*,G) entry, don't forward to the incoming interface */
2117 		if ((c->mfc_origin != htonl(INADDR_ANY) ||
2118 		     ct != true_vifi) &&
2119 		    ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
2120 			if (psend != -1) {
2121 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2122 
2123 				if (skb2)
2124 					ipmr_queue_fwd_xmit(net, mrt, true_vifi,
2125 							    skb2, psend);
2126 			}
2127 			psend = ct;
2128 		}
2129 	}
2130 last_forward:
2131 	if (psend != -1) {
2132 		if (local) {
2133 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2134 
2135 			if (skb2)
2136 				ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb2,
2137 						    psend);
2138 		} else {
2139 			ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb, psend);
2140 			return;
2141 		}
2142 	}
2143 
2144 dont_forward:
2145 	if (!local)
2146 		kfree_skb(skb);
2147 }
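
/* The per-vif ttls[] thresholds consulted above are installed from
 * userspace. A hedged sketch via the classic socket API (illustrative
 * only; addresses are documentation values):
 *
 *	struct mfcctl mc = {
 *		.mfcc_origin   = { .s_addr = inet_addr("192.0.2.1") },
 *		.mfcc_mcastgrp = { .s_addr = inet_addr("233.252.0.1") },
 *		.mfcc_parent   = 0,			// expected iif vif
 *		.mfcc_ttls     = { [1] = 1, [2] = 64 },	// oif thresholds
 *	};
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * A packet is then copied to vif N only when its TTL is strictly
 * greater than ttls[N]; thresholds of 0 or 255 exclude the vif.
 */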
2148 
2149 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
2150 {
2151 	struct rtable *rt = skb_rtable(skb);
2152 	struct iphdr *iph = ip_hdr(skb);
2153 	struct flowi4 fl4 = {
2154 		.daddr = iph->daddr,
2155 		.saddr = iph->saddr,
2156 		.flowi4_dscp = ip4h_dscp(iph),
2157 		.flowi4_oif = (rt_is_output_route(rt) ?
2158 			       skb->dev->ifindex : 0),
2159 		.flowi4_iif = (rt_is_output_route(rt) ?
2160 			       LOOPBACK_IFINDEX :
2161 			       skb->dev->ifindex),
2162 		.flowi4_mark = skb->mark,
2163 	};
2164 	struct mr_table *mrt;
2165 	int err;
2166 
2167 	err = ipmr_fib_lookup(net, &fl4, &mrt);
2168 	if (err)
2169 		return ERR_PTR(err);
2170 	return mrt;
2171 }
2172 
2173 /* Multicast packets for forwarding arrive here.
2174  * Called under rcu_read_lock().
2175  */
2176 int ip_mr_input(struct sk_buff *skb)
2177 {
2178 	struct mfc_cache *cache;
2179 	struct net *net = dev_net(skb->dev);
2180 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
2181 	struct mr_table *mrt;
2182 	struct net_device *dev;
2183 
2184 	/* skb->dev passed in is the loX master dev for vrfs.
2185 	 * As there are no vifs associated with loopback devices,
2186 	 * get the proper interface that does have a vif associated with it.
2187 	 */
2188 	dev = skb->dev;
2189 	if (netif_is_l3_master(skb->dev)) {
2190 		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2191 		if (!dev) {
2192 			kfree_skb(skb);
2193 			return -ENODEV;
2194 		}
2195 	}
2196 
2197 	/* A packet looped back after forwarding must not be
2198 	 * forwarded a second time, but it can still be delivered locally.
2199 	 */
2200 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
2201 		goto dont_forward;
2202 
2203 	mrt = ipmr_rt_fib_lookup(net, skb);
2204 	if (IS_ERR(mrt)) {
2205 		kfree_skb(skb);
2206 		return PTR_ERR(mrt);
2207 	}
2208 	if (!local) {
2209 		if (IPCB(skb)->opt.router_alert) {
2210 			if (ip_call_ra_chain(skb))
2211 				return 0;
2212 		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
2213 			/* IGMPv1 (and broken IGMPv2 implementations such as
2214 			 * Cisco IOS <= 11.2(8)) do not put the router alert
2215 			 * option into IGMP packets destined for routable
2216 			 * groups. That is very bad, because it means
2217 			 * that we can forward NO IGMP messages.
2218 			 */
2219 			struct sock *mroute_sk;
2220 
2221 			mroute_sk = rcu_dereference(mrt->mroute_sk);
2222 			if (mroute_sk) {
2223 				nf_reset_ct(skb);
2224 				raw_rcv(mroute_sk, skb);
2225 				return 0;
2226 			}
2227 		}
2228 	}
2229 
2230 	/* already under rcu_read_lock() */
2231 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2232 	if (!cache) {
2233 		int vif = ipmr_find_vif(mrt, dev);
2234 
2235 		if (vif >= 0)
2236 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2237 						    vif);
2238 	}
2239 
2240 	/* No usable cache entry */
2241 	if (!cache) {
2242 		int vif;
2243 
2244 		if (local) {
2245 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2246 			ip_local_deliver(skb);
2247 			if (!skb2)
2248 				return -ENOBUFS;
2249 			skb = skb2;
2250 		}
2251 
2252 		vif = ipmr_find_vif(mrt, dev);
2253 		if (vif >= 0)
2254 			return ipmr_cache_unresolved(mrt, vif, skb, dev);
2255 		kfree_skb(skb);
2256 		return -ENODEV;
2257 	}
2258 
2259 	ip_mr_forward(net, mrt, dev, skb, cache, local);
2260 
2261 	if (local)
2262 		return ip_local_deliver(skb);
2263 
2264 	return 0;
2265 
2266 dont_forward:
2267 	if (local)
2268 		return ip_local_deliver(skb);
2269 	kfree_skb(skb);
2270 	return 0;
2271 }
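
/* When ipmr_cache_unresolved() is taken above, the kernel queues the
 * packet and sends an IGMPMSG_NOCACHE upcall to the mroute socket. A
 * hedged sketch of the userspace side (illustrative, error handling
 * omitted); im_mbz == 0 distinguishes upcalls from genuine IGMP:
 *
 *	char buf[2048];
 *	int on = 1, fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	for (;;) {
 *		ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *		struct igmpmsg *msg = (struct igmpmsg *)buf;
 *
 *		if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0 &&
 *		    msg->im_msgtype == IGMPMSG_NOCACHE) {
 *			// resolve (msg->im_src, msg->im_dst), then install
 *			// an MFC entry (MRT_ADD_MFC) to drain the queue
 *		}
 *	}
 */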
2272 
2273 static void ip_mr_output_finish(struct net *net, struct mr_table *mrt,
2274 				struct net_device *dev, struct sk_buff *skb,
2275 				struct mfc_cache *c)
2276 {
2277 	int psend = -1;
2278 	int ct;
2279 
2280 	atomic_long_inc(&c->_c.mfc_un.res.pkt);
2281 	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
2282 	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
2283 
2284 	/* Forward the frame */
2285 	if (c->mfc_origin == htonl(INADDR_ANY) &&
2286 	    c->mfc_mcastgrp == htonl(INADDR_ANY)) {
2287 		if (ip_hdr(skb)->ttl >
2288 		    c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2289 			/* It's an (*,*) entry and the packet was generated
2290 			 * locally rather than received from the upstream:
2291 			 * send it to the upstream only.
2292 			 */
2293 			psend = c->_c.mfc_parent;
2294 			goto last_xmit;
2295 		}
2296 		goto dont_xmit;
2297 	}
2298 
2299 	for (ct = c->_c.mfc_un.res.maxvif - 1;
2300 	     ct >= c->_c.mfc_un.res.minvif; ct--) {
2301 		if (ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
2302 			if (psend != -1) {
2303 				struct sk_buff *skb2;
2304 
2305 				skb2 = skb_clone(skb, GFP_ATOMIC);
2306 				if (skb2)
2307 					ipmr_queue_output_xmit(net, mrt,
2308 							       skb2, psend);
2309 			}
2310 			psend = ct;
2311 		}
2312 	}
2313 
2314 last_xmit:
2315 	if (psend != -1) {
2316 		ipmr_queue_output_xmit(net, mrt, skb, psend);
2317 		return;
2318 	}
2319 
2320 dont_xmit:
2321 	kfree_skb(skb);
2322 }
2323 
2324 /* Multicast packets on the output path arrive here.
2325  * Takes rcu_read_lock() itself via guard(rcu)().
2326  */
2327 int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2328 {
2329 	struct rtable *rt = skb_rtable(skb);
2330 	struct mfc_cache *cache;
2331 	struct net_device *dev;
2332 	struct mr_table *mrt;
2333 	int vif;
2334 
2335 	guard(rcu)();
2336 
2337 	dev = dst_dev_rcu(&rt->dst);
2338 
2339 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
2340 		goto mc_output;
2341 	if (!(IPCB(skb)->flags & IPSKB_MCROUTE))
2342 		goto mc_output;
2343 
2344 	skb->dev = dev;
2345 
2346 	mrt = ipmr_rt_fib_lookup(net, skb);
2347 	if (IS_ERR(mrt))
2348 		goto mc_output;
2349 
2350 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2351 	if (!cache) {
2352 		vif = ipmr_find_vif(mrt, dev);
2353 		if (vif >= 0)
2354 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2355 						    vif);
2356 	}
2357 
2358 	/* No usable cache entry */
2359 	if (!cache) {
2360 		vif = ipmr_find_vif(mrt, dev);
2361 		if (vif >= 0)
2362 			return ipmr_cache_unresolved(mrt, vif, skb, dev);
2363 		goto mc_output;
2364 	}
2365 
2366 	vif = cache->_c.mfc_parent;
2367 	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev)
2368 		goto mc_output;
2369 
2370 	ip_mr_output_finish(net, mrt, dev, skb, cache);
2371 	return 0;
2372 
2373 mc_output:
2374 	return ip_mc_output(net, sk, skb);
2375 }
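
/* Only locally generated packets explicitly flagged IPSKB_MCROUTE take
 * the multicast routing path above; everything else, including packets
 * already forwarded once, falls through to plain ip_mc_output().
 */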
2376 
2377 #ifdef CONFIG_IP_PIMSM_V1
2378 /* Handle PIMv1 messages, which arrive encapsulated in IGMP */
2379 int pim_rcv_v1(struct sk_buff *skb)
2380 {
2381 	struct igmphdr *pim;
2382 	struct net *net = dev_net(skb->dev);
2383 	struct mr_table *mrt;
2384 
2385 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2386 		goto drop;
2387 
2388 	pim = igmp_hdr(skb);
2389 
2390 	mrt = ipmr_rt_fib_lookup(net, skb);
2391 	if (IS_ERR(mrt))
2392 		goto drop;
2393 	if (!READ_ONCE(mrt->mroute_do_pim) ||
2394 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2395 		goto drop;
2396 
2397 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2398 drop:
2399 		kfree_skb(skb);
2400 	}
2401 	return 0;
2402 }
2403 #endif
2404 
2405 #ifdef CONFIG_IP_PIMSM_V2
2406 static int pim_rcv(struct sk_buff *skb)
2407 {
2408 	struct pimreghdr *pim;
2409 	struct net *net = dev_net(skb->dev);
2410 	struct mr_table *mrt;
2411 
2412 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2413 		goto drop;
2414 
2415 	pim = (struct pimreghdr *)skb_transport_header(skb);
2416 	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
2417 	    (pim->flags & PIM_NULL_REGISTER) ||
2418 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2419 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2420 		goto drop;
2421 
2422 	mrt = ipmr_rt_fib_lookup(net, skb);
2423 	if (IS_ERR(mrt))
2424 		goto drop;
2425 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2426 drop:
2427 		kfree_skb(skb);
2428 	}
2429 	return 0;
2430 }
2431 #endif
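
/* Note on pim_rcv() above: a register is accepted when its checksum
 * verifies over just the PIM header or, failing that, over the whole
 * packet; it is dropped only if both checks fail.
 */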
2432 
2433 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2434 		   __be32 saddr, __be32 daddr,
2435 		   struct rtmsg *rtm, u32 portid)
2436 {
2437 	struct mfc_cache *cache;
2438 	struct mr_table *mrt;
2439 	int err;
2440 
2441 	rcu_read_lock();
2442 	mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
2443 	if (!mrt) {
2444 		rcu_read_unlock();
2445 		return -ENOENT;
2446 	}
2447 
2448 	cache = ipmr_cache_find(mrt, saddr, daddr);
2449 	if (!cache && skb->dev) {
2450 		int vif = ipmr_find_vif(mrt, skb->dev);
2451 
2452 		if (vif >= 0)
2453 			cache = ipmr_cache_find_any(mrt, daddr, vif);
2454 	}
2455 	if (!cache) {
2456 		struct sk_buff *skb2;
2457 		struct iphdr *iph;
2458 		struct net_device *dev;
2459 		int vif = -1;
2460 
2461 		dev = skb->dev;
2462 		if (dev)
2463 			vif = ipmr_find_vif(mrt, dev);
2464 		if (vif < 0) {
2465 			rcu_read_unlock();
2466 			return -ENODEV;
2467 		}
2468 
2469 		skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
2470 		if (!skb2) {
2471 			rcu_read_unlock();
2472 			return -ENOMEM;
2473 		}
2474 
2475 		NETLINK_CB(skb2).portid = portid;
2476 		skb_push(skb2, sizeof(struct iphdr));
2477 		skb_reset_network_header(skb2);
2478 		iph = ip_hdr(skb2);
2479 		iph->ihl = sizeof(struct iphdr) >> 2;
2480 		iph->saddr = saddr;
2481 		iph->daddr = daddr;
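		/* version 0 never appears in a real IPv4 header: it marks
		 * this stub so the cache-resolve code can tell a queued
		 * netlink request apart from queued data packets.
		 */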
2482 		iph->version = 0;
2483 		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2484 		rcu_read_unlock();
2485 		return err;
2486 	}
2487 
2488 	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2489 	rcu_read_unlock();
2490 	return err;
2491 }
2492 
2493 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2494 			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2495 			    int flags)
2496 {
2497 	struct nlmsghdr *nlh;
2498 	struct rtmsg *rtm;
2499 	int err;
2500 
2501 	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2502 	if (!nlh)
2503 		return -EMSGSIZE;
2504 
2505 	rtm = nlmsg_data(nlh);
2506 	rtm->rtm_family   = RTNL_FAMILY_IPMR;
2507 	rtm->rtm_dst_len  = 32;
2508 	rtm->rtm_src_len  = 32;
2509 	rtm->rtm_tos      = 0;
2510 	rtm->rtm_table    = mrt->id;
2511 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2512 		goto nla_put_failure;
2513 	rtm->rtm_type     = RTN_MULTICAST;
2514 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2515 	if (c->_c.mfc_flags & MFC_STATIC)
2516 		rtm->rtm_protocol = RTPROT_STATIC;
2517 	else
2518 		rtm->rtm_protocol = RTPROT_MROUTED;
2519 	rtm->rtm_flags    = 0;
2520 
2521 	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2522 	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2523 		goto nla_put_failure;
2524 	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2525 	/* do not break the dump if cache is unresolved */
2526 	if (err < 0 && err != -ENOENT)
2527 		goto nla_put_failure;
2528 
2529 	nlmsg_end(skb, nlh);
2530 	return 0;
2531 
2532 nla_put_failure:
2533 	nlmsg_cancel(skb, nlh);
2534 	return -EMSGSIZE;
2535 }
2536 
2537 static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2538 			     u32 portid, u32 seq, struct mr_mfc *c, int cmd,
2539 			     int flags)
2540 {
2541 	return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
2542 				cmd, flags);
2543 }
2544 
2545 static size_t mroute_msgsize(bool unresolved)
2546 {
2547 	size_t len =
2548 		NLMSG_ALIGN(sizeof(struct rtmsg))
2549 		+ nla_total_size(4)	/* RTA_TABLE */
2550 		+ nla_total_size(4)	/* RTA_SRC */
2551 		+ nla_total_size(4)	/* RTA_DST */
2552 		;
2553 
2554 	if (!unresolved)
2555 		len = len
2556 		      + nla_total_size(4)	/* RTA_IIF */
2557 		      + nla_total_size(0)	/* RTA_MULTIPATH */
2558 		      + MAXVIFS * NLA_ALIGN(sizeof(struct rtnexthop))
2559 						/* RTA_MFC_STATS */
2560 		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2561 		;
2562 
2563 	return len;
2564 }
2565 
2566 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2567 				 int cmd)
2568 {
2569 	struct net *net = read_pnet(&mrt->net);
2570 	struct sk_buff *skb;
2571 	int err = -ENOBUFS;
2572 
2573 	skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS),
2574 			GFP_ATOMIC);
2575 	if (!skb)
2576 		goto errout;
2577 
2578 	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2579 	if (err < 0)
2580 		goto errout;
2581 
2582 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2583 	return;
2584 
2585 errout:
2586 	kfree_skb(skb);
2587 	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2588 }
2589 
2590 static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
2591 {
2592 	size_t len =
2593 		NLMSG_ALIGN(sizeof(struct rtgenmsg))
2594 		+ nla_total_size(1)	/* IPMRA_CREPORT_MSGTYPE */
2595 		+ nla_total_size(4)	/* IPMRA_CREPORT_VIF_ID */
2596 		+ nla_total_size(4)	/* IPMRA_CREPORT_SRC_ADDR */
2597 		+ nla_total_size(4)	/* IPMRA_CREPORT_DST_ADDR */
2598 		+ nla_total_size(4)	/* IPMRA_CREPORT_TABLE */
2599 					/* IPMRA_CREPORT_PKT */
2600 		+ nla_total_size(payloadlen)
2601 		;
2602 
2603 	return len;
2604 }
2605 
2606 static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
2607 {
2608 	struct net *net = read_pnet(&mrt->net);
2609 	struct nlmsghdr *nlh;
2610 	struct rtgenmsg *rtgenm;
2611 	struct igmpmsg *msg;
2612 	struct sk_buff *skb;
2613 	struct nlattr *nla;
2614 	int payloadlen;
2615 
2616 	payloadlen = pkt->len - sizeof(struct igmpmsg);
2617 	msg = (struct igmpmsg *)skb_network_header(pkt);
2618 
2619 	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2620 	if (!skb)
2621 		goto errout;
2622 
2623 	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2624 			sizeof(struct rtgenmsg), 0);
2625 	if (!nlh)
2626 		goto errout;
2627 	rtgenm = nlmsg_data(nlh);
2628 	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
2629 	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
2630 	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif | (msg->im_vif_hi << 8)) ||
2631 	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
2632 			    msg->im_src.s_addr) ||
2633 	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
2634 			    msg->im_dst.s_addr) ||
2635 	    nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id))
2636 		goto nla_put_failure;
2637 
2638 	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
2639 	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
2640 				  nla_data(nla), payloadlen))
2641 		goto nla_put_failure;
2642 
2643 	nlmsg_end(skb, nlh);
2644 
2645 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
2646 	return;
2647 
2648 nla_put_failure:
2649 	nlmsg_cancel(skb, nlh);
2650 errout:
2651 	kfree_skb(skb);
2652 	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
2653 }
2654 
2655 static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
2656 				       const struct nlmsghdr *nlh,
2657 				       struct nlattr **tb,
2658 				       struct netlink_ext_ack *extack)
2659 {
2660 	struct rtmsg *rtm;
2661 	int i, err;
2662 
2663 	rtm = nlmsg_payload(nlh, sizeof(*rtm));
2664 	if (!rtm) {
2665 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
2666 		return -EINVAL;
2667 	}
2668 
2669 	if (!netlink_strict_get_check(skb))
2670 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
2671 					      rtm_ipv4_policy, extack);
2672 
2673 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
2674 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
2675 	    rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
2676 	    rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
2677 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
2678 		return -EINVAL;
2679 	}
2680 
2681 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2682 					    rtm_ipv4_policy, extack);
2683 	if (err)
2684 		return err;
2685 
2686 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
2687 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
2688 		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
2689 		return -EINVAL;
2690 	}
2691 
2692 	for (i = 0; i <= RTA_MAX; i++) {
2693 		if (!tb[i])
2694 			continue;
2695 
2696 		switch (i) {
2697 		case RTA_SRC:
2698 		case RTA_DST:
2699 		case RTA_TABLE:
2700 			break;
2701 		default:
2702 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
2703 			return -EINVAL;
2704 		}
2705 	}
2706 
2707 	return 0;
2708 }
2709 
2710 static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2711 			     struct netlink_ext_ack *extack)
2712 {
2713 	struct net *net = sock_net(in_skb->sk);
2714 	struct nlattr *tb[RTA_MAX + 1];
2715 	struct mfc_cache *cache;
2716 	struct mr_table *mrt;
2717 	struct sk_buff *skb;
2718 	__be32 src, grp;
2719 	u32 tableid;
2720 	int err;
2721 
2722 	err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
2723 	if (err < 0)
2724 		goto errout;
2725 
2726 	src = nla_get_in_addr_default(tb[RTA_SRC], 0);
2727 	grp = nla_get_in_addr_default(tb[RTA_DST], 0);
2728 	tableid = nla_get_u32_default(tb[RTA_TABLE], 0);
2729 
2730 	skb = nlmsg_new(mroute_msgsize(false), GFP_KERNEL);
2731 	if (!skb) {
2732 		err = -ENOBUFS;
2733 		goto errout;
2734 	}
2735 
2736 	rcu_read_lock();
2737 
2738 	mrt = __ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2739 	if (!mrt) {
2740 		err = -ENOENT;
2741 		goto errout_unlock;
2742 	}
2743 
2744 	cache = ipmr_cache_find(mrt, src, grp);
2745 	if (!cache) {
2746 		err = -ENOENT;
2747 		goto errout_unlock;
2748 	}
2749 
2750 	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2751 			       nlh->nlmsg_seq, cache,
2752 			       RTM_NEWROUTE, 0);
2753 	if (err < 0)
2754 		goto errout_unlock;
2755 
2756 	rcu_read_unlock();
2757 
2758 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2759 errout:
2760 	return err;
2761 
2762 errout_unlock:
2763 	rcu_read_unlock();
2764 	kfree_skb(skb);
2765 	goto errout;
2766 }
2767 
2768 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2769 {
2770 	struct fib_dump_filter filter = {
2771 		.rtnl_held = false,
2772 	};
2773 	int err;
2774 
2775 	rcu_read_lock();
2776 
2777 	if (cb->strict_check) {
2778 		err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
2779 					    &filter, cb);
2780 		if (err < 0)
2781 			goto out;
2782 	}
2783 
2784 	if (filter.table_id) {
2785 		struct mr_table *mrt;
2786 
2787 		mrt = __ipmr_get_table(sock_net(skb->sk), filter.table_id);
2788 		if (!mrt) {
2789 			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) {
2790 				err = skb->len;
2791 				goto out;
2792 			}
2793 
2794 			NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
2795 			err = -ENOENT;
2796 			goto out;
2797 		}
2798 
2799 		err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
2800 				    &mfc_unres_lock, &filter);
2801 		err = skb->len ? : err;
2802 		goto out;
2803 	}
2804 
2805 	err = mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
2806 			       _ipmr_fill_mroute, &mfc_unres_lock, &filter);
2807 out:
2808 	rcu_read_unlock();
2809 
2810 	return err;
2811 }
2812 
2813 static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
2814 	[RTA_SRC]	= { .type = NLA_U32 },
2815 	[RTA_DST]	= { .type = NLA_U32 },
2816 	[RTA_IIF]	= { .type = NLA_U32 },
2817 	[RTA_TABLE]	= { .type = NLA_U32 },
2818 	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
2819 };
2820 
2821 static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
2822 {
2823 	switch (rtm_protocol) {
2824 	case RTPROT_STATIC:
2825 	case RTPROT_MROUTED:
2826 		return true;
2827 	}
2828 	return false;
2829 }
2830 
2831 static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
2832 {
2833 	struct rtnexthop *rtnh = nla_data(nla);
2834 	int remaining = nla_len(nla), vifi = 0;
2835 
2836 	while (rtnh_ok(rtnh, remaining)) {
2837 		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
2838 		if (++vifi == MAXVIFS)
2839 			break;
2840 		rtnh = rtnh_next(rtnh, &remaining);
2841 	}
2842 
2843 	return remaining > 0 ? -EINVAL : vifi;
2844 }
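
/* rtnh_hops of each rtnexthop in RTA_MULTIPATH is reused as the TTL
 * threshold of the next vif, in positional order. E.g. three entries
 *
 *	{ .rtnh_hops = 1 }, { .rtnh_hops = 255 }, { .rtnh_hops = 64 }
 *
 * yield mfcc_ttls[0..2] = { 1, 255, 64 }: vif 0 forwards packets with
 * TTL > 1, vif 1 is excluded, and vif 2 requires TTL > 64.
 */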
2845 
2846 /* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2847 static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2848 			    struct mfcctl *mfcc, int *mrtsock,
2849 			    struct mr_table **mrtret,
2850 			    struct netlink_ext_ack *extack)
2851 {
2852 	struct net_device *dev = NULL;
2853 	u32 tblid = RT_TABLE_DEFAULT;
2854 	int ret, rem, iif = 0;
2855 	struct mr_table *mrt;
2856 	struct nlattr *attr;
2857 	struct rtmsg *rtm;
2858 
2859 	ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
2860 					rtm_ipmr_policy, extack);
2861 	if (ret < 0)
2862 		goto out;
2863 	rtm = nlmsg_data(nlh);
2864 
2865 	ret = -EINVAL;
2866 	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
2867 	    rtm->rtm_type != RTN_MULTICAST ||
2868 	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
2869 	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
2870 		goto out;
2871 
2872 	memset(mfcc, 0, sizeof(*mfcc));
2873 	mfcc->mfcc_parent = -1;
2874 	ret = 0;
2875 	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
2876 		switch (nla_type(attr)) {
2877 		case RTA_SRC:
2878 			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
2879 			break;
2880 		case RTA_DST:
2881 			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
2882 			break;
2883 		case RTA_IIF:
2884 			iif = nla_get_u32(attr);
2885 			break;
2886 		case RTA_MULTIPATH:
2887 			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
2888 				ret = -EINVAL;
2889 				goto out;
2890 			}
2891 			break;
2892 		case RTA_PREFSRC:
2893 			ret = 1;
2894 			break;
2895 		case RTA_TABLE:
2896 			tblid = nla_get_u32(attr);
2897 			break;
2898 		}
2899 	}
2900 
2901 	rcu_read_lock();
2902 
2903 	mrt = __ipmr_get_table(net, tblid);
2904 	if (!mrt) {
2905 		ret = -ENOENT;
2906 		goto unlock;
2907 	}
2908 
2909 	if (iif) {
2910 		dev = dev_get_by_index_rcu(net, iif);
2911 		if (!dev) {
2912 			ret = -ENODEV;
2913 			goto unlock;
2914 		}
2915 
2916 		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2917 	}
2918 
2919 	*mrtret = mrt;
2920 	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
2921 
2922 unlock:
2923 	rcu_read_unlock();
2924 out:
2925 	return ret;
2926 }
2927 
2928 /* takes care of both newroute and delroute */
2929 static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
2930 			  struct netlink_ext_ack *extack)
2931 {
2932 	struct net *net = sock_net(skb->sk);
2933 	int ret, mrtsock = 0, parent;
2934 	struct mr_table *tbl = NULL;
2935 	struct mfcctl mfcc;
2936 
2937 	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
2938 	if (ret < 0)
2939 		return ret;
2940 
2941 	parent = ret ? mfcc.mfcc_parent : -1;
2942 
2943 	mutex_lock(&net->ipv4.mfc_mutex);
2944 
2945 	if (nlh->nlmsg_type == RTM_NEWROUTE)
2946 		ret = ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
2947 	else
2948 		ret = ipmr_mfc_delete(tbl, &mfcc, parent);
2949 
2950 	mutex_unlock(&net->ipv4.mfc_mutex);
2951 
2952 	return ret;
2953 }
2954 
2955 static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
2956 {
2957 	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
2958 
2959 	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
2960 	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
2961 	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
2962 			READ_ONCE(mrt->mroute_reg_vif_num)) ||
2963 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
2964 		       READ_ONCE(mrt->mroute_do_assert)) ||
2965 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM,
2966 		       READ_ONCE(mrt->mroute_do_pim)) ||
2967 	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
2968 		       READ_ONCE(mrt->mroute_do_wrvifwhole)))
2969 		return false;
2970 
2971 	return true;
2972 }
2973 
2974 static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
2975 {
2976 	struct net_device *vif_dev;
2977 	struct nlattr *vif_nest;
2978 	struct vif_device *vif;
2979 
2980 	vif = &mrt->vif_table[vifid];
2981 	vif_dev = vif_dev_read(vif);
2982 	/* if the VIF doesn't exist just continue */
2983 	if (!vif_dev)
2984 		return true;
2985 
2986 	vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
2987 	if (!vif_nest)
2988 		return false;
2989 
2990 	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, READ_ONCE(vif_dev->ifindex)) ||
2991 	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
2992 	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
2993 	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, READ_ONCE(vif->bytes_in),
2994 			      IPMRA_VIFA_PAD) ||
2995 	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, READ_ONCE(vif->bytes_out),
2996 			      IPMRA_VIFA_PAD) ||
2997 	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, READ_ONCE(vif->pkt_in),
2998 			      IPMRA_VIFA_PAD) ||
2999 	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, READ_ONCE(vif->pkt_out),
3000 			      IPMRA_VIFA_PAD) ||
3001 	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
3002 	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
3003 		nla_nest_cancel(skb, vif_nest);
3004 		return false;
3005 	}
3006 	nla_nest_end(skb, vif_nest);
3007 
3008 	return true;
3009 }
3010 
3011 static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
3012 			       struct netlink_ext_ack *extack)
3013 {
3014 	struct ifinfomsg *ifm;
3015 
3016 	ifm = nlmsg_payload(nlh, sizeof(*ifm));
3017 	if (!ifm) {
3018 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
3019 		return -EINVAL;
3020 	}
3021 
3022 	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
3023 		NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
3024 		return -EINVAL;
3025 	}
3026 
3027 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3028 	    ifm->ifi_change || ifm->ifi_index) {
3029 		NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
3030 		return -EINVAL;
3031 	}
3032 
3033 	return 0;
3034 }
3035 
3036 static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
3037 {
3038 	struct net *net = sock_net(skb->sk);
3039 	struct nlmsghdr *nlh = NULL;
3040 	unsigned int t = 0, s_t;
3041 	unsigned int e = 0, s_e;
3042 	struct mr_table *mrt;
3043 
3044 	if (cb->strict_check) {
3045 		int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
3046 
3047 		if (err < 0)
3048 			return err;
3049 	}
3050 
3051 	s_t = cb->args[0];
3052 	s_e = cb->args[1];
3053 
3054 	rcu_read_lock();
3055 
3056 	ipmr_for_each_table(mrt, net) {
3057 		struct nlattr *vifs, *af;
3058 		struct ifinfomsg *hdr;
3059 		u32 i;
3060 
3061 		if (t < s_t)
3062 			goto skip_table;
3063 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3064 				cb->nlh->nlmsg_seq, RTM_NEWLINK,
3065 				sizeof(*hdr), NLM_F_MULTI);
3066 		if (!nlh)
3067 			break;
3068 
3069 		hdr = nlmsg_data(nlh);
3070 		memset(hdr, 0, sizeof(*hdr));
3071 		hdr->ifi_family = RTNL_FAMILY_IPMR;
3072 
3073 		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
3074 		if (!af) {
3075 			nlmsg_cancel(skb, nlh);
3076 			goto out;
3077 		}
3078 
3079 		if (!ipmr_fill_table(mrt, skb)) {
3080 			nlmsg_cancel(skb, nlh);
3081 			goto out;
3082 		}
3083 
3084 		vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
3085 		if (!vifs) {
3086 			nla_nest_end(skb, af);
3087 			nlmsg_end(skb, nlh);
3088 			goto out;
3089 		}
3090 		for (i = 0; i < READ_ONCE(mrt->maxvif); i++) {
3091 			if (e < s_e)
3092 				goto skip_entry;
3093 			if (!ipmr_fill_vif(mrt, i, skb)) {
3094 				nla_nest_end(skb, vifs);
3095 				nla_nest_end(skb, af);
3096 				nlmsg_end(skb, nlh);
3097 				goto out;
3098 			}
3099 skip_entry:
3100 			e++;
3101 		}
3102 		s_e = 0;
3103 		e = 0;
3104 		nla_nest_end(skb, vifs);
3105 		nla_nest_end(skb, af);
3106 		nlmsg_end(skb, nlh);
3107 skip_table:
3108 		t++;
3109 	}
3110 
3111 out:
3112 	rcu_read_unlock();
3113 
3114 	cb->args[1] = e;
3115 	cb->args[0] = t;
3116 
3117 	return skb->len;
3118 }
3119 
3120 #ifdef CONFIG_PROC_FS
3121 /* The /proc interfaces to multicast routing:
3122  * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
3123  */
3124 
3125 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
3126 	__acquires(RCU)
3127 {
3128 	struct mr_vif_iter *iter = seq->private;
3129 	struct net *net = seq_file_net(seq);
3130 	struct mr_table *mrt;
3131 
3132 	rcu_read_lock();
3133 	mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT);
3134 	if (!mrt) {
3135 		rcu_read_unlock();
3136 		return ERR_PTR(-ENOENT);
3137 	}
3138 
3139 	iter->mrt = mrt;
3140 
3141 	return mr_vif_seq_start(seq, pos);
3142 }
3143 
3144 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
3145 	__releases(RCU)
3146 {
3147 	rcu_read_unlock();
3148 }
3149 
3150 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
3151 {
3152 	struct mr_vif_iter *iter = seq->private;
3153 	struct mr_table *mrt = iter->mrt;
3154 
3155 	if (v == SEQ_START_TOKEN) {
3156 		seq_puts(seq,
3157 			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
3158 	} else {
3159 		const struct vif_device *vif = v;
3160 		const struct net_device *vif_dev;
3161 		const char *name;
3162 
3163 		vif_dev = vif_dev_read(vif);
3164 		name = vif_dev ? vif_dev->name : "none";
3165 		seq_printf(seq,
3166 			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
3167 			   vif - mrt->vif_table,
3168 			   name, vif->bytes_in, vif->pkt_in,
3169 			   vif->bytes_out, vif->pkt_out,
3170 			   vif->flags, vif->local, vif->remote);
3171 	}
3172 	return 0;
3173 }
3174 
3175 static const struct seq_operations ipmr_vif_seq_ops = {
3176 	.start = ipmr_vif_seq_start,
3177 	.next  = mr_vif_seq_next,
3178 	.stop  = ipmr_vif_seq_stop,
3179 	.show  = ipmr_vif_seq_show,
3180 };
3181 
3182 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
3183 {
3184 	struct net *net = seq_file_net(seq);
3185 	struct mr_table *mrt;
3186 
3187 	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
3188 	if (!mrt)
3189 		return ERR_PTR(-ENOENT);
3190 
3191 	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
3192 }
3193 
3194 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
3195 {
3196 	int n;
3197 
3198 	if (v == SEQ_START_TOKEN) {
3199 		seq_puts(seq,
3200 		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
3201 	} else {
3202 		const struct mfc_cache *mfc = v;
3203 		const struct mr_mfc_iter *it = seq->private;
3204 		const struct mr_table *mrt = it->mrt;
3205 
3206 		seq_printf(seq, "%08X %08X %-3hd",
3207 			   (__force u32) mfc->mfc_mcastgrp,
3208 			   (__force u32) mfc->mfc_origin,
3209 			   mfc->_c.mfc_parent);
3210 
3211 		if (it->cache != &mrt->mfc_unres_queue) {
3212 			seq_printf(seq, " %8lu %8lu %8lu",
3213 				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
3214 				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
3215 				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
3216 			for (n = mfc->_c.mfc_un.res.minvif;
3217 			     n < mfc->_c.mfc_un.res.maxvif; n++) {
3218 				if (VIF_EXISTS(mrt, n) &&
3219 				    mfc->_c.mfc_un.res.ttls[n] < 255)
3220 					seq_printf(seq,
3221 					   " %2d:%-3d",
3222 					   n, mfc->_c.mfc_un.res.ttls[n]);
3223 			}
3224 		} else {
3225 			/* unresolved mfc_caches don't contain
3226 			 * pkt, bytes and wrong_if values
3227 			 */
3228 			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
3229 		}
3230 		seq_putc(seq, '\n');
3231 	}
3232 	return 0;
3233 }
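
/* Illustrative /proc/net/ip_mr_cache output (made-up values;
 * E9FC0001 is 233.252.0.1 and C0000201 is 192.0.2.1):
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E9FC0001 C0000201 0            12    17616        0  1:1    2:64
 */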
3234 
3235 static const struct seq_operations ipmr_mfc_seq_ops = {
3236 	.start = ipmr_mfc_seq_start,
3237 	.next  = mr_mfc_seq_next,
3238 	.stop  = mr_mfc_seq_stop,
3239 	.show  = ipmr_mfc_seq_show,
3240 };
3241 #endif
3242 
3243 #ifdef CONFIG_IP_PIMSM_V2
3244 static const struct net_protocol pim_protocol = {
3245 	.handler	=	pim_rcv,
3246 };
3247 #endif
3248 
3249 static unsigned int ipmr_seq_read(const struct net *net)
3250 {
3251 	return atomic_read(&net->ipv4.ipmr_seq) + ipmr_rules_seq_read(net);
3252 }
3253 
3254 static int ipmr_dump(struct net *net, struct notifier_block *nb,
3255 		     struct netlink_ext_ack *extack)
3256 {
3257 	return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
3258 		       ipmr_mr_table_iter, extack);
3259 }
3260 
3261 static const struct fib_notifier_ops ipmr_notifier_ops_template = {
3262 	.family		= RTNL_FAMILY_IPMR,
3263 	.fib_seq_read	= ipmr_seq_read,
3264 	.fib_dump	= ipmr_dump,
3265 	.owner		= THIS_MODULE,
3266 };
3267 
3268 static int __net_init ipmr_notifier_init(struct net *net)
3269 {
3270 	struct fib_notifier_ops *ops;
3271 
3272 	atomic_set(&net->ipv4.ipmr_seq, 0);
3273 
3274 	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
3275 	if (IS_ERR(ops))
3276 		return PTR_ERR(ops);
3277 	net->ipv4.ipmr_notifier_ops = ops;
3278 
3279 	return 0;
3280 }
3281 
3282 static void __net_exit ipmr_notifier_exit(struct net *net)
3283 {
3284 	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
3285 	net->ipv4.ipmr_notifier_ops = NULL;
3286 }
3287 
3288 /* Setup for IP multicast routing */
3289 static int __net_init ipmr_net_init(struct net *net)
3290 {
3291 	LIST_HEAD(dev_kill_list);
3292 	int err;
3293 
3294 	mutex_init(&net->ipv4.mfc_mutex);
3295 
3296 	err = ipmr_notifier_init(net);
3297 	if (err)
3298 		goto ipmr_notifier_fail;
3299 
3300 	err = ipmr_rules_init(net);
3301 	if (err < 0)
3302 		goto ipmr_rules_fail;
3303 
3304 #ifdef CONFIG_PROC_FS
3305 	err = -ENOMEM;
3306 	if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
3307 			sizeof(struct mr_vif_iter)))
3308 		goto proc_vif_fail;
3309 	if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
3310 			sizeof(struct mr_mfc_iter)))
3311 		goto proc_cache_fail;
3312 #endif
3313 	return 0;
3314 
3315 #ifdef CONFIG_PROC_FS
3316 proc_cache_fail:
3317 	remove_proc_entry("ip_mr_vif", net->proc_net);
3318 proc_vif_fail:
3319 	ipmr_rules_exit_rtnl(net, &dev_kill_list);
3320 	ipmr_rules_exit(net);
3321 #endif
3322 ipmr_rules_fail:
3323 	ipmr_notifier_exit(net);
3324 ipmr_notifier_fail:
3325 	return err;
3326 }
3327 
3328 static void __net_exit ipmr_net_exit(struct net *net)
3329 {
3330 #ifdef CONFIG_PROC_FS
3331 	remove_proc_entry("ip_mr_cache", net->proc_net);
3332 	remove_proc_entry("ip_mr_vif", net->proc_net);
3333 #endif
3334 	ipmr_rules_exit(net);
3335 	ipmr_notifier_exit(net);
3336 }
3337 
3338 static void __net_exit ipmr_net_exit_rtnl(struct net *net,
3339 					  struct list_head *dev_kill_list)
3340 {
3341 	ipmr_rules_exit_rtnl(net, dev_kill_list);
3342 }
3343 
3344 static struct pernet_operations ipmr_net_ops = {
3345 	.init = ipmr_net_init,
3346 	.exit = ipmr_net_exit,
3347 	.exit_rtnl = ipmr_net_exit_rtnl,
3348 };
3349 
3350 static const struct rtnl_msg_handler ipmr_rtnl_msg_handlers[] __initconst = {
3351 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETLINK,
3352 	 .dumpit = ipmr_rtm_dumplink, .flags = RTNL_FLAG_DUMP_UNLOCKED},
3353 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_NEWROUTE,
3354 	 .doit = ipmr_rtm_route, .flags = RTNL_FLAG_DOIT_UNLOCKED},
3355 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_DELROUTE,
3356 	 .doit = ipmr_rtm_route, .flags = RTNL_FLAG_DOIT_UNLOCKED},
3357 	{.protocol = RTNL_FAMILY_IPMR, .msgtype = RTM_GETROUTE,
3358 	 .doit = ipmr_rtm_getroute, .dumpit = ipmr_rtm_dumproute,
3359 	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
3360 };
3361 
3362 int __init ip_mr_init(void)
3363 {
3364 	int err;
3365 
3366 	mrt_cachep = KMEM_CACHE(mfc_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3367 
3368 	err = register_pernet_subsys(&ipmr_net_ops);
3369 	if (err)
3370 		goto reg_pernet_fail;
3371 
3372 	err = register_netdevice_notifier(&ip_mr_notifier);
3373 	if (err)
3374 		goto reg_notif_fail;
3375 #ifdef CONFIG_IP_PIMSM_V2
3376 	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
3377 		pr_err("%s: can't add PIM protocol\n", __func__);
3378 		err = -EAGAIN;
3379 		goto add_proto_fail;
3380 	}
3381 #endif
3382 	rtnl_register_many(ipmr_rtnl_msg_handlers);
3383 
3384 	return 0;
3385 
3386 #ifdef CONFIG_IP_PIMSM_V2
3387 add_proto_fail:
3388 	unregister_netdevice_notifier(&ip_mr_notifier);
3389 #endif
3390 reg_notif_fail:
3391 	unregister_pernet_subsys(&ipmr_net_ops);
3392 reg_pernet_fail:
3393 	kmem_cache_destroy(mrt_cachep);
3394 	return err;
3395 }
3396