xref: /linux/net/ipv6/mcast.c (revision 47ee43e4bf50be16a142df1bf51e04b4bc5a6cdc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Multicast support for IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10  */
11 
12 /* Changes:
13  *
14  *	yoshfuji	: fix format of router-alert option
15  *	YOSHIFUJI Hideaki @USAGI:
16  *		Fixed source address for MLD message based on
17  *		<draft-ietf-magma-mld-source-05.txt>.
18  *	YOSHIFUJI Hideaki @USAGI:
19  *		- Ignore Queries for invalid addresses.
20  *		- MLD for link-local addresses.
21  *	David L Stevens <dlstevens@us.ibm.com>:
22  *		- MLDv2 support
23  */
24 
25 #include <linux/module.h>
26 #include <linux/errno.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/socket.h>
30 #include <linux/sockios.h>
31 #include <linux/jiffies.h>
32 #include <linux/net.h>
33 #include <linux/in.h>
34 #include <linux/in6.h>
35 #include <linux/netdevice.h>
36 #include <linux/if_addr.h>
37 #include <linux/if_arp.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/init.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/slab.h>
44 #include <linux/pkt_sched.h>
45 #include <net/mld.h>
46 #include <linux/workqueue.h>
47 
48 #include <linux/netfilter.h>
49 #include <linux/netfilter_ipv6.h>
50 
51 #include <net/net_namespace.h>
52 #include <net/netlink.h>
53 #include <net/sock.h>
54 #include <net/snmp.h>
55 
56 #include <net/ipv6.h>
57 #include <net/protocol.h>
58 #include <net/if_inet6.h>
59 #include <net/ndisc.h>
60 #include <net/addrconf.h>
61 #include <net/ip6_route.h>
62 #include <net/inet_common.h>
63 
64 #include <net/ip6_checksum.h>
65 
66 /* Ensure that we have struct in6_addr aligned on 32bit word. */
67 static int __mld2_query_bugs[] __attribute__((__unused__)) = {
68 	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
69 	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
70 	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
71 };
72 
/* Workqueue on which all MLD delayed work items below are scheduled. */
73 static struct workqueue_struct *mld_wq;
/* Link-scope all-MLDv2-capable-routers address (see MLD2_ALL_MCR_INIT). */
74 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
75 
76 static void igmp6_join_group(struct ifmcaddr6 *ma);
77 static void igmp6_leave_group(struct ifmcaddr6 *ma);
78 static void mld_mca_work(struct work_struct *work);
79 
80 static void mld_ifc_event(struct inet6_dev *idev);
81 static bool mld_in_v1_mode(const struct inet6_dev *idev);
82 static int sf_setstate(struct ifmcaddr6 *pmc);
83 static void sf_markstate(struct ifmcaddr6 *pmc);
84 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
85 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
86 			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
87 			  int delta);
88 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
89 			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
90 			  int delta);
91 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
92 			    struct inet6_dev *idev);
93 static int __ipv6_dev_mc_inc(struct net_device *dev,
94 			     const struct in6_addr *addr, unsigned int mode);
95 
/* Default Robustness Variable (RFC3810, 9.1) */
96 #define MLD_QRV_DEFAULT		2
97 /* RFC3810, 9.2. Query Interval */
98 #define MLD_QI_DEFAULT		(125 * HZ)
99 /* RFC3810, 9.3. Query Response Interval */
100 #define MLD_QRI_DEFAULT		(10 * HZ)
101 
102 /* RFC3810, 8.1 Query Version Distinctions */
103 #define MLD_V1_QUERY_LEN	24
104 #define MLD_V2_QUERY_LEN_MIN	28
105 
/* Default cap on per-socket source-filter entries (sysctl_mld_max_msf) */
106 #define IPV6_MLD_MAX_MSF	64
107 
108 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
109 int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
110 
/* Lockdep assertion that the per-device multicast mutex is held. */
111 #define mc_assert_locked(idev)			\
112 	lockdep_assert_held(&(idev)->mc_lock)
113 
/* RCU-protected dereference; legal while holding idev->mc_lock. */
114 #define mc_dereference(e, idev) \
115 	rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
116 
/* RCU-protected dereference; legal while holding the socket lock. */
117 #define sock_dereference(e, sk) \
118 	rcu_dereference_protected(e, lockdep_sock_is_held(sk))
119 
/* Walk a socket's membership list; caller holds the socket lock. */
120 #define for_each_pmc_socklock(np, sk, pmc)			\
121 	for (pmc = sock_dereference((np)->ipv6_mc_list, sk);	\
122 	     pmc;						\
123 	     pmc = sock_dereference(pmc->next, sk))
124 
/* Walk a socket's membership list under rcu_read_lock(). */
125 #define for_each_pmc_rcu(np, pmc)				\
126 	for (pmc = rcu_dereference((np)->ipv6_mc_list);		\
127 	     pmc;						\
128 	     pmc = rcu_dereference(pmc->next))
129 
/* Walk a group's source-filter list; caller holds idev->mc_lock. */
130 #define for_each_psf_mclock(mc, psf)				\
131 	for (psf = mc_dereference((mc)->mca_sources, mc->idev);	\
132 	     psf;						\
133 	     psf = mc_dereference(psf->sf_next, mc->idev))
134 
/* Walk a group's source-filter list under rcu_read_lock(). */
135 #define for_each_psf_rcu(mc, psf)				\
136 	for (psf = rcu_dereference((mc)->mca_sources);		\
137 	     psf;						\
138 	     psf = rcu_dereference(psf->sf_next))
139 
/* Walk a group's dead-source (tomb) list; caller holds idev->mc_lock. */
140 #define for_each_psf_tomb(mc, psf)				\
141 	for (psf = mc_dereference((mc)->mca_tomb, mc->idev);	\
142 	     psf;						\
143 	     psf = mc_dereference(psf->sf_next, mc->idev))
144 
/* Walk a device's group list; caller holds idev->mc_lock. */
145 #define for_each_mc_mclock(idev, mc)				\
146 	for (mc = mc_dereference((idev)->mc_list, idev);	\
147 	     mc;						\
148 	     mc = mc_dereference(mc->next, idev))
149 
/* Walk a device's group list under rcu_read_lock(). */
150 #define for_each_mc_rcu(idev, mc)				\
151 	for (mc = rcu_dereference((idev)->mc_list);             \
152 	     mc;                                                \
153 	     mc = rcu_dereference(mc->next))
154 
/* Walk a device's deleted-group (tomb) list; caller holds idev->mc_lock. */
155 #define for_each_mc_tomb(idev, mc)				\
156 	for (mc = mc_dereference((idev)->mc_tomb, idev);	\
157 	     mc;						\
158 	     mc = mc_dereference(mc->next, idev))
159 
/* Return the unsolicited report interval (in jiffies) for the MLD
 * version currently in use on @idev, clamped to at least 1.
 */
160 static int unsolicited_report_interval(struct inet6_dev *idev)
161 {
162 	int iv;
163 
164 	if (mld_in_v1_mode(idev))
165 		iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
166 	else
167 		iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
168 
	/* Misconfigured (<= 0) intervals degrade to the minimum of 1 jiffy. */
169 	return iv > 0 ? iv : 1;
170 }
171 
172 /*
173  *	socket join on multicast group
174  */
/* Join socket @sk to multicast group @addr with filter mode @mode.
 * @ifindex selects the device; 0 means "choose via a route lookup on
 * @addr".  Caller must hold the socket lock (for_each_pmc_socklock /
 * sock_dereference below depend on it).
 * Returns 0, -EINVAL (not multicast), -EADDRINUSE (already joined),
 * -ENOMEM, -ENODEV, or the error from __ipv6_dev_mc_inc().
 */
175 static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
176 			       const struct in6_addr *addr, unsigned int mode)
177 {
178 	struct ipv6_pinfo *np = inet6_sk(sk);
179 	struct ipv6_mc_socklist *mc_lst;
180 	struct net *net = sock_net(sk);
181 	struct net_device *dev = NULL;
182 	int err;
183 
184 	if (!ipv6_addr_is_multicast(addr))
185 		return -EINVAL;
186 
	/* Reject a duplicate join on the same (ifindex, group) pair. */
187 	for_each_pmc_socklock(np, sk, mc_lst) {
188 		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
189 		    ipv6_addr_equal(&mc_lst->addr, addr))
190 			return -EADDRINUSE;
191 	}
192 
193 	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
194 
195 	if (!mc_lst)
196 		return -ENOMEM;
197 
198 	mc_lst->next = NULL;
199 	mc_lst->addr = *addr;
200 
	/* No explicit interface: let routing pick the output device. */
201 	if (ifindex == 0) {
202 		struct rt6_info *rt;
203 
204 		rcu_read_lock();
205 		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
206 		if (rt) {
207 			dev = dst_dev(&rt->dst);
208 			dev_hold(dev);
209 			ip6_rt_put(rt);
210 		}
211 		rcu_read_unlock();
212 	} else {
213 		dev = dev_get_by_index(net, ifindex);
214 	}
215 
216 	if (!dev) {
217 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
218 		return -ENODEV;
219 	}
220 
221 	mc_lst->ifindex = dev->ifindex;
222 	mc_lst->sfmode = mode;
223 	RCU_INIT_POINTER(mc_lst->sflist, NULL);
224 
225 	/* now add/increase the group membership on the device */
226 	err = __ipv6_dev_mc_inc(dev, addr, mode);
227 
228 	dev_put(dev);
229 
230 	if (err) {
231 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
232 		return err;
233 	}
234 
	/* Publish the new entry at the head of the socket's RCU list. */
235 	mc_lst->next = np->ipv6_mc_list;
236 	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
237 
238 	return 0;
239 }
240 
/* Any-source join: __ipv6_sock_mc_join() with MCAST_EXCLUDE (empty
 * exclude list, i.e. accept all sources).
 */
241 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
242 {
243 	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
244 }
245 EXPORT_SYMBOL(ipv6_sock_mc_join);
246 
/* Source-specific join: like ipv6_sock_mc_join() but with a
 * caller-supplied filter mode (MCAST_INCLUDE/MCAST_EXCLUDE).
 */
247 int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
248 			  const struct in6_addr *addr, unsigned int mode)
249 {
250 	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
251 }
252 
253 /*
254  *	socket leave on multicast group
255  */
/* Release one (already unlinked) socket membership: drop its source
 * filters, decrement the device-level membership if the device still
 * exists, uncharge the allocation from sk_omem_alloc, and free
 * @mc_lst after an RCU grace period.
 */
256 static void __ipv6_sock_mc_drop(struct sock *sk, struct ipv6_mc_socklist *mc_lst)
257 {
258 	struct net *net = sock_net(sk);
259 	struct net_device *dev;
260 
261 	dev = dev_get_by_index(net, mc_lst->ifindex);
262 	if (dev) {
263 		struct inet6_dev *idev = in6_dev_get(dev);
264 
265 		ip6_mc_leave_src(sk, mc_lst, idev);
266 
267 		if (idev) {
268 			__ipv6_dev_mc_dec(idev, &mc_lst->addr);
269 			in6_dev_put(idev);
270 		}
271 
272 		dev_put(dev);
273 	} else {
		/* Device is gone; still free the per-socket filter state. */
274 		ip6_mc_leave_src(sk, mc_lst, NULL);
275 	}
276 
	/* mc_lst was charged by sock_kmalloc(); uncharge before freeing. */
277 	atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
278 	kfree_rcu(mc_lst, rcu);
279 }
280 
/* Leave group @addr on @ifindex (0 matches any interface): unlink the
 * matching entry from the socket's list and release it.  Caller must
 * hold the socket lock (sock_dereference below).
 * Returns 0 or -EINVAL / -EADDRNOTAVAIL.
 */
281 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
282 {
283 	struct ipv6_pinfo *np = inet6_sk(sk);
284 	struct ipv6_mc_socklist __rcu **lnk;
285 	struct ipv6_mc_socklist *mc_lst;
286 
287 	if (!ipv6_addr_is_multicast(addr))
288 		return -EINVAL;
289 
	/* Walk with a link pointer so the entry can be unspliced in place. */
290 	for (lnk = &np->ipv6_mc_list;
291 	     (mc_lst = sock_dereference(*lnk, sk)) != NULL;
292 	      lnk = &mc_lst->next) {
293 		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
294 		    ipv6_addr_equal(&mc_lst->addr, addr)) {
295 			*lnk = mc_lst->next;
296 			__ipv6_sock_mc_drop(sk, mc_lst);
297 			return 0;
298 		}
299 	}
300 
301 	return -EADDRNOTAVAIL;
302 }
303 EXPORT_SYMBOL(ipv6_sock_mc_drop);
304 
/* Resolve the inet6_dev for @group on @ifindex; when @ifindex is 0 the
 * device comes from a route lookup on @group.  Returns the inet6_dev
 * with a reference held (caller must in6_dev_put()), or NULL.
 */
305 static struct inet6_dev *ip6_mc_find_dev(struct net *net,
306 					 const struct in6_addr *group,
307 					 int ifindex)
308 {
309 	struct net_device *dev = NULL;
310 	struct inet6_dev *idev;
311 
312 	if (ifindex == 0) {
313 		struct rt6_info *rt;
314 
315 		rcu_read_lock();
316 		rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
317 		if (rt) {
318 			dev = dst_dev(&rt->dst);
319 			dev_hold(dev);
320 			ip6_rt_put(rt);
321 		}
322 		rcu_read_unlock();
323 	} else {
324 		dev = dev_get_by_index(net, ifindex);
325 	}
326 	if (!dev)
327 		return NULL;
328 
	/* Transfer: keep only the inet6_dev ref, release the netdev ref. */
329 	idev = in6_dev_get(dev);
330 	dev_put(dev);
331 
332 	return idev;
333 }
334 
/* Drop every multicast membership of @sk.  Caller must hold the
 * socket lock (sock_dereference below).
 */
335 void __ipv6_sock_mc_close(struct sock *sk)
336 {
337 	struct ipv6_pinfo *np = inet6_sk(sk);
338 	struct ipv6_mc_socklist *mc_lst;
339 
340 	while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
341 		np->ipv6_mc_list = mc_lst->next;
342 		__ipv6_sock_mc_drop(sk, mc_lst);
343 	}
344 }
345 
/* Locked wrapper around __ipv6_sock_mc_close() with a lockless
 * fast-path exit when the socket has no memberships.
 */
346 void ipv6_sock_mc_close(struct sock *sk)
347 {
348 	struct ipv6_pinfo *np = inet6_sk(sk);
349 
350 	if (!rcu_access_pointer(np->ipv6_mc_list))
351 		return;
352 
353 	lock_sock(sk);
354 	__ipv6_sock_mc_close(sk);
355 	release_sock(sk);
356 }
357 
/* Add (@add != 0) or delete a single source address from the socket's
 * source filter for the group in @pgsr, operating in filter mode
 * @omode (MCAST_INCLUDE/MCAST_EXCLUDE).  Backs the
 * MCAST_{JOIN,UNBLOCK,BLOCK,LEAVE}_SOURCE_GROUP socket options.
 * Deleting the last INCLUDE source leaves the group entirely.
 * Returns 0 or a negative errno.
 */
358 int ip6_mc_source(int add, int omode, struct sock *sk,
359 		  struct group_source_req *pgsr)
360 {
361 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
362 	struct in6_addr *source, *group;
363 	struct net *net = sock_net(sk);
364 	struct ipv6_mc_socklist *pmc;
365 	struct ip6_sf_socklist *psl;
366 	struct inet6_dev *idev;
367 	int leavegroup = 0;
368 	int i, j, rv;
369 	int err;
370 
371 	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
372 	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
373 
374 	if (!ipv6_addr_is_multicast(group))
375 		return -EINVAL;
376 
377 	idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
378 	if (!idev)
379 		return -ENODEV;
380 
381 	mutex_lock(&idev->mc_lock);
382 
383 	if (idev->dead) {
384 		err = -ENODEV;
385 		goto done;
386 	}
387 
388 	err = -EADDRNOTAVAIL;
389 
390 	for_each_pmc_socklock(inet6, sk, pmc) {
391 		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
392 			continue;
393 		if (ipv6_addr_equal(&pmc->addr, group))
394 			break;
395 	}
396 	if (!pmc) {		/* must have a prior join */
397 		err = -EINVAL;
398 		goto done;
399 	}
400 	/* if a source filter was set, must be the same mode as before */
401 	if (rcu_access_pointer(pmc->sflist)) {
402 		if (pmc->sfmode != omode) {
403 			err = -EINVAL;
404 			goto done;
405 		}
406 	} else if (pmc->sfmode != omode) {
407 		/* allow mode switches for empty-set filters */
408 		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
409 		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
410 		pmc->sfmode = omode;
411 	}
412 
413 	psl = sock_dereference(pmc->sflist, sk);
414 	if (!add) {
415 		if (!psl)
416 			goto done;	/* err = -EADDRNOTAVAIL */
417 		rv = !0;
418 		for (i = 0; i < psl->sl_count; i++) {
419 			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
420 			if (rv == 0)
421 				break;
422 		}
423 		if (rv)		/* source not found */
424 			goto done;	/* err = -EADDRNOTAVAIL */
425 
426 		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
427 		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
428 			leavegroup = 1;
429 			goto done;
430 		}
431 
432 		/* update the interface filter */
433 		ip6_mc_del_src(idev, group, omode, 1, source, 1);
434 
		/* Close the gap left at index i by the removed source. */
435 		for (j = i+1; j < psl->sl_count; j++)
436 			psl->sl_addr[j-1] = psl->sl_addr[j];
437 		psl->sl_count--;
438 		err = 0;
439 		goto done;
440 	}
441 	/* else, add a new source to the filter */
442 
443 	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
444 		err = -ENOBUFS;
445 		goto done;
446 	}
	/* List full (or absent): grow it by IP6_SFBLOCK entries; the old
	 * list is copied over, uncharged, and freed after a grace period.
	 */
447 	if (!psl || psl->sl_count == psl->sl_max) {
448 		struct ip6_sf_socklist *newpsl;
449 		int count = IP6_SFBLOCK;
450 
451 		if (psl)
452 			count += psl->sl_max;
453 		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
454 				      GFP_KERNEL);
455 		if (!newpsl) {
456 			err = -ENOBUFS;
457 			goto done;
458 		}
459 		newpsl->sl_max = count;
460 		newpsl->sl_count = count - IP6_SFBLOCK;
461 		if (psl) {
462 			for (i = 0; i < psl->sl_count; i++)
463 				newpsl->sl_addr[i] = psl->sl_addr[i];
464 			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
465 				   &sk->sk_omem_alloc);
466 		}
467 		rcu_assign_pointer(pmc->sflist, newpsl);
468 		kfree_rcu(psl, rcu);
469 		psl = newpsl;
470 	}
471 	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
472 	for (i = 0; i < psl->sl_count; i++) {
473 		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
474 		if (rv == 0) /* There is an error in the address. */
475 			goto done;
476 	}
	/* i == sl_count here (no duplicate found), so the shift loop below
	 * does not iterate and the new source is appended at the tail.
	 */
477 	for (j = psl->sl_count-1; j >= i; j--)
478 		psl->sl_addr[j+1] = psl->sl_addr[j];
479 	psl->sl_addr[i] = *source;
480 	psl->sl_count++;
481 	err = 0;
482 	/* update the interface list */
483 	ip6_mc_add_src(idev, group, omode, 1, source, 1);
484 done:
485 	mutex_unlock(&idev->mc_lock);
486 	in6_dev_put(idev);
	/* Leave must happen after dropping mc_lock: it re-enters the
	 * membership code via ipv6_sock_mc_drop().
	 */
487 	if (leavegroup)
488 		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
489 	return err;
490 }
491 
/* Replace the socket's entire source filter for the group in @gsf with
 * the @gsf->gf_numsrc addresses in @list (MCAST_MSFILTER setsockopt).
 * (INCLUDE, empty) is treated as a full leave.  The device-level
 * filters are updated first; the old per-socket list is freed via RCU.
 * Returns 0 or a negative errno.
 */
492 int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
493 		    struct sockaddr_storage *list)
494 {
495 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
496 	struct ip6_sf_socklist *newpsl, *psl;
497 	struct net *net = sock_net(sk);
498 	const struct in6_addr *group;
499 	struct ipv6_mc_socklist *pmc;
500 	struct inet6_dev *idev;
501 	int leavegroup = 0;
502 	int i, err;
503 
504 	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
505 
506 	if (!ipv6_addr_is_multicast(group))
507 		return -EINVAL;
508 	if (gsf->gf_fmode != MCAST_INCLUDE &&
509 	    gsf->gf_fmode != MCAST_EXCLUDE)
510 		return -EINVAL;
511 
512 	idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
513 	if (!idev)
514 		return -ENODEV;
515 
516 	mutex_lock(&idev->mc_lock);
517 
518 	if (idev->dead) {
519 		err = -ENODEV;
520 		goto done;
521 	}
522 
523 	err = 0;
524 
	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
525 	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
526 		leavegroup = 1;
527 		goto done;
528 	}
529 
530 	for_each_pmc_socklock(inet6, sk, pmc) {
531 		if (pmc->ifindex != gsf->gf_interface)
532 			continue;
533 		if (ipv6_addr_equal(&pmc->addr, group))
534 			break;
535 	}
536 	if (!pmc) {		/* must have a prior join */
537 		err = -EINVAL;
538 		goto done;
539 	}
540 	if (gsf->gf_numsrc) {
541 		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
542 						      gsf->gf_numsrc),
543 				      GFP_KERNEL);
544 		if (!newpsl) {
545 			err = -ENOBUFS;
546 			goto done;
547 		}
548 		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
549 		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
550 			struct sockaddr_in6 *psin6;
551 
552 			psin6 = (struct sockaddr_in6 *)list;
553 			newpsl->sl_addr[i] = psin6->sin6_addr;
554 		}
555 
		/* Install the new set on the interface before swapping the
		 * per-socket list; on failure nothing has changed for @sk.
		 */
556 		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
557 				     newpsl->sl_count, newpsl->sl_addr, 0);
558 		if (err) {
559 			sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
560 							     newpsl->sl_max));
561 			goto done;
562 		}
563 	} else {
564 		newpsl = NULL;
565 		ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
566 	}
567 
	/* Remove the old per-socket set from the interface filter. */
568 	psl = sock_dereference(pmc->sflist, sk);
569 	if (psl) {
570 		ip6_mc_del_src(idev, group, pmc->sfmode,
571 			       psl->sl_count, psl->sl_addr, 0);
572 		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
573 			   &sk->sk_omem_alloc);
574 	} else {
575 		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
576 	}
577 
578 	rcu_assign_pointer(pmc->sflist, newpsl);
579 	kfree_rcu(psl, rcu);
580 	pmc->sfmode = gsf->gf_fmode;
581 	err = 0;
582 done:
583 	mutex_unlock(&idev->mc_lock);
584 	in6_dev_put(idev);
585 	if (leavegroup)
586 		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
587 	return err;
588 }
589 
/* MCAST_MSFILTER getsockopt: report the filter mode and copy up to the
 * caller-provided gf_numsrc source addresses to userspace at
 * @optval + @ss_offset.  gf_numsrc is overwritten with the full count
 * so userspace can detect truncation.  Returns 0, -EINVAL, 
 * -EADDRNOTAVAIL (no prior join), or -EFAULT.
 */
590 int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
591 		  sockptr_t optval, size_t ss_offset)
592 {
593 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
594 	const struct in6_addr *group;
595 	struct ipv6_mc_socklist *pmc;
596 	struct ip6_sf_socklist *psl;
597 	unsigned int count;
598 	int i, copycount;
599 
600 	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
601 
602 	if (!ipv6_addr_is_multicast(group))
603 		return -EINVAL;
604 
605 	for_each_pmc_socklock(inet6, sk, pmc) {
606 		if (pmc->ifindex != gsf->gf_interface)
607 			continue;
608 		if (ipv6_addr_equal(group, &pmc->addr))
609 			break;
610 	}
611 	if (!pmc)		/* must have a prior join */
612 		return -EADDRNOTAVAIL;
613 
614 	gsf->gf_fmode = pmc->sfmode;
615 	psl = sock_dereference(pmc->sflist, sk);
616 	count = psl ? psl->sl_count : 0;
617 
	/* copycount > 0 implies psl != NULL, so psl->sl_addr below is safe. */
618 	copycount = min(count, gsf->gf_numsrc);
619 	gsf->gf_numsrc = count;
620 	for (i = 0; i < copycount; i++) {
621 		struct sockaddr_in6 *psin6;
622 		struct sockaddr_storage ss;
623 
624 		psin6 = (struct sockaddr_in6 *)&ss;
625 		memset(&ss, 0, sizeof(ss));
626 		psin6->sin6_family = AF_INET6;
627 		psin6->sin6_addr = psl->sl_addr[i];
628 		if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
629 			return -EFAULT;
630 		ss_offset += sizeof(ss);
631 	}
632 	return 0;
633 }
634 
/* Delivery check: should @sk accept a packet sent from @src_addr to
 * multicast group @mc_addr, given its memberships and per-socket
 * source filters?  If the socket never joined the group, the result
 * is the socket's MC6_ALL ("multicast all") flag.  Runs under RCU
 * only, so it is safe from the receive path.
 */
635 bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
636 		    const struct in6_addr *src_addr)
637 {
638 	const struct ipv6_pinfo *np = inet6_sk(sk);
639 	const struct ipv6_mc_socklist *mc;
640 	const struct ip6_sf_socklist *psl;
641 	bool rv = true;
642 
643 	rcu_read_lock();
644 	for_each_pmc_rcu(np, mc) {
645 		if (ipv6_addr_equal(&mc->addr, mc_addr))
646 			break;
647 	}
648 	if (!mc) {
649 		rcu_read_unlock();
650 		return inet6_test_bit(MC6_ALL, sk);
651 	}
652 	psl = rcu_dereference(mc->sflist);
653 	if (!psl) {
		/* No filter list: EXCLUDE(empty) accepts, INCLUDE(empty) drops. */
654 		rv = mc->sfmode == MCAST_EXCLUDE;
655 	} else {
656 		int i;
657 
658 		for (i = 0; i < psl->sl_count; i++) {
659 			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
660 				break;
661 		}
		/* INCLUDE: accept only listed sources; EXCLUDE: reject them. */
662 		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
663 			rv = false;
664 		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
665 			rv = false;
666 	}
667 	rcu_read_unlock();
668 
669 	return rv;
670 }
671 
/* A group became active on the interface: program the hardware
 * multicast filter (once, guarded by MAF_LOADED) and emit the initial
 * membership report - MLDv1 join, or an MLDv2 filter-change event.
 * Caller holds idev->mc_lock.
 */
672 static void igmp6_group_added(struct ifmcaddr6 *mc)
673 {
674 	struct net_device *dev = mc->idev->dev;
675 	char buf[MAX_ADDR_LEN];
676 
677 	mc_assert_locked(mc->idev);
678 
	/* Node/interface-local scope groups are never reported. */
679 	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
680 	    IPV6_ADDR_SCOPE_LINKLOCAL)
681 		return;
682 
683 	if (!(mc->mca_flags&MAF_LOADED)) {
684 		mc->mca_flags |= MAF_LOADED;
685 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
686 			dev_mc_add(dev, buf);
687 	}
688 
689 	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
690 		return;
691 
692 	if (mld_in_v1_mode(mc->idev)) {
693 		igmp6_join_group(mc);
694 		return;
695 	}
696 	/* else v2 */
697 
698 	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
699 	 * should not send filter-mode change record as the mode
700 	 * should be from IN() to IN(A).
701 	 */
702 	if (mc->mca_sfmode == MCAST_EXCLUDE)
703 		mc->mca_crcount = mc->idev->mc_qrv;
704 
705 	mld_ifc_event(mc->idev);
706 }
707 
/* A group is going away: remove it from the hardware multicast filter,
 * send a leave (done message) unless the device is being destroyed,
 * and cancel any pending response timer.  Caller holds idev->mc_lock.
 */
708 static void igmp6_group_dropped(struct ifmcaddr6 *mc)
709 {
710 	struct net_device *dev = mc->idev->dev;
711 	char buf[MAX_ADDR_LEN];
712 
713 	mc_assert_locked(mc->idev);
714 
715 	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
716 	    IPV6_ADDR_SCOPE_LINKLOCAL)
717 		return;
718 
719 	if (mc->mca_flags&MAF_LOADED) {
720 		mc->mca_flags &= ~MAF_LOADED;
721 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
722 			dev_mc_del(dev, buf);
723 	}
724 
725 	if (mc->mca_flags & MAF_NOREPORT)
726 		return;
727 
728 	if (!mc->idev->dead)
729 		igmp6_leave_group(mc);
730 
	/* Pending work held a reference on mc; drop it with the work. */
731 	if (cancel_delayed_work(&mc->mca_work))
732 		refcount_dec(&mc->mca_refcnt);
733 }
734 
735 /* deleted ifmcaddr6 manipulation */
/* Record the departing group @im on idev->mc_tomb so that pending
 * MLDv2 state-change retransmissions (mca_crcount, and per-source
 * sf_crcount for INCLUDE mode) can still be sent after the group is
 * unlinked.  For INCLUDE groups, ownership of the source and tomb
 * lists moves from @im to the new tomb record.
 * Caller holds idev->mc_lock.
 */
736 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
737 {
738 	struct ifmcaddr6 *pmc;
739 
740 	mc_assert_locked(idev);
741 
742 	/* this is an "ifmcaddr6" for convenience; only the fields below
743 	 * are actually used. In particular, the refcnt and users are not
744 	 * used for management of the delete list. Using the same structure
745 	 * for deleted items allows change reports to use common code with
746 	 * non-deleted or query-response MCA's.
747 	 */
748 	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
749 	if (!pmc)
750 		return;
751 
752 	pmc->idev = im->idev;
753 	in6_dev_hold(idev);
754 	pmc->mca_addr = im->mca_addr;
755 	pmc->mca_crcount = idev->mc_qrv;
756 	pmc->mca_sfmode = im->mca_sfmode;
757 	if (pmc->mca_sfmode == MCAST_INCLUDE) {
758 		struct ip6_sf_list *psf;
759 
760 		rcu_assign_pointer(pmc->mca_tomb,
761 				   mc_dereference(im->mca_tomb, idev));
762 		rcu_assign_pointer(pmc->mca_sources,
763 				   mc_dereference(im->mca_sources, idev));
764 		RCU_INIT_POINTER(im->mca_tomb, NULL);
765 		RCU_INIT_POINTER(im->mca_sources, NULL);
766 
767 		for_each_psf_mclock(pmc, psf)
768 			psf->sf_crcount = pmc->mca_crcount;
769 	}
770 
	/* Push the record onto the head of the tomb list. */
771 	rcu_assign_pointer(pmc->next, idev->mc_tomb);
772 	rcu_assign_pointer(idev->mc_tomb, pmc);
773 }
774 
/* Inverse of mld_add_delrec(): the group @im is being (re)created, so
 * if a tomb record for its address exists, unlink it, move its source
 * and tomb lists back into @im, restart state-change retransmission
 * counters from mc_qrv, and free the tomb record.
 * Caller holds idev->mc_lock.
 */
775 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
776 {
777 	struct ip6_sf_list *psf, *sources, *tomb;
778 	struct in6_addr *pmca = &im->mca_addr;
779 	struct ifmcaddr6 *pmc, *pmc_prev;
780 
781 	mc_assert_locked(idev);
782 
783 	pmc_prev = NULL;
784 	for_each_mc_tomb(idev, pmc) {
785 		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
786 			break;
787 		pmc_prev = pmc;
788 	}
789 	if (!pmc)
790 		return;
	/* Unsplice the tomb entry from the singly-linked tomb list. */
791 	if (pmc_prev)
792 		rcu_assign_pointer(pmc_prev->next, pmc->next);
793 	else
794 		rcu_assign_pointer(idev->mc_tomb, pmc->next);
795 
796 	im->idev = pmc->idev;
797 	if (im->mca_sfmode == MCAST_INCLUDE) {
		/* Swap the lists so any lists @im already had end up on the
		 * tomb record and are released with it below.
		 */
798 		tomb = rcu_replace_pointer(im->mca_tomb,
799 					   mc_dereference(pmc->mca_tomb, pmc->idev),
800 					   lockdep_is_held(&im->idev->mc_lock));
801 		rcu_assign_pointer(pmc->mca_tomb, tomb);
802 
803 		sources = rcu_replace_pointer(im->mca_sources,
804 					      mc_dereference(pmc->mca_sources, pmc->idev),
805 					      lockdep_is_held(&im->idev->mc_lock));
806 		rcu_assign_pointer(pmc->mca_sources, sources);
807 		for_each_psf_mclock(im, psf)
808 			psf->sf_crcount = idev->mc_qrv;
809 	} else {
810 		im->mca_crcount = idev->mc_qrv;
811 	}
812 	in6_dev_put(pmc->idev);
813 	ip6_mc_clear_src(pmc);
814 	kfree_rcu(pmc, rcu);
815 }
816 
/* Discard all pending retransmission state: free every tomb record on
 * idev->mc_tomb and every dead source (mca_tomb) of the remaining
 * live groups.  Caller holds idev->mc_lock.
 */
817 static void mld_clear_delrec(struct inet6_dev *idev)
818 {
819 	struct ifmcaddr6 *pmc, *nextpmc;
820 
821 	mc_assert_locked(idev);
822 
823 	pmc = mc_dereference(idev->mc_tomb, idev);
824 	RCU_INIT_POINTER(idev->mc_tomb, NULL);
825 
826 	for (; pmc; pmc = nextpmc) {
827 		nextpmc = mc_dereference(pmc->next, idev);
828 		ip6_mc_clear_src(pmc);
829 		in6_dev_put(pmc->idev);
830 		kfree_rcu(pmc, rcu);
831 	}
832 
833 	/* clear dead sources, too */
834 	for_each_mc_mclock(idev, pmc) {
835 		struct ip6_sf_list *psf, *psf_next;
836 
837 		psf = mc_dereference(pmc->mca_tomb, idev);
838 		RCU_INIT_POINTER(pmc->mca_tomb, NULL);
839 		for (; psf; psf = psf_next) {
840 			psf_next = mc_dereference(psf->sf_next, idev);
841 			kfree_rcu(psf, rcu);
842 		}
843 	}
844 }
845 
/* Drop all queued-but-unprocessed MLD query skbs for @idev. */
846 static void mld_clear_query(struct inet6_dev *idev)
847 {
848 	struct sk_buff *skb;
849 
850 	spin_lock_bh(&idev->mc_query_lock);
851 	while ((skb = __skb_dequeue(&idev->mc_query_queue)))
852 		kfree_skb(skb);
853 	spin_unlock_bh(&idev->mc_query_lock);
854 }
855 
/* Drop all queued-but-unprocessed MLD report skbs for @idev. */
856 static void mld_clear_report(struct inet6_dev *idev)
857 {
858 	struct sk_buff *skb;
859 
860 	spin_lock_bh(&idev->mc_report_lock);
861 	while ((skb = __skb_dequeue(&idev->mc_report_queue)))
862 		kfree_skb(skb);
863 	spin_unlock_bh(&idev->mc_report_lock);
864 }
865 
/* Drop a reference on @mc; the final put releases its idev reference
 * and frees the entry after an RCU grace period.
 */
866 static void ma_put(struct ifmcaddr6 *mc)
867 {
868 	if (refcount_dec_and_test(&mc->mca_refcnt)) {
869 		in6_dev_put(mc->idev);
870 		kfree_rcu(mc, rcu);
871 	}
872 }
873 
/* Allocate and initialize a new per-interface group entry for @addr
 * in filter mode @mode (one user, one reference).  Link-local
 * all-nodes and sub-link-local-scope groups are marked MAF_NOREPORT
 * so no MLD messages are ever sent for them.
 * Returns NULL on allocation failure.  Caller holds idev->mc_lock
 * and has taken the idev reference the entry keeps.
 */
874 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
875 				   const struct in6_addr *addr,
876 				   unsigned int mode)
877 {
878 	struct ifmcaddr6 *mc;
879 
880 	mc_assert_locked(idev);
881 
882 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
883 	if (!mc)
884 		return NULL;
885 
886 	INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);
887 
888 	mc->mca_addr = *addr;
889 	mc->idev = idev; /* reference taken by caller */
890 	mc->mca_users = 1;
891 	/* mca_stamp should be updated upon changes */
892 	mc->mca_cstamp = mc->mca_tstamp = jiffies;
893 	refcount_set(&mc->mca_refcnt, 1);
894 
895 	mc->mca_sfmode = mode;
896 	mc->mca_sfcount[mode] = 1;
897 
898 	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
899 	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
900 		mc->mca_flags |= MAF_NOREPORT;
901 
902 	return mc;
903 }
904 
/* Broadcast an RTM_NEWMULTICAST/RTM_DELMULTICAST netlink notification
 * for @ifmca to RTNLGRP_IPV6_MCADDR listeners; on failure the error is
 * recorded against the group via rtnl_set_sk_err().
 */
905 static void inet6_ifmcaddr_notify(struct net_device *dev,
906 				  const struct ifmcaddr6 *ifmca, int event)
907 {
908 	struct inet6_fill_args fillargs = {
909 		.portid = 0,
910 		.seq = 0,
911 		.event = event,
912 		.flags = 0,
913 		.netnsid = -1,
914 		.force_rt_scope_universe = true,
915 	};
916 	struct net *net = dev_net(dev);
917 	struct sk_buff *skb;
918 	int err = -ENOMEM;
919 
	/* Size for the header plus the address and cacheinfo attributes. */
920 	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
921 			nla_total_size(sizeof(struct in6_addr)) +
922 			nla_total_size(sizeof(struct ifa_cacheinfo)),
923 			GFP_KERNEL);
924 	if (!skb)
925 		goto error;
926 
927 	err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs);
928 	if (err < 0) {
		/* The skb was sized for this message; -EMSGSIZE is a bug. */
929 		WARN_ON_ONCE(err == -EMSGSIZE);
930 		nlmsg_free(skb);
931 		goto error;
932 	}
933 
934 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MCADDR, NULL, GFP_KERNEL);
935 	return;
936 error:
937 	rtnl_set_sk_err(net, RTNLGRP_IPV6_MCADDR, err);
938 }
939 
940 /*
941  *	device multicast group inc (add if not found)
942  */
/* Add a device-level membership for @addr in filter mode @mode, or
 * bump the user count if the group already exists.  New groups revive
 * any pending tomb state (mld_del_delrec), start reports, and emit an
 * RTM_NEWMULTICAST notification.
 * Returns 0, -EINVAL (no inet6_dev), -ENODEV (dying), or -ENOMEM.
 */
943 static int __ipv6_dev_mc_inc(struct net_device *dev,
944 			     const struct in6_addr *addr, unsigned int mode)
945 {
946 	struct inet6_dev *idev;
947 	struct ifmcaddr6 *mc;
948 
949 	/* we need to take a reference on idev */
950 	idev = in6_dev_get(dev);
951 	if (!idev)
952 		return -EINVAL;
953 
954 	mutex_lock(&idev->mc_lock);
955 
956 	if (READ_ONCE(idev->dead)) {
957 		mutex_unlock(&idev->mc_lock);
958 		in6_dev_put(idev);
959 		return -ENODEV;
960 	}
961 
	/* Existing group: one more user; fold @mode into its filters. */
962 	for_each_mc_mclock(idev, mc) {
963 		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
964 			mc->mca_users++;
965 			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
966 			mutex_unlock(&idev->mc_lock);
967 			in6_dev_put(idev);
968 			return 0;
969 		}
970 	}
971 
	/* New group: mca_alloc() takes over the idev reference on success. */
972 	mc = mca_alloc(idev, addr, mode);
973 	if (!mc) {
974 		mutex_unlock(&idev->mc_lock);
975 		in6_dev_put(idev);
976 		return -ENOMEM;
977 	}
978 
979 	rcu_assign_pointer(mc->next, idev->mc_list);
980 	rcu_assign_pointer(idev->mc_list, mc);
981 
982 	mld_del_delrec(idev, mc);
983 	igmp6_group_added(mc);
984 	inet6_ifmcaddr_notify(dev, mc, RTM_NEWMULTICAST);
985 	mutex_unlock(&idev->mc_lock);
986 
987 	return 0;
988 }
989 
/* Device-level join with the default MCAST_EXCLUDE (any-source) mode. */
990 int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
991 {
992 	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
993 }
994 EXPORT_SYMBOL(ipv6_dev_mc_inc);
995 
996 /*
997  * device multicast group del
998  */
/* Drop one user of group @addr on @idev; when the last user goes the
 * group is unlinked, leave messages/notifications are sent, its
 * sources are cleared, and the entry reference is dropped.
 * Returns 0 when found, -ENOENT otherwise.
 */
999 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
1000 {
1001 	struct ifmcaddr6 *ma, __rcu **map;
1002 
1003 	mutex_lock(&idev->mc_lock);
1004 
	/* Walk with a link pointer so the entry can be unspliced in place. */
1005 	for (map = &idev->mc_list;
1006 	     (ma = mc_dereference(*map, idev));
1007 	     map = &ma->next) {
1008 		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
1009 			if (--ma->mca_users == 0) {
1010 				*map = ma->next;
1011 
1012 				igmp6_group_dropped(ma);
1013 				inet6_ifmcaddr_notify(idev->dev, ma,
1014 						      RTM_DELMULTICAST);
1015 				ip6_mc_clear_src(ma);
1016 				mutex_unlock(&idev->mc_lock);
1017 
				/* Drop the list's reference outside mc_lock. */
1018 				ma_put(ma);
1019 				return 0;
1020 			}
1021 			mutex_unlock(&idev->mc_lock);
1022 			return 0;
1023 		}
1024 	}
1025 
1026 	mutex_unlock(&idev->mc_lock);
1027 	return -ENOENT;
1028 }
1029 
/* Device-level leave: wrapper around __ipv6_dev_mc_dec() that takes
 * and drops the inet6_dev reference.  Returns 0, -ENODEV or -ENOENT.
 */
1030 int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
1031 {
1032 	struct inet6_dev *idev;
1033 	int err;
1034 
1035 	idev = in6_dev_get(dev);
1036 	if (!idev)
1037 		return -ENODEV;
1038 
1039 	err = __ipv6_dev_mc_dec(idev, addr);
1040 	in6_dev_put(idev);
1041 
1042 	return err;
1043 }
1044 EXPORT_SYMBOL(ipv6_dev_mc_dec);
1045 
1046 /*
1047  *	check if the interface/address pair is valid
1048  */
/* Return true if @dev is a member of @group and, when a specific
 * @src_addr is given, the source passes the group's interface-level
 * filters (any INCLUDE listener, or an EXCLUDE set not excluding it).
 * RCU-only; safe from the receive path.
 */
1049 bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
1050 			 const struct in6_addr *src_addr)
1051 {
1052 	struct inet6_dev *idev;
1053 	struct ifmcaddr6 *mc;
1054 	bool rv = false;
1055 
1056 	rcu_read_lock();
1057 	idev = __in6_dev_get(dev);
1058 	if (!idev)
1059 		goto unlock;
1060 	for_each_mc_rcu(idev, mc) {
1061 		if (ipv6_addr_equal(&mc->mca_addr, group))
1062 			break;
1063 	}
1064 	if (!mc)
1065 		goto unlock;
1066 	if (src_addr && !ipv6_addr_any(src_addr)) {
1067 		struct ip6_sf_list *psf;
1068 
1069 		for_each_psf_rcu(mc, psf) {
1070 			if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1071 				break;
1072 		}
		/* Accept if anyone INCLUDEs the source, or if not every
		 * EXCLUDE-mode member excludes it.
		 */
1073 		if (psf)
1074 			rv = READ_ONCE(psf->sf_count[MCAST_INCLUDE]) ||
1075 				READ_ONCE(psf->sf_count[MCAST_EXCLUDE]) !=
1076 				READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]);
1077 		else
1078 			rv = READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]) != 0;
1079 	} else {
1080 		rv = true; /* don't filter unspecified source */
1081 	}
1082 unlock:
1083 	rcu_read_unlock();
1084 	return rv;
1085 }
1086 
/* Schedule the general-query response work after a random delay below
 * mc_maxdelay (+2 jiffies); take an idev reference for the work item
 * unless one was already pending.  Caller holds idev->mc_lock.
 */
1087 static void mld_gq_start_work(struct inet6_dev *idev)
1088 {
1089 	unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
1090 
1091 	mc_assert_locked(idev);
1092 
1093 	idev->mc_gq_running = 1;
1094 	if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1095 		in6_dev_hold(idev);
1096 }
1097 
/* Cancel pending general-query work, dropping its idev reference if a
 * work item was actually removed.  Caller holds idev->mc_lock.
 */
1098 static void mld_gq_stop_work(struct inet6_dev *idev)
1099 {
1100 	mc_assert_locked(idev);
1101 
1102 	idev->mc_gq_running = 0;
1103 	if (cancel_delayed_work(&idev->mc_gq_work))
1104 		__in6_dev_put(idev);
1105 }
1106 
/* Schedule interface-filter-change (state-change report) work after a
 * random delay below @delay (+2 jiffies); take an idev reference for
 * the work item unless one was already pending.
 * Caller holds idev->mc_lock.
 */
1107 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1108 {
1109 	unsigned long tv = get_random_u32_below(delay);
1110 
1111 	mc_assert_locked(idev);
1112 
1113 	if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1114 		in6_dev_hold(idev);
1115 }
1116 
/* Cancel pending filter-change work and its remaining retransmission
 * count, dropping the work's idev reference if one was removed.
 * Caller holds idev->mc_lock.
 */
1117 static void mld_ifc_stop_work(struct inet6_dev *idev)
1118 {
1119 	mc_assert_locked(idev);
1120 
1121 	idev->mc_ifc_count = 0;
1122 	if (cancel_delayed_work(&idev->mc_ifc_work))
1123 		__in6_dev_put(idev);
1124 }
1125 
/* Schedule the MLD DAD work after a random delay below @delay
 * (+2 jiffies); take an idev reference for the work item unless one
 * was already pending.  Caller holds idev->mc_lock.
 */
1126 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1127 {
1128 	unsigned long tv = get_random_u32_below(delay);
1129 
1130 	mc_assert_locked(idev);
1131 
1132 	if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1133 		in6_dev_hold(idev);
1134 }
1135 
/* Cancel pending DAD work; drop its idev reference if it was queued.
 * NOTE(review): unlike the other stoppers above, no mc_assert_locked()
 * here — presumably callers hold other protection; confirm at call sites.
 */
static void mld_dad_stop_work(struct inet6_dev *idev)
{
	if (cancel_delayed_work(&idev->mc_dad_work))
		__in6_dev_put(idev);
}
1141 
/* Cancel pending query-processing work under the query-queue lock;
 * drop its idev reference if it was still queued.
 */
static void mld_query_stop_work(struct inet6_dev *idev)
{
	spin_lock_bh(&idev->mc_query_lock);
	if (cancel_delayed_work(&idev->mc_query_work))
		__in6_dev_put(idev);
	spin_unlock_bh(&idev->mc_query_lock);
}
1149 
/* Cancel report-processing work, waiting for a running instance to
 * finish (_sync variant); drop its idev reference if it was queued.
 */
static void mld_report_stop_work(struct inet6_dev *idev)
{
	if (cancel_delayed_work_sync(&idev->mc_report_work))
		__in6_dev_put(idev);
}
1155 
/* IGMP handling (alias multicast ICMPv6 messages) */

/* Schedule a delayed report for group @ma in response to a query with
 * maximum response time @resptime (jiffies). If a response is already
 * pending sooner than the new random delay, keep the earlier deadline.
 * Caller holds the mc_lock.
 */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	mc_assert_locked(ma->idev);

	/* Do not start work for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	/* a report is already pending: drop its ref and remember how much
	 * of its delay is left so we never push the deadline out
	 */
	if (cancel_delayed_work(&ma->mca_work)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_work.timer.expires - jiffies;
	}

	if (delay >= resptime)
		delay = get_random_u32_below(resptime);

	/* the queued work owns one mca reference */
	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}
1180 
/* mark EXCLUDE-mode sources */

/* For an EXCLUDE-mode group, count how many of the @nsrcs queried
 * sources in @srcs are active filters of @pmc. Returns false (group
 * need not respond) only when every queried source is excluded;
 * clears MAF_GSQUERY either way. Caller holds the mc_lock.
 */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	mc_assert_locked(pmc->idev);

	scount = 0;
	for_each_psf_mclock(pmc, psf) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}
1211 
/* Mark the sources of @pmc that match the @nsrcs queried addresses in
 * @srcs for a group-and-source-specific query. EXCLUDE-mode groups are
 * delegated to mld_xmarksources(). For INCLUDE mode, sets sf_gsresp on
 * each matched source; returns true (and sets MAF_GSQUERY) when at
 * least one source must be reported, false otherwise.
 * Caller holds the mc_lock.
 */
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	mc_assert_locked(pmc->idev);

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	scount = 0;
	for_each_psf_mclock(pmc, psf) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}
1244 
1245 static int mld_force_mld_version(const struct inet6_dev *idev)
1246 {
1247 	const struct net *net = dev_net(idev->dev);
1248 	int all_force;
1249 
1250 	all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
1251 	/* Normally, both are 0 here. If enforcement to a particular is
1252 	 * being used, individual device enforcement will have a lower
1253 	 * precedence over 'all' device (.../conf/all/force_mld_version).
1254 	 */
1255 	return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
1256 }
1257 
1258 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1259 {
1260 	return mld_force_mld_version(idev) == 2;
1261 }
1262 
1263 static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1264 {
1265 	return mld_force_mld_version(idev) == 1;
1266 }
1267 
1268 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1269 {
1270 	if (mld_in_v2_mode_only(idev))
1271 		return false;
1272 	if (mld_in_v1_mode_only(idev))
1273 		return true;
1274 	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1275 		return true;
1276 
1277 	return false;
1278 }
1279 
1280 static void mld_set_v1_mode(struct inet6_dev *idev)
1281 {
1282 	/* RFC3810, relevant sections:
1283 	 *  - 9.1. Robustness Variable
1284 	 *  - 9.2. Query Interval
1285 	 *  - 9.3. Query Response Interval
1286 	 *  - 9.12. Older Version Querier Present Timeout
1287 	 */
1288 	unsigned long switchback;
1289 
1290 	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1291 
1292 	idev->mc_v1_seen = jiffies + switchback;
1293 }
1294 
1295 static void mld_update_qrv(struct inet6_dev *idev,
1296 			   const struct mld2_query *mlh2)
1297 {
1298 	/* RFC3810, relevant sections:
1299 	 *  - 5.1.8. QRV (Querier's Robustness Variable)
1300 	 *  - 9.1. Robustness Variable
1301 	 */
1302 
1303 	/* The value of the Robustness Variable MUST NOT be zero,
1304 	 * and SHOULD NOT be one. Catch this here if we ever run
1305 	 * into such a case in future.
1306 	 */
1307 	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1308 	WARN_ON(idev->mc_qrv == 0);
1309 
1310 	if (mlh2->mld2q_qrv > 0)
1311 		idev->mc_qrv = mlh2->mld2q_qrv;
1312 
1313 	if (unlikely(idev->mc_qrv < min_qrv)) {
1314 		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1315 				     idev->mc_qrv, min_qrv);
1316 		idev->mc_qrv = min_qrv;
1317 	}
1318 }
1319 
1320 static void mld_update_qi(struct inet6_dev *idev,
1321 			  const struct mld2_query *mlh2)
1322 {
1323 	/* RFC3810, relevant sections:
1324 	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
1325 	 *  - 9.2. Query Interval
1326 	 *  - 9.12. Older Version Querier Present Timeout
1327 	 *    (the [Query Interval] in the last Query received)
1328 	 */
1329 	unsigned long mc_qqi;
1330 
1331 	if (mlh2->mld2q_qqic < 128) {
1332 		mc_qqi = mlh2->mld2q_qqic;
1333 	} else {
1334 		unsigned long mc_man, mc_exp;
1335 
1336 		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1337 		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1338 
1339 		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1340 	}
1341 
1342 	idev->mc_qi = mc_qqi * HZ;
1343 }
1344 
1345 static void mld_update_qri(struct inet6_dev *idev,
1346 			   const struct mld2_query *mlh2)
1347 {
1348 	/* RFC3810, relevant sections:
1349 	 *  - 5.1.3. Maximum Response Code
1350 	 *  - 9.3. Query Response Interval
1351 	 */
1352 	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1353 }
1354 
/* Handle an MLDv1-format query (or an MLDv2 host falling back).
 * @max_delay: out, response delay in jiffies (at least 1).
 * @v1_query: true when the packet is an actual 24-octet MLDv1 query.
 * Returns 0 on success, -EINVAL when MLDv2-only mode ignores it.
 */
static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay, bool v1_query)
{
	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))
		return -EINVAL;

	mldv1_md = ntohs(mld->mld_maxdelay);

	/* When in MLDv1 fallback and a MLDv2 router start-up being
	 * unaware of current MLDv1 operation, the MRC == MRD mapping
	 * only works when the exponential algorithm is not being
	 * used (as MLDv1 is unaware of such things).
	 *
	 * According to the RFC author, the MLDv2 implementations
	 * he's aware of all use a MRC < 32768 on start up queries.
	 *
	 * Thus, should we *ever* encounter something else larger
	 * than that, just assume the maximum possible within our
	 * reach.
	 */
	if (!v1_query)
		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);

	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	/* MLDv1 router present: we need to go into v1 mode *only*
	 * when an MLDv1 query is received as per section 9.12. of
	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
	 * queries MUST be of exactly 24 octets.
	 */
	if (v1_query)
		mld_set_v1_mode(idev);

	/* cancel MLDv2 report work */
	mld_gq_stop_work(idev);
	/* cancel the interface change work */
	mld_ifc_stop_work(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

	return 0;
}
1400 
1401 static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1402 			   unsigned long *max_delay)
1403 {
1404 	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1405 
1406 	mld_update_qrv(idev, mld);
1407 	mld_update_qi(idev, mld);
1408 	mld_update_qri(idev, mld);
1409 
1410 	idev->mc_maxdelay = *max_delay;
1411 
1412 	return;
1413 }
1414 
/* called with rcu_read_lock() */

/* Receive-path entry for MLD queries: queue the skb for deferred
 * processing by mld_query_work(), dropping it when the per-device
 * backlog exceeds MLD_MAX_SKBS or the device is going away.
 */
void igmp6_event_query(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev || idev->dead)
		goto out;

	spin_lock_bh(&idev->mc_query_lock);
	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_query_queue, skb);
		/* queued work owns one idev reference */
		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
			in6_dev_hold(idev);
		/* ownership transferred to the queue */
		skb = NULL;
	}
	spin_unlock_bh(&idev->mc_query_lock);
out:
	/* no-op when the skb was queued (skb == NULL) */
	kfree_skb(skb);
}
1434 
/* Process one queued MLD query skb: validate it per RFC3810 6.2,
 * dispatch to v1/v2 handling, and arm response work for the matching
 * group(s). Consumes the skb. Called from mld_query_work() with the
 * idev mc_lock held.
 */
static void __mld_query_work(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct ifmcaddr6 *ma;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len, err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto kfree_skb;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* RFC3810 6.2
	 * Upon reception of an MLD message that contains a Query, the node
	 * checks if the source address of the message is a valid link-local
	 * address, if the Hop Limit is set to 1, and if the Router Alert
	 * option is present in the Hop-By-Hop Options header of the IPv6
	 * packet.  If any of these checks fails, the packet is dropped.
	 */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    ipv6_hdr(skb)->hop_limit != 1 ||
	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
		goto kfree_skb;

	idev = in6_dev_get(skb->dev);
	if (!idev)
		goto kfree_skb;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	/* queried address must be a multicast group or unspecified */
	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		goto out;

	/* dispatch on query length: exactly 24 octets (or v1 fallback
	 * mode) means MLDv1, >= the v2 minimum means MLDv2
	 */
	if (len < MLD_V1_QUERY_LEN) {
		goto out;
	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
		err = mld_process_v1(idev, mld, &max_delay,
				     len == MLD_V1_QUERY_LEN);
		if (err < 0)
			goto out;
	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))
			goto out;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		mld_process_v2(idev, mlh2, &max_delay);

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				goto out; /* no sources allowed */

			mld_gq_start_work(idev);
			goto out;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				goto out;

			/* re-read: pskb_may_pull may have moved the data */
			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else {
		goto out;
	}

	if (group_type == IPV6_ADDR_ANY) {
		/* general query: schedule responses for every group */
		for_each_mc_mclock(idev, ma) {
			igmp6_group_queried(ma, max_delay);
		}
	} else {
		/* group(-and-source)-specific query: find the group and
		 * merge the new gsquery state with any pending response
		 */
		for_each_mc_mclock(idev, ma) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			break;
		}
	}

out:
	in6_dev_put(idev);
kfree_skb:
	consume_skb(skb);
}
1548 
/* Workqueue handler for queued MLD queries: drain up to MLD_MAX_QUEUE
 * skbs from the per-device query queue into a private list, process
 * them under the mc_lock, and requeue itself if more remain. The
 * work's idev reference is either passed to the requeued instance or
 * dropped here.
 */
static void mld_query_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_query_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

	skb_queue_head_init(&q);

	/* move a bounded batch off the shared queue */
	spin_lock_bh(&idev->mc_query_lock);
	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
		__skb_queue_tail(&q, skb);

		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_query_lock);

	mutex_lock(&idev->mc_lock);
	while ((skb = __skb_dequeue(&q)))
		__mld_query_work(skb);
	mutex_unlock(&idev->mc_lock);

	/* requeued work inherits our idev reference */
	if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
		return;

	in6_dev_put(idev);
}
1582 
/* called with rcu_read_lock() */

/* Receive-path entry for MLD reports: queue the skb for deferred
 * processing by mld_report_work(), dropping it when the per-device
 * backlog exceeds MLD_MAX_SKBS or the device is going away.
 */
void igmp6_event_report(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev || idev->dead)
		goto out;

	spin_lock_bh(&idev->mc_report_lock);
	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_report_queue, skb);
		/* queued work owns one idev reference */
		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
			in6_dev_hold(idev);
		/* ownership transferred to the queue */
		skb = NULL;
	}
	spin_unlock_bh(&idev->mc_report_lock);
out:
	/* no-op when the skb was queued (skb == NULL) */
	kfree_skb(skb);
}
1602 
/* Process one queued MLD report skb: if another host already reported
 * the group, cancel our own pending response for it (report
 * suppression). Consumes the skb. Called from mld_report_work() with
 * the idev mc_lock held.
 */
static void __mld_report_work(struct sk_buff *skb)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *ma;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto kfree_skb;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		goto kfree_skb;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		goto kfree_skb;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with not link local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		goto kfree_skb;

	idev = in6_dev_get(skb->dev);
	if (!idev)
		goto kfree_skb;

	/*
	 *	Cancel the work for this group
	 */

	for_each_mc_mclock(idev, ma) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			/* drop the pending work's mca reference */
			if (cancel_delayed_work(&ma->mca_work))
				refcount_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER |
					   MAF_TIMER_RUNNING);
			break;
		}
	}

	in6_dev_put(idev);
kfree_skb:
	consume_skb(skb);
}
1652 
/* Workqueue handler for queued MLD reports: drain up to MLD_MAX_QUEUE
 * skbs from the per-device report queue into a private list, process
 * them under the mc_lock, and requeue itself if more remain. The
 * work's idev reference is either passed to the requeued instance or
 * dropped here.
 */
static void mld_report_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_report_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

	skb_queue_head_init(&q);
	/* move a bounded batch off the shared queue */
	spin_lock_bh(&idev->mc_report_lock);
	while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
		__skb_queue_tail(&q, skb);

		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_report_lock);

	mutex_lock(&idev->mc_lock);
	while ((skb = __skb_dequeue(&q)))
		__mld_report_work(skb);
	mutex_unlock(&idev->mc_lock);

	/* requeued work inherits our idev reference */
	if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
		return;

	in6_dev_put(idev);
}
1685 
/* Should source filter @psf of group @pmc be listed in an MLDv2 group
 * record of @type? @gdeleted/@sdeleted indicate the group / source
 * comes from a tomb (deleted) list. This is the per-source predicate
 * behind mld_scount() and add_grec().
 */
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		/* only sources excluded by every filter are listed */
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}
1729 
1730 static int
1731 mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1732 {
1733 	struct ip6_sf_list *psf;
1734 	int scount = 0;
1735 
1736 	for_each_psf_mclock(pmc, psf) {
1737 		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1738 			continue;
1739 		scount++;
1740 	}
1741 	return scount;
1742 }
1743 
1744 static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
1745 		       struct net_device *dev, const struct in6_addr *saddr,
1746 		       const struct in6_addr *daddr, int proto, int len)
1747 {
1748 	struct ipv6hdr *hdr;
1749 
1750 	skb->protocol = htons(ETH_P_IPV6);
1751 	skb->dev = dev;
1752 
1753 	skb_reset_network_header(skb);
1754 	skb_put(skb, sizeof(struct ipv6hdr));
1755 	hdr = ipv6_hdr(skb);
1756 
1757 	ip6_flow_hdr(hdr, 0, 0);
1758 
1759 	hdr->payload_len = htons(len);
1760 	hdr->nexthdr = proto;
1761 	hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);
1762 
1763 	hdr->saddr = *saddr;
1764 	hdr->daddr = *daddr;
1765 }
1766 
/* Allocate and initialize a fresh MLDv2 report packet for @idev:
 * IPv6 header (payload_len 0, filled in by mld_sendpack()), hop-by-hop
 * router-alert option, and an empty mld2_report header. Returns NULL
 * on allocation failure.
 */
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
	/* hop-by-hop extension header carrying the MLD router alert */
	u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
		     2, 0, 0, IPV6_TLV_PADN, 0 };
	struct net_device *dev = idev->dev;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	const struct in6_addr *saddr;
	struct in6_addr addr_buf;
	struct mld2_report *pmr;
	struct sk_buff *skb;
	unsigned int size;
	struct sock *sk;
	struct net *net;

	/* we assume size > sizeof(ra) here
	 * Also try to not allocate high-order pages for big MTU
	 */
	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	rcu_read_lock();

	net = dev_net_rcu(dev);
	sk = net->ipv6.igmp_sk;
	skb_set_owner_w(skb, sk);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	/* payload_len 0 is a placeholder, fixed up by mld_sendpack() */
	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	rcu_read_unlock();

	skb_put_data(skb, ra, sizeof(ra));

	/* MLDv2 report header with zero records, checksummed later */
	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}
1825 
/* Finalize and transmit an MLDv2 report built by mld_newpack()/
 * add_grec(): fill in the IPv6 payload length and ICMPv6 checksum,
 * route the packet, and send it through the netfilter LOCAL_OUT hook.
 * Consumes the skb and updates SNMP counters either way.
 */
static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);

	/* now that the packet is complete, patch in the real lengths */
	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
		sizeof(*pip6);
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}
1884 
1885 static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1886 {
1887 	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1888 }
1889 
1890 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1891 	int type, struct mld2_grec **ppgr, unsigned int mtu)
1892 {
1893 	struct mld2_report *pmr;
1894 	struct mld2_grec *pgr;
1895 
1896 	if (!skb) {
1897 		skb = mld_newpack(pmc->idev, mtu);
1898 		if (!skb)
1899 			return NULL;
1900 	}
1901 	pgr = skb_put(skb, sizeof(struct mld2_grec));
1902 	pgr->grec_type = type;
1903 	pgr->grec_auxwords = 0;
1904 	pgr->grec_nsrcs = 0;
1905 	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
1906 	pmr = (struct mld2_report *)skb_transport_header(skb);
1907 	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1908 	*ppgr = pgr;
1909 	return skb;
1910 }
1911 
/* Tailroom left in @skb for more record data, 0 when no skb yet. */
#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
1913 
/* Append a group record of @type for @pmc (sources from the tomb list
 * when @sdeleted, the live list otherwise) to the in-progress report
 * @skb, sending and re-allocating packets as they fill up. Also
 * performs retransmit-count bookkeeping: sf_crcount is decremented for
 * ALLOW/BLOCK records and drained deleted sources are freed. Returns
 * the current skb (possibly new), or NULL on allocation failure.
 * Caller holds the mc_lock.
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
				int type, int gdeleted, int sdeleted,
				int crsend)
{
	struct ip6_sf_list *psf, *psf_prev, *psf_next;
	int scount, stotal, first, isquery, truncate;
	struct ip6_sf_list __rcu **psf_list;
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_grec *pgr = NULL;
	struct mld2_report *pmr;
	unsigned int mtu;

	mc_assert_locked(idev);

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV6_MIN_MTU)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		    type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!rcu_access_pointer(*psf_list))
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = mc_dereference(*psf_list, idev);
	     psf;
	     psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = mc_dereference(psf->sf_next, idev);

		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3810 6.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->mca_crcount)) &&
		    (type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		/* no room for this source (plus a record header if it
		 * would be the first): flush and start a new packet
		 */
		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			/* deleted source fully retransmitted: unlink it */
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					rcu_assign_pointer(psf_prev->sf_next,
							   mc_dereference(psf->sf_next, idev));
				else
					rcu_assign_pointer(*psf_list,
							   mc_dereference(psf->sf_next, idev));
				kfree_rcu(psf, rcu);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		/* source-list change records with no sources are omitted;
		 * other types may still need an empty group record
		 */
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}
2046 
2047 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2048 {
2049 	struct sk_buff *skb = NULL;
2050 	int type;
2051 
2052 	mc_assert_locked(idev);
2053 
2054 	if (!pmc) {
2055 		for_each_mc_mclock(idev, pmc) {
2056 			if (pmc->mca_flags & MAF_NOREPORT)
2057 				continue;
2058 			if (pmc->mca_sfcount[MCAST_EXCLUDE])
2059 				type = MLD2_MODE_IS_EXCLUDE;
2060 			else
2061 				type = MLD2_MODE_IS_INCLUDE;
2062 			skb = add_grec(skb, pmc, type, 0, 0, 0);
2063 		}
2064 	} else {
2065 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2066 			type = MLD2_MODE_IS_EXCLUDE;
2067 		else
2068 			type = MLD2_MODE_IS_INCLUDE;
2069 		skb = add_grec(skb, pmc, type, 0, 0, 0);
2070 	}
2071 	if (skb)
2072 		mld_sendpack(skb);
2073 }
2074 
/* remove zero-count source records from a source filter list */

/* Walk the RCU source list at @ppsf and unlink every entry whose
 * retransmit count (sf_crcount) has reached zero, freeing it after a
 * grace period.
 */
static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = mc_dereference(*ppsf, idev);
	     psf;
	     psf = psf_next) {
		psf_next = mc_dereference(psf->sf_next, idev);
		if (psf->sf_crcount == 0) {
			/* splice the entry out of the list */
			if (psf_prev)
				rcu_assign_pointer(psf_prev->sf_next,
						   mc_dereference(psf->sf_next, idev));
			else
				rcu_assign_pointer(*ppsf,
						   mc_dereference(psf->sf_next, idev));
			kfree_rcu(psf, rcu);
		} else {
			psf_prev = psf;
		}
	}
}
2098 
2099 static void mld_send_cr(struct inet6_dev *idev)
2100 {
2101 	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
2102 	struct sk_buff *skb = NULL;
2103 	int type, dtype;
2104 
2105 	/* deleted MCA's */
2106 	pmc_prev = NULL;
2107 	for (pmc = mc_dereference(idev->mc_tomb, idev);
2108 	     pmc;
2109 	     pmc = pmc_next) {
2110 		pmc_next = mc_dereference(pmc->next, idev);
2111 		if (pmc->mca_sfmode == MCAST_INCLUDE) {
2112 			type = MLD2_BLOCK_OLD_SOURCES;
2113 			dtype = MLD2_BLOCK_OLD_SOURCES;
2114 			skb = add_grec(skb, pmc, type, 1, 0, 0);
2115 			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
2116 		}
2117 		if (pmc->mca_crcount) {
2118 			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
2119 				type = MLD2_CHANGE_TO_INCLUDE;
2120 				skb = add_grec(skb, pmc, type, 1, 0, 0);
2121 			}
2122 			pmc->mca_crcount--;
2123 			if (pmc->mca_crcount == 0) {
2124 				mld_clear_zeros(&pmc->mca_tomb, idev);
2125 				mld_clear_zeros(&pmc->mca_sources, idev);
2126 			}
2127 		}
2128 		if (pmc->mca_crcount == 0 &&
2129 		    !rcu_access_pointer(pmc->mca_tomb) &&
2130 		    !rcu_access_pointer(pmc->mca_sources)) {
2131 			if (pmc_prev)
2132 				rcu_assign_pointer(pmc_prev->next, pmc_next);
2133 			else
2134 				rcu_assign_pointer(idev->mc_tomb, pmc_next);
2135 			in6_dev_put(pmc->idev);
2136 			kfree_rcu(pmc, rcu);
2137 		} else
2138 			pmc_prev = pmc;
2139 	}
2140 
2141 	/* change recs */
2142 	for_each_mc_mclock(idev, pmc) {
2143 		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2144 			type = MLD2_BLOCK_OLD_SOURCES;
2145 			dtype = MLD2_ALLOW_NEW_SOURCES;
2146 		} else {
2147 			type = MLD2_ALLOW_NEW_SOURCES;
2148 			dtype = MLD2_BLOCK_OLD_SOURCES;
2149 		}
2150 		skb = add_grec(skb, pmc, type, 0, 0, 0);
2151 		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */
2152 
2153 		/* filter mode changes */
2154 		if (pmc->mca_crcount) {
2155 			if (pmc->mca_sfmode == MCAST_EXCLUDE)
2156 				type = MLD2_CHANGE_TO_EXCLUDE;
2157 			else
2158 				type = MLD2_CHANGE_TO_INCLUDE;
2159 			skb = add_grec(skb, pmc, type, 0, 0, 0);
2160 			pmc->mca_crcount--;
2161 		}
2162 	}
2163 	if (!skb)
2164 		return;
2165 	(void) mld_sendpack(skb);
2166 }
2167 
/* Build and transmit an MLDv1 message of @type (report or done) for
 * group @addr on @dev. Done messages (ICMPV6_MGM_REDUCTION) go to the
 * link-local all-routers address; reports go to the group itself.
 * Updates SNMP counters on both success and failure.
 */
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	const struct in6_addr *snd_addr, *saddr;
	int err, len, payload_len, full_len;
	struct in6_addr addr_buf;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	/* hop-by-hop extension header carrying the MLD router alert */
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct dst_entry *dst;
	struct flowi6 fl6;
	struct net *net;
	struct sock *sk;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);

	rcu_read_lock();

	net = dev_net_rcu(dev);
	idev = __in6_dev_get(dev);
	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
	if (!skb) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}
	sk = net->ipv6.igmp_sk;
	skb_set_owner_w(skb, sk);

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	skb_put_data(skb, ra, sizeof(ra));

	/* MLDv1 message body: type, group address, ICMPv6 checksum */
	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}
2261 
2262 static void mld_send_initial_cr(struct inet6_dev *idev)
2263 {
2264 	struct ifmcaddr6 *pmc;
2265 	struct sk_buff *skb;
2266 	int type;
2267 
2268 	mc_assert_locked(idev);
2269 
2270 	if (mld_in_v1_mode(idev))
2271 		return;
2272 
2273 	skb = NULL;
2274 	for_each_mc_mclock(idev, pmc) {
2275 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
2276 			type = MLD2_CHANGE_TO_EXCLUDE;
2277 		else
2278 			type = MLD2_ALLOW_NEW_SOURCES;
2279 		skb = add_grec(skb, pmc, type, 0, 0, 1);
2280 	}
2281 	if (skb)
2282 		mld_sendpack(skb);
2283 }
2284 
/* DAD finished on the link-local address: start the series of
 * unsolicited MLDv2 reports.  The report is repeated mc_qrv times in
 * total (robustness variable); the remaining ones are scheduled via
 * the mc_dad_work delayed work.
 */
void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
	mutex_lock(&idev->mc_lock);
	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		/* More retransmissions left: schedule the next one. */
		if (idev->mc_dad_count)
			mld_dad_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
}
2298 
/* Delayed-work handler retransmitting the post-DAD unsolicited report.
 * Re-arms itself while mc_dad_count has retransmissions left; the final
 * in6_dev_put() drops the reference taken when the work was scheduled.
 */
static void mld_dad_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_dad_work);
	mutex_lock(&idev->mc_lock);
	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}
2315 
/* Drop one reference to source @psfsrc in @pmc's filter list for mode
 * @sfmode (MCAST_INCLUDE/MCAST_EXCLUDE).
 *
 * Returns 1 when the source was moved to the tomb list (a change record
 * must be sent), 0 on a plain decrement or free, -ESRCH when the source
 * is not present or its count is already zero.
 */
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
			   const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	mc_assert_locked(pmc->idev);

	/* Locate the source, remembering its predecessor for unlinking. */
	psf_prev = NULL;
	for_each_psf_mclock(pmc, psf) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong =>  bug */
		return -ESRCH;
	}
	WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] - 1);
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			rcu_assign_pointer(psf_prev->sf_next,
					   mc_dereference(psf->sf_next, idev));
		else
			rcu_assign_pointer(pmc->mca_sources,
					   mc_dereference(psf->sf_next, idev));

		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			/* MLDv2: keep the source on the tomb list so the
			 * deletion is advertised mc_qrv times.
			 */
			psf->sf_crcount = idev->mc_qrv;
			rcu_assign_pointer(psf->sf_next,
					   mc_dereference(pmc->mca_tomb, idev));
			rcu_assign_pointer(pmc->mca_tomb, psf);
			rv = 1;
		} else {
			kfree_rcu(psf, rcu);
		}
	}
	return rv;
}
2359 
/* Remove @sfcount source filters (@psfsrc array) in mode @sfmode from
 * group @pmca on @idev.  @delta non-zero means the per-mode reference
 * count is left alone (socket detach path already dropped it).
 *
 * Triggers an MLDv2 interface-change event when the group's filter mode
 * flips to INCLUDE or when any source deletion needs advertising.
 * Returns 0, or the first error from ip6_mc_del1_src()/-ENODEV/-ESRCH/
 * -EINVAL.
 */
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int	changerec = 0;
	int	i, err;

	if (!idev)
		return -ENODEV;

	mc_assert_locked(idev);

	/* Find the group entry for @pmca. */
	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	/* Snapshot per-source "was active" state before mutating counts. */
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode])
			return -EINVAL;

		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		/* Keep only the first error, but process all sources. */
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		/* Mode-change records supersede pending source changes. */
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec) {
		mld_ifc_event(pmc->idev);
	}

	return err;
}
2413 
/* Add multicast single-source filter to the interface list */
/* Take one reference on source @psfsrc in @pmc's filter list for mode
 * @sfmode, creating the entry if it does not yet exist.
 * Returns 0 or -ENOBUFS on allocation failure.
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
			   const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;

	mc_assert_locked(pmc->idev);

	/* Look for an existing entry; remember the list tail for linking. */
	psf_prev = NULL;
	for_each_psf_mclock(pmc, psf) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_KERNEL);
		if (!psf)
			return -ENOBUFS;

		/* Publish the fully-initialized node with rcu_assign_pointer
		 * so lockless readers never see a half-built entry.
		 */
		psf->sf_addr = *psfsrc;
		if (psf_prev) {
			rcu_assign_pointer(psf_prev->sf_next, psf);
		} else {
			rcu_assign_pointer(pmc->mca_sources, psf);
		}
	}
	WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] + 1);
	return 0;
}
2443 
2444 static void sf_markstate(struct ifmcaddr6 *pmc)
2445 {
2446 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2447 	struct ip6_sf_list *psf;
2448 
2449 	mc_assert_locked(pmc->idev);
2450 
2451 	for_each_psf_mclock(pmc, psf) {
2452 		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2453 			psf->sf_oldin = mca_xcount ==
2454 				psf->sf_count[MCAST_EXCLUDE] &&
2455 				!psf->sf_count[MCAST_INCLUDE];
2456 		} else {
2457 			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2458 		}
2459 	}
2460 }
2461 
/* Compare each source's current active state against the sf_oldin
 * snapshot taken by sf_markstate() and arm retransmission counters for
 * every transition.  A source turning active cancels any pending tomb
 * entry; a source turning inactive gets (or refreshes) a tomb entry so
 * the deletion is advertised qrv times.
 *
 * Returns the number of sources whose state changed (non-zero means an
 * interface-change report is needed).
 */
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	struct ip6_sf_list *psf, *dpsf;
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	mc_assert_locked(pmc->idev);

	rv = 0;
	for_each_psf_mclock(pmc, psf) {
		/* Same activity rule as sf_markstate(). */
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

				/* Inactive -> active: drop any pending
				 * "deleted source" tomb record for it.
				 */
				for_each_psf_tomb(pmc, dpsf) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
					    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						rcu_assign_pointer(prev->sf_next,
								   mc_dereference(dpsf->sf_next,
										  pmc->idev));
					else
						rcu_assign_pointer(pmc->mca_tomb,
								   mc_dereference(dpsf->sf_next,
										  pmc->idev));
					kfree_rcu(dpsf, rcu);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */

			for_each_psf_tomb(pmc, dpsf)
				if (ipv6_addr_equal(&dpsf->sf_addr,
				    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
				/* Allocation failure: silently skip; the
				 * deletion simply won't be advertised.
				 */
				if (!dpsf)
					continue;
				*dpsf = *psf;
				rcu_assign_pointer(dpsf->sf_next,
						   mc_dereference(pmc->mca_tomb, pmc->idev));
				rcu_assign_pointer(pmc->mca_tomb, dpsf);
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
2528 
/* Add multicast source filter list to the interface list */
/* Add @sfcount source filters (@psfsrc array) in mode @sfmode to group
 * @pmca on @idev.  @delta non-zero means the per-mode reference count
 * was already adjusted by the caller.
 *
 * On partial failure all sources added so far are rolled back.  A
 * resulting filter-mode flip or per-source state change triggers an
 * MLDv2 interface-change event.
 * Returns 0, -ENODEV, -ESRCH or the first ip6_mc_add1_src() error.
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int	isexclude;
	int	i, err;

	if (!idev)
		return -ENODEV;

	mc_assert_locked(idev);

	/* Find the group entry for @pmca. */
	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	/* Snapshot active state to detect transitions afterwards. */
	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		WRITE_ONCE(pmc->mca_sfcount[sfmode],
			   pmc->mca_sfcount[sfmode] + 1);
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* Roll back: undo the mode count bump and the i sources
		 * that were successfully added before the failure.
		 */
		if (!delta)
			WRITE_ONCE(pmc->mca_sfcount[sfmode],
				   pmc->mca_sfcount[sfmode] - 1);
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		/* Mode-change records supersede pending source changes. */
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc)) {
		mld_ifc_event(idev);
	}
	return err;
}
2589 
2590 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2591 {
2592 	struct ip6_sf_list *psf, *nextpsf;
2593 
2594 	mc_assert_locked(pmc->idev);
2595 
2596 	for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2597 	     psf;
2598 	     psf = nextpsf) {
2599 		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2600 		kfree_rcu(psf, rcu);
2601 	}
2602 	RCU_INIT_POINTER(pmc->mca_tomb, NULL);
2603 	for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2604 	     psf;
2605 	     psf = nextpsf) {
2606 		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2607 		kfree_rcu(psf, rcu);
2608 	}
2609 	RCU_INIT_POINTER(pmc->mca_sources, NULL);
2610 	pmc->mca_sfmode = MCAST_EXCLUDE;
2611 	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2612 	/* Paired with the READ_ONCE() from ipv6_chk_mcast_addr() */
2613 	WRITE_ONCE(pmc->mca_sfcount[MCAST_EXCLUDE], 1);
2614 }
2615 
/* MLDv1 join: send an immediate report for @ma and schedule a delayed
 * duplicate at a random point in the unsolicited report interval, per
 * the MLD robustness rules.  Groups flagged MAF_NOREPORT (e.g.
 * link-local all-nodes) are never reported.
 */
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	mc_assert_locked(ma->idev);

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = get_random_u32_below(unsolicited_report_interval(ma->idev));

	/* If a report is already pending, keep the earlier expiry and drop
	 * the reference the cancelled work held.
	 */
	if (cancel_delayed_work(&ma->mca_work)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_work.timer.expires - jiffies;
	}

	/* The queued work owns one reference on @ma. */
	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
}
2638 
/* Detach the per-socket source filter list @iml->sflist from group
 * membership @iml and drop the corresponding interface-level source
 * references.  @idev may be NULL (device already gone); the interface
 * update is skipped by ip6_mc_del_src() in that case.
 *
 * Returns the ip6_mc_del_src() result.  Caller holds the socket lock.
 */
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)
{
	struct ip6_sf_socklist *psl;
	int err;

	psl = sock_dereference(iml->sflist, sk);

	if (idev)
		mutex_lock(&idev->mc_lock);

	if (!psl) {
		/* any-source empty exclude case */
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	} else {
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
				     psl->sl_count, psl->sl_addr, 0);
		RCU_INIT_POINTER(iml->sflist, NULL);
		/* Return the option memory charged to the socket. */
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	}

	if (idev)
		mutex_unlock(&idev->mc_lock);

	return err;
}
2667 
2668 static void igmp6_leave_group(struct ifmcaddr6 *ma)
2669 {
2670 	mc_assert_locked(ma->idev);
2671 
2672 	if (mld_in_v1_mode(ma->idev)) {
2673 		if (ma->mca_flags & MAF_LAST_REPORTER) {
2674 			igmp6_send(&ma->mca_addr, ma->idev->dev,
2675 				ICMPV6_MGM_REDUCTION);
2676 		}
2677 	} else {
2678 		mld_add_delrec(ma->idev, ma);
2679 		mld_ifc_event(ma->idev);
2680 	}
2681 }
2682 
/* Delayed-work handler answering a general query: send a current-state
 * report for all groups, then clear the "query pending" flag.  Drops
 * the device reference taken when the work was scheduled.
 */
static void mld_gq_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_gq_work);

	mutex_lock(&idev->mc_lock);
	mld_send_report(idev, NULL);
	idev->mc_gq_running = 0;
	mutex_unlock(&idev->mc_lock);

	in6_dev_put(idev);
}
2696 
/* Delayed-work handler for interface-change (filter change) reports.
 * Sends one round of change records and re-arms itself while
 * mc_ifc_count retransmissions remain.  Drops the device reference
 * taken when the work was scheduled.
 */
static void mld_ifc_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_ifc_work);

	mutex_lock(&idev->mc_lock);
	mld_send_cr(idev);

	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}
2715 
2716 static void mld_ifc_event(struct inet6_dev *idev)
2717 {
2718 	mc_assert_locked(idev);
2719 
2720 	if (mld_in_v1_mode(idev))
2721 		return;
2722 
2723 	idev->mc_ifc_count = idev->mc_qrv;
2724 	mld_ifc_start_work(idev, 1);
2725 }
2726 
/* Per-group delayed-work handler: answer a pending query for @ma with a
 * v1 report or a v2 current-state report, mark this host as the last
 * reporter, and drop the reference the queued work held on @ma.
 */
static void mld_mca_work(struct work_struct *work)
{
	struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
					    struct ifmcaddr6, mca_work);

	mutex_lock(&ma->idev->mc_lock);
	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);
	ma->mca_flags |=  MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	mutex_unlock(&ma->idev->mc_lock);

	ma_put(ma);
}
2743 
2744 /* Device changing type */
2745 
/* Device is changing type: withdraw every group membership so it can be
 * re-announced after the change via ipv6_mc_remap().
 */
void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);
}
2757 
/* Device finished changing type: re-join all groups (same path as a
 * device coming up).
 */
void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}
2762 
2763 /* Device going down */
/* Withdraw all memberships and cancel every MLD work item for a device
 * that is going down.  The ordering below is deliberate: groups are
 * dropped first so nothing re-arms the works being stopped.
 */
void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	mutex_lock(&idev->mc_lock);
	/* Withdraw multicast list */
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);

	/* Should stop work after group drop. or we will
	 * start work again in mld_ifc_event()
	 */
	mld_query_stop_work(idev);
	mld_report_stop_work(idev);

	mutex_lock(&idev->mc_lock);
	mld_ifc_stop_work(idev);
	mld_gq_stop_work(idev);
	mutex_unlock(&idev->mc_lock);

	mld_dad_stop_work(idev);
}
2787 
/* Reset the per-device MLD parameters to their defaults.  mc_maxdelay
 * is computed last because unsolicited_report_interval() reads the
 * values assigned above.
 */
static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;		/* robustness variable */
	idev->mc_qi = MLD_QI_DEFAULT;		/* query interval */
	idev->mc_qri = MLD_QRI_DEFAULT;		/* query response interval */
	idev->mc_v1_seen = 0;			/* back to MLDv2 mode */
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}
2796 
2797 /* Device going up */
2798 
/* Device coming up: reset MLD parameters, discard stale deleted-group
 * records and (re-)announce every configured group.
 */
void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	ipv6_mc_reset(idev);
	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i) {
		/* Drop any pending delete record before re-adding. */
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	mutex_unlock(&idev->mc_lock);
}
2813 
2814 /* IPv6 device initialization. */
2815 
/* Initialize all multicast state of a freshly allocated inet6_dev:
 * work items, queues, locks and the default MLD parameters.
 */
void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
	RCU_INIT_POINTER(idev->mc_tomb, NULL);
	idev->mc_ifc_count = 0;
	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
	/* Deferred processing queues for received queries and reports. */
	skb_queue_head_init(&idev->mc_query_queue);
	skb_queue_head_init(&idev->mc_report_queue);
	spin_lock_init(&idev->mc_query_lock);
	spin_lock_init(&idev->mc_report_lock);
	mutex_init(&idev->mc_lock);
	ipv6_mc_reset(idev);
}
2833 
2834 /*
2835  *	Device is about to be destroyed: clean up.
2836  */
2837 
/* Final multicast teardown for a device being destroyed: stop all work,
 * flush pending queries/reports, drop the implicit memberships and free
 * every remaining group entry.
 */
void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate works */
	ipv6_mc_down(idev);
	mutex_lock(&idev->mc_lock);
	mld_clear_delrec(idev);
	mutex_unlock(&idev->mc_lock);
	mld_clear_query(idev);
	mld_clear_report(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	/* Unlink and release whatever memberships are still left. */
	mutex_lock(&idev->mc_lock);
	while ((i = mc_dereference(idev->mc_list, idev))) {
		rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));

		ip6_mc_clear_src(i);
		ma_put(i);
	}
	mutex_unlock(&idev->mc_lock);
}
2869 
2870 static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2871 {
2872 	struct ifmcaddr6 *pmc;
2873 
2874 	mutex_lock(&idev->mc_lock);
2875 	if (mld_in_v1_mode(idev)) {
2876 		for_each_mc_mclock(idev, pmc)
2877 			igmp6_join_group(pmc);
2878 	} else {
2879 		mld_send_report(idev, NULL);
2880 	}
2881 	mutex_unlock(&idev->mc_lock);
2882 }
2883 
2884 static int ipv6_mc_netdev_event(struct notifier_block *this,
2885 				unsigned long event,
2886 				void *ptr)
2887 {
2888 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2889 	struct inet6_dev *idev = __in6_dev_get(dev);
2890 
2891 	switch (event) {
2892 	case NETDEV_RESEND_IGMP:
2893 		if (idev)
2894 			ipv6_mc_rejoin_groups(idev);
2895 		break;
2896 	default:
2897 		break;
2898 	}
2899 
2900 	return NOTIFY_DONE;
2901 }
2902 
/* Registered in igmp6_late_init() for NETDEV_RESEND_IGMP handling. */
static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};
2906 
2907 #ifdef CONFIG_PROC_FS
/* Iterator state for /proc/net/igmp6: the current device and its
 * inet6_dev while walking every group on every netdevice under RCU.
 */
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
2915 
/* Return the first multicast group of the first device that has one,
 * positioning the iterator state accordingly.  Caller holds rcu_read_lock.
 */
static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		/* Devices without IPv6 state are skipped. */
		if (!idev)
			continue;

		im = rcu_dereference(idev->mc_list);
		if (im) {
			state->idev = idev;
			break;
		}
	}
	return im;
}
2937 
/* Advance to the next group, moving on to subsequent devices when the
 * current device's list is exhausted.  Returns NULL at the end.
 * Caller holds rcu_read_lock.
 */
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = rcu_dereference(im->next);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			/* Ran out of devices: iteration is finished. */
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		im = rcu_dereference(state->idev->mc_list);
	}
	return im;
}
2956 
2957 static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2958 {
2959 	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2960 	if (im)
2961 		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2962 			--pos;
2963 	return pos ? NULL : im;
2964 }
2965 
/* seq_file start: take the RCU read lock for the whole traversal and
 * seek to position *pos.
 */
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}
2972 
2973 static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2974 {
2975 	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2976 
2977 	++*pos;
2978 	return im;
2979 }
2980 
2981 static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2982 	__releases(RCU)
2983 {
2984 	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2985 
2986 	if (likely(state->idev))
2987 		state->idev = NULL;
2988 	state->dev = NULL;
2989 	rcu_read_unlock();
2990 }
2991 
/* Emit one /proc/net/igmp6 line: ifindex, device name, group address,
 * user count, flags, and the remaining report-timer ticks (0 if the
 * timer is not running).
 */
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
	return 0;
}
3006 
/* seq_file operations backing /proc/net/igmp6. */
static const struct seq_operations igmp6_mc_seq_ops = {
	.start	=	igmp6_mc_seq_start,
	.next	=	igmp6_mc_seq_next,
	.stop	=	igmp6_mc_seq_stop,
	.show	=	igmp6_mc_seq_show,
};
3013 
/* Iterator state for /proc/net/mcfilter6: current device, its
 * inet6_dev and the group whose source list is being walked.
 */
struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
3022 
/* Return the first source filter of the first group that has one,
 * positioning the iterator state.  Caller holds rcu_read_lock.
 */
static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;

		/* NOTE: only the first group of each device is inspected
		 * here; igmp6_mcf_get_next() walks the remaining groups.
		 */
		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			psf = rcu_dereference(im->mca_sources);
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
		}
	}
	return psf;
}
3050 
/* Advance to the next source filter, moving through groups and then
 * devices as each inner list is exhausted.  Returns NULL at the end.
 * Caller holds rcu_read_lock.
 */
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = rcu_dereference(psf->sf_next);
	while (!psf) {
		/* Current group exhausted: find the next group/device. */
		state->im = rcu_dereference(state->im->next);
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		psf = rcu_dereference(state->im->mca_sources);
	}
out:
	return psf;
}
3074 
3075 static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
3076 {
3077 	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
3078 	if (psf)
3079 		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
3080 			--pos;
3081 	return pos ? NULL : psf;
3082 }
3083 
/* seq_file start: take the RCU read lock; position 0 yields the header
 * token, later positions seek into the filter list.
 */
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
3090 
3091 static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3092 {
3093 	struct ip6_sf_list *psf;
3094 	if (v == SEQ_START_TOKEN)
3095 		psf = igmp6_mcf_get_first(seq);
3096 	else
3097 		psf = igmp6_mcf_get_next(seq, v);
3098 	++*pos;
3099 	return psf;
3100 }
3101 
3102 static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
3103 	__releases(RCU)
3104 {
3105 	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3106 
3107 	if (likely(state->im))
3108 		state->im = NULL;
3109 	if (likely(state->idev))
3110 		state->idev = NULL;
3111 
3112 	state->dev = NULL;
3113 	rcu_read_unlock();
3114 }
3115 
/* Emit one /proc/net/mcfilter6 line (or the header for the start
 * token): ifindex, device, group, source and INCLUDE/EXCLUDE counts.
 */
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   READ_ONCE(psf->sf_count[MCAST_INCLUDE]),
			   READ_ONCE(psf->sf_count[MCAST_EXCLUDE]));
	}
	return 0;
}
3134 
/* seq_file operations backing /proc/net/mcfilter6. */
static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	=	igmp6_mcf_seq_start,
	.next	=	igmp6_mcf_seq_next,
	.stop	=	igmp6_mcf_seq_stop,
	.show	=	igmp6_mcf_seq_show,
};
3141 
3142 static int __net_init igmp6_proc_init(struct net *net)
3143 {
3144 	int err;
3145 
3146 	err = -ENOMEM;
3147 	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
3148 			sizeof(struct igmp6_mc_iter_state)))
3149 		goto out;
3150 	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
3151 			&igmp6_mcf_seq_ops,
3152 			sizeof(struct igmp6_mcf_iter_state)))
3153 		goto out_proc_net_igmp6;
3154 
3155 	err = 0;
3156 out:
3157 	return err;
3158 
3159 out_proc_net_igmp6:
3160 	remove_proc_entry("igmp6", net->proc_net);
3161 	goto out;
3162 }
3163 
/* Remove the per-netns proc entries created by igmp6_proc_init(). */
static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
3169 #else
/* No-op stub when CONFIG_PROC_FS is disabled. */
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
/* No-op stub when CONFIG_PROC_FS is disabled. */
static inline void igmp6_proc_exit(struct net *net)
{
}
3177 #endif
3178 
/* Per-netns setup: the MLD control socket (hop limit 1, as all MLD
 * traffic is link-local), the autojoin socket and the proc entries.
 * Uses goto-based cleanup so partially created state is torn down on
 * failure.
 */
static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	/* MLD messages never leave the link. */
	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}
3215 
/* Per-netns teardown: destroy both control sockets and remove the proc
 * entries created in igmp6_net_init().
 */
static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}
3222 
/* Per-network-namespace init/exit hooks for the MLD subsystem. */
static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};
3227 
3228 int __init igmp6_init(void)
3229 {
3230 	int err;
3231 
3232 	err = register_pernet_subsys(&igmp6_net_ops);
3233 	if (err)
3234 		return err;
3235 
3236 	mld_wq = create_workqueue("mld");
3237 	if (!mld_wq) {
3238 		unregister_pernet_subsys(&igmp6_net_ops);
3239 		return -ENOMEM;
3240 	}
3241 
3242 	return err;
3243 }
3244 
/* Late init: hook into netdevice notifications (NETDEV_RESEND_IGMP). */
int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}
3249 
/* Subsystem teardown: mirror of igmp6_init(). */
void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
	destroy_workqueue(mld_wq);
}
3255 
/* Mirror of igmp6_late_init(): drop the netdevice notifier. */
void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}
3260