/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- MLDv2 support
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/times.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/mld.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>

/* Ensure that we have struct in6_addr aligned on 32bit word. */
static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
};

static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(struct timer_list *t);

static void mld_gq_timer_expire(struct timer_list *t);
static void mld_ifc_timer_expire(struct timer_list *t);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);

#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)
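/* With the defaults above, the RFC3810, 9.12. Older Version Querier
 * Present Timeout computed by mld_set_v1_mode() below works out to
 * [Robustness Variable] * [Query Interval] + [Query Response Interval],
 * i.e. 2 * 125s + 10s = 260s (illustrative arithmetic, not a tunable).
 */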
/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;

/*
 *	socket join on multicast group
 */

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))

static int unsolicited_report_interval(struct inet6_dev *idev)
{
	int iv;

	if (mld_in_v1_mode(idev))
		iv = idev->cnf.mldv1_unsolicited_report_interval;
	else
		iv = idev->cnf.mldv2_unsolicited_report_interval;

	return iv > 0 ? iv : 1;
}

int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			rcu_read_unlock();
			return -EADDRINUSE;
		}
	}
	rcu_read_unlock();

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (!mc_lst)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	if (ifindex == 0) {
		struct rt6_info *rt;
		rt = rt6_lookup(net, addr, NULL, 0, 0);
		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = __dev_get_by_index(net, ifindex);

	if (!dev) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = MCAST_EXCLUDE;
	rwlock_init(&mc_lst->sflock);
	mc_lst->sflist = NULL;

	/*
	 *	now add/increase the group membership on the device
	 */

	err = ipv6_dev_mc_inc(dev, addr);

	if (err) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);

	return 0;
}
EXPORT_SYMBOL(ipv6_sock_mc_join);

/*
 *	socket leave on multicast group
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rtnl_dereference(*lnk)) != NULL;
	     lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			*lnk = mc_lst->next;

			dev = __dev_get_by_index(net, mc_lst->ifindex);
			if (dev) {
				struct inet6_dev *idev = __in6_dev_get(dev);

				(void) ip6_mc_leave_src(sk, mc_lst, idev);
				if (idev)
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			} else
				(void) ip6_mc_leave_src(sk, mc_lst, NULL);

			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);
			return 0;
		}
	}

	return -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);
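/* For orientation: these two entry points back the IPV6_JOIN_GROUP /
 * IPV6_LEAVE_GROUP (and MCAST_JOIN_GROUP / MCAST_LEAVE_GROUP) socket
 * options; the setsockopt path calls them with the RTNL held, which is
 * what the ASSERT_RTNL() checks above verify.
 */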
/* called with rcu_read_lock() */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
					     const struct in6_addr *group,
					     int ifindex)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;

	if (ifindex == 0) {
		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);

		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);

	if (!dev)
		return NULL;
	idev = __in6_dev_get(dev);
	if (!idev)
		return NULL;
	read_lock_bh(&idev->lock);
	if (idev->dead) {
		read_unlock_bh(&idev->lock);
		return NULL;
	}
	return idev;
}

void __ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
		struct net_device *dev;

		np->ipv6_mc_list = mc_lst->next;

		dev = __dev_get_by_index(net, mc_lst->ifindex);
		if (dev) {
			struct inet6_dev *idev = __in6_dev_get(dev);

			(void) ip6_mc_leave_src(sk, mc_lst, idev);
			if (idev)
				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
		} else
			(void) ip6_mc_leave_src(sk, mc_lst, NULL);

		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
		kfree_rcu(mc_lst, rcu);
	}
}

void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (!rcu_access_pointer(np->ipv6_mc_list))
		return;
	rtnl_lock();
	__ipv6_sock_mc_close(sk);
	rtnl_unlock();
}

int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)
{
	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int i, j, rv;
	int leavegroup = 0;
	int pmclocked = 0;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;

	for_each_pmc_rcu(inet6, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	write_lock(&pmc->sflock);
	pmclocked = 1;

	psl = pmc->sflist;
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		}
		pmc->sflist = psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* the source is already in the filter list */
			goto done;
	}
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	if (pmclocked)
		write_unlock(&pmc->sflock);
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}

int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
{
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = 0;

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
				      GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
				     newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}

	write_lock(&pmc->sflock);
	psl = pmc->sflist;
	if (psl) {
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
				      psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
	} else
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock(&pmc->sflock);
	err = 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}

int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;
	/* changes to the ipv6_mc_list require the socket lock and
	 * rtnl lock. We have the socket lock and rcu read lock,
	 * so reading the list is safe.
	 */

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	count = psl ? psl->sl_count : 0;
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();

	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* changes to psl require the socket lock, and a write lock
	 * on pmc->sflock. We have the socket lock so reading here is safe.
	 */
	for (i = 0; i < copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	return err;
}

bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return true;
	}
	read_lock(&mc->sflock);
	psl = mc->sflist;
	if (!psl) {
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;
	}
	read_unlock(&mc->sflock);
	rcu_read_unlock();

	return rv;
}

static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}
	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (mld_in_v1_mode(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	mc->mca_crcount = mc->idev->mc_qrv;
	mld_ifc_event(mc->idev);
}

static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	spin_unlock_bh(&mc->mca_lock);
	if (mc->mca_flags & MAF_NOREPORT)
		return;

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		refcount_dec(&mc->mca_refcnt);
	spin_unlock_bh(&mc->mca_lock);
}

/*
 * deleted ifmcaddr6 manipulation
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	spin_unlock_bh(&idev->mc_lock);
}

static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf;
	struct in6_addr *pmca = &im->mca_addr;

	spin_lock_bh(&idev->mc_lock);
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			idev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&idev->mc_lock);

	spin_lock_bh(&im->mca_lock);
	if (pmc) {
		im->idev = pmc->idev;
		im->mca_crcount = idev->mc_qrv;
		im->mca_sfmode = pmc->mca_sfmode;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			im->mca_tomb = pmc->mca_tomb;
			im->mca_sources = pmc->mca_sources;
			for (psf = im->mca_sources; psf; psf = psf->sf_next)
				psf->sf_crcount = im->mca_crcount;
		}
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}
	spin_unlock_bh(&im->mca_lock);
}

static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	spin_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf = psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}

static void mca_get(struct ifmcaddr6 *mc)
{
	refcount_inc(&mc->mca_refcnt);
}

static void ma_put(struct ifmcaddr6 *mc)
{
	if (refcount_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree(mc);
	}
}

static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
				   const struct in6_addr *addr)
{
	struct ifmcaddr6 *mc;

	mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
	if (!mc)
		return NULL;

	timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);

	mc->mca_addr = *addr;
	mc->idev = idev; /* reference taken by caller */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	refcount_set(&mc->mca_refcnt, 1);
	spin_lock_init(&mc->mca_lock);

	/* initial mode is (EX, empty) */
	mc->mca_sfmode = MCAST_EXCLUDE;
	mc->mca_sfcount[MCAST_EXCLUDE] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	return mc;
}

/*
 *	device multicast group inc (add if not found)
 */
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	ASSERT_RTNL();

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (!idev)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
				       NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	mc = mca_alloc(idev, addr);
	if (!mc) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	mc->next = idev->mc_list;
	idev->mc_list = mc;

	/* Hold this for the code below before we unlock,
	 * it is already exposed via idev->mc_list.
	 */
	mca_get(mc);
	write_unlock_bh(&idev->lock);

	mld_del_delrec(idev, mc);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}

/*
 *	device multicast group del
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, **map;

	ASSERT_RTNL();

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				*map = ma->next;
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);
				ip6_mc_clear_src(ma);

				ma_put(ma);
				return 0;
			}
			write_unlock_bh(&idev->lock);
			return 0;
		}
	}
	write_unlock_bh(&idev->lock);

	return -ENOENT;
}

int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
	struct inet6_dev *idev;
	int err;

	ASSERT_RTNL();

	idev = __in6_dev_get(dev);
	if (!idev)
		err = -ENODEV;
	else
		err = __ipv6_dev_mc_dec(idev, addr);

	return err;
}

/*
 *	check if the interface/address pair is valid
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc = mc->next) {
			if (ipv6_addr_equal(&mc->mca_addr, group))
				break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				spin_lock_bh(&mc->mca_lock);
				for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
						break;
				}
				if (psf)
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
				spin_unlock_bh(&mc->mca_lock);
			} else
				rv = true; /* don't filter unspecified source */
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return rv;
}
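/* A note on the timer helpers below: mod_timer() returns 0 when the
 * timer was not already pending, so each "if (!mod_timer(...))
 * in6_dev_hold(idev)" takes one idev reference per armed timer; the
 * matching "if (del_timer(...)) __in6_dev_put(idev)" (or the expiry
 * handler itself) drops it again.
 */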
static void mld_gq_start_timer(struct inet6_dev *idev)
{
	unsigned long tv = prandom_u32() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_gq_stop_timer(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	if (del_timer(&idev->mc_gq_timer))
		__in6_dev_put(idev);
}

static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_ifc_stop_timer(struct inet6_dev *idev)
{
	idev->mc_ifc_count = 0;
	if (del_timer(&idev->mc_ifc_timer))
		__in6_dev_put(idev);
}

static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

static void mld_dad_stop_timer(struct inet6_dev *idev)
{
	if (del_timer(&idev->mc_dad_timer))
		__in6_dev_put(idev);
}

/*
 *	IGMP handling (alias multicast ICMPv6 messages)
 */

static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (del_timer(&ma->mca_timer)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (delay >= resptime)
		delay = prandom_u32() % resptime;

	ma->mca_timer.expires = jiffies + delay;
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}

/* mark EXCLUDE-mode sources */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}

static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	scount = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}
static int mld_force_mld_version(const struct inet6_dev *idev)
{
	/* Normally, both are 0 here. If enforcement of a particular
	 * version is being used, the 'all' device setting
	 * (.../conf/all/force_mld_version) takes precedence over the
	 * individual device setting.
	 */
	if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
		return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
	else
		return idev->cnf.force_mld_version;
}

static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
{
	return mld_force_mld_version(idev) == 2;
}

static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
{
	return mld_force_mld_version(idev) == 1;
}

static bool mld_in_v1_mode(const struct inet6_dev *idev)
{
	if (mld_in_v2_mode_only(idev))
		return false;
	if (mld_in_v1_mode_only(idev))
		return true;
	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
		return true;

	return false;
}

static void mld_set_v1_mode(struct inet6_dev *idev)
{
	/* RFC3810, relevant sections:
	 *  - 9.1. Robustness Variable
	 *  - 9.2. Query Interval
	 *  - 9.3. Query Response Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 */
	unsigned long switchback;

	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

	idev->mc_v1_seen = jiffies + switchback;
}

static void mld_update_qrv(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.8. QRV (Querier's Robustness Variable)
	 *  - 9.1. Robustness Variable
	 */

	/* The value of the Robustness Variable MUST NOT be zero,
	 * and SHOULD NOT be one. Catch this here if we ever run
	 * into such a case in future.
	 */
	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
	WARN_ON(idev->mc_qrv == 0);

	if (mlh2->mld2q_qrv > 0)
		idev->mc_qrv = mlh2->mld2q_qrv;

	if (unlikely(idev->mc_qrv < min_qrv)) {
		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
				     idev->mc_qrv, min_qrv);
		idev->mc_qrv = min_qrv;
	}
}

static void mld_update_qi(struct inet6_dev *idev,
			  const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
	 *  - 9.2. Query Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 *    (the [Query Interval] in the last Query received)
	 */
	unsigned long mc_qqi;

	if (mlh2->mld2q_qqic < 128) {
		mc_qqi = mlh2->mld2q_qqic;
	} else {
		unsigned long mc_man, mc_exp;

		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
	}

	idev->mc_qi = mc_qqi * HZ;
}
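/* Worked example for the QQIC decoding above (illustrative only):
 * QQIC < 128 encodes the Query Interval directly, e.g. 125 -> 125s.
 * QQIC >= 128 is floating point per RFC3810, 5.1.9: for QQIC = 0x8F,
 * exp = (0x8F >> 4) & 7 = 0 and mant = 0x0F, so
 * QQI = (0x0F | 0x10) << (0 + 3) = 31 * 8 = 248 seconds.
 */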
static void mld_update_qri(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.3. Maximum Response Code
	 *  - 9.3. Query Response Interval
	 */
	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
}

static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay, bool v1_query)
{
	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))
		return -EINVAL;

	mldv1_md = ntohs(mld->mld_maxdelay);

	/* When in MLDv1 fallback and an MLDv2 router starts up while
	 * unaware of the current MLDv1 operation, the MRC == MRD mapping
	 * only works when the exponential algorithm is not being
	 * used (as MLDv1 is unaware of such things).
	 *
	 * According to the RFC author, the MLDv2 implementations
	 * he's aware of all use a MRC < 32768 on start up queries.
	 *
	 * Thus, should we *ever* encounter something else larger
	 * than that, just assume the maximum possible within our
	 * reach.
	 */
	if (!v1_query)
		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);

	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	/* MLDv1 router present: we need to go into v1 mode *only*
	 * when an MLDv1 query is received as per section 9.12. of
	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
	 * queries MUST be of exactly 24 octets.
	 */
	if (v1_query)
		mld_set_v1_mode(idev);

	/* cancel MLDv2 report timer */
	mld_gq_stop_timer(idev);
	/* cancel the interface change timer */
	mld_ifc_stop_timer(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

	return 0;
}

static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
			  unsigned long *max_delay)
{
	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);

	mld_update_qrv(idev, mld);
	mld_update_qi(idev, mld);
	mld_update_qri(idev, mld);

	idev->mc_maxdelay = *max_delay;

	return 0;
}

/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	struct ifmcaddr6 *ma;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len, err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* RFC3810 6.2
	 * Upon reception of an MLD message that contains a Query, the node
	 * checks if the source address of the message is a valid link-local
	 * address, if the Hop Limit is set to 1, and if the Router Alert
	 * option is present in the Hop-By-Hop Options header of the IPv6
	 * packet.  If any of these checks fails, the packet is dropped.
	 */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    ipv6_hdr(skb)->hop_limit != 1 ||
	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (!idev)
		return 0;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		return -EINVAL;

	if (len < MLD_V1_QUERY_LEN) {
		return -EINVAL;
	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
		err = mld_process_v1(idev, mld, &max_delay,
				     len == MLD_V1_QUERY_LEN);
		if (err < 0)
			return err;
	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))
			return -EINVAL;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		err = mld_process_v2(idev, mlh2, &max_delay);
		if (err < 0)
			return err;

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				return -EINVAL; /* no sources allowed */

			mld_gq_start_timer(idev);
			return 0;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				return -EINVAL;

			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else {
		return -EINVAL;
	}

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
		}
	} else {
		for (ma = idev->mc_list; ma; ma = ma->next) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);

	return 0;
}

/* called with rcu_read_lock() */
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		return 0;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		return -EINVAL;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with a non-link-local source address */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (!idev)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma = ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				refcount_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);
	return 0;
}

static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}

static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip6_sf_list *psf;
	int scount = 0;

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
		       struct net_device *dev,
		       const struct in6_addr *saddr,
		       const struct in6_addr *daddr,
		       int proto, int len)
{
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = inet6_sk(sk)->hop_limit;

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
}
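/* A note on the ra[] array used by mld_newpack() and igmp6_send() below:
 * it is a complete 8-octet Hop-by-Hop Options header: next header
 * (ICMPv6), Hdr Ext Len 0 (i.e. 8 octets total), a Router Alert option
 * of length 2 carrying value 0 ("MLD message", RFC2711), and a PadN
 * option padding the header out to 8 octets.
 */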
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
	struct net_device *dev = idev->dev;
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct sk_buff *skb;
	struct mld2_report *pmr;
	struct in6_addr addr_buf;
	const struct in6_addr *saddr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	unsigned int size = mtu + hlen + tlen;
	int err;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };

	/* we assume size > sizeof(ra) here */
	/* limit our allocations to order-0 page */
	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
	skb = sock_alloc_send_skb(sk, size, 1, &err);

	if (!skb)
		return NULL;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	skb_put_data(skb, ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}

static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
		sizeof(*pip6);
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	payload_len = skb->len;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
}
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, struct mld2_grec **ppgr)
{
	struct net_device *dev = pmc->idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

	if (!skb)
		skb = mld_newpack(pmc->idev, dev->mtu);
	if (!skb)
		return NULL;
	pgr = skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
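/* For reference, the record types handled by add_grec() below map to
 * RFC3810, 5.2.12: MODE_IS_* are Current-State records (sent in query
 * responses), CHANGE_TO_* are Filter-Mode-Change records, and
 * ALLOW_NEW_SOURCES/BLOCK_OLD_SOURCES are Source-List-Change records.
 * The gdeleted/sdeleted arguments select the tomb ("delete") group and
 * source lists instead of the live ones.
 */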
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, int gdeleted, int sdeleted, int crsend)
{
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr = NULL;
	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!*psf_list)
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3810 6.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->mca_crcount)) &&
		    (type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}

static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	read_lock_bh(&idev->lock);
	if (!pmc) {
		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
			if (pmc->mca_flags & MAF_NOREPORT)
				continue;
			spin_lock_bh(&pmc->mca_lock);
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			spin_unlock_bh(&pmc->mca_lock);
		}
	} else {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void mld_clear_zeros(struct ip6_sf_list **ppsf)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

static void mld_send_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	read_lock_bh(&idev->lock);
	spin_lock(&idev->mc_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
		}
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0, 0);
			}
			pmc->mca_crcount--;
			if (pmc->mca_crcount == 0) {
				mld_clear_zeros(&pmc->mca_tomb);
				mld_clear_zeros(&pmc->mca_sources);
			}
		}
		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
		    !pmc->mca_sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				idev->mc_tomb = pmc_next;
			in6_dev_put(pmc->idev);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock(&idev->mc_lock);

	/* change recs */
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			pmc->mca_crcount--;
		}
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (!skb)
		return;
	(void) mld_sendpack(skb);
}

static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	const struct in6_addr *snd_addr, *saddr;
	struct in6_addr addr_buf;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err, len, payload_len, full_len;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct flowi6 fl6;
	struct dst_entry *dst;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	rcu_read_lock();
	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
		      IPSTATS_MIB_OUT, full_len);
	rcu_read_unlock();

	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);

	if (!skb) {
		rcu_read_lock();
		IP6_INC_STATS(net, __in6_dev_get(dev),
			      IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}
	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	skb_put_data(skb, ra, sizeof(ra));

	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

static void mld_send_initial_cr(struct inet6_dev *idev)
{
	struct sk_buff *skb;
	struct ifmcaddr6 *pmc;
	int type;

	if (mld_in_v1_mode(idev))
		return;

	skb = NULL;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_CHANGE_TO_EXCLUDE;
		else
			type = MLD2_CHANGE_TO_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 1);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}

void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev, idev->mc_maxdelay);
	}
}

static void mld_dad_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);

	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev, idev->mc_maxdelay);
	}
	in6_dev_put(idev);
}

static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->mca_sources = psf->sf_next;
		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			psf->sf_next = pmc->mca_tomb;
			pmc->mca_tomb = psf;
			rv = 1;
		} else
			kfree(psf);
	}
	return rv;
}
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int changerec = 0;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode]) {
			spin_unlock_bh(&pmc->mca_lock);
			read_unlock_bh(&idev->lock);
			return -EINVAL;
		}
		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec)
		mld_ifc_event(pmc->idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
			   const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;

		psf->sf_addr = *psfsrc;
		if (psf_prev)
			psf_prev->sf_next = psf;
		else
			pmc->mca_sources = psf;
	}
	psf->sf_count[sfmode]++;
	return 0;
}

static void sf_markstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];

	for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}
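/* sf_markstate() records, per source, whether it was included in the
 * filter before a change (sf_oldin); sf_setstate() then compares that
 * with the new state and arms sf_crcount = QRV retransmissions for
 * every source whose inclusion changed.  Sources that became inactive
 * are kept (or copied) on the mca_tomb list so the remaining
 * filter-change reports can still name them.  Returns the number of
 * sources whose state changed.
 */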
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

				for (dpsf = pmc->mca_tomb; dpsf;
				     dpsf = dpsf->sf_next) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
							    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->mca_tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
				if (ipv6_addr_equal(&dpsf->sf_addr,
						    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->mca_lock held by callers */
				dpsf->sf_next = pmc->mca_tomb;
				pmc->mca_tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}

/*
 * Add multicast source filter list to the interface list
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);

	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc))
		mld_ifc_event(idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_tomb = NULL;
	for (psf = pmc->mca_sources; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_sources = NULL;
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}
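/* Send the unsolicited MLDv1 Report for a newly joined group and arm
 * mca_timer for one delayed retransmission at a random offset within
 * the unsolicited report interval (cf. RFC 2710, section 4).  Groups
 * flagged MAF_NOREPORT (e.g. well-known link-local groups) are never
 * reported.
 */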
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = prandom_u32() % unsolicited_report_interval(ma->idev);

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
}

static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)
{
	int err;

	/* callers have the socket lock and rtnl lock
	 * so no other readers or writers of iml or its sflist
	 */
	if (!iml->sflist) {
		/* any-source empty exclude case */
		return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	}
	err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
			     iml->sflist->sl_count, iml->sflist->sl_addr, 0);
	sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
	iml->sflist = NULL;
	return err;
}

static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	if (mld_in_v1_mode(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER)
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				   ICMPV6_MGM_REDUCTION);
	} else {
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
	}
}

static void mld_gq_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);

	idev->mc_gq_running = 0;
	mld_send_report(idev, NULL);
	in6_dev_put(idev);
}

static void mld_ifc_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);

	mld_send_cr(idev);
	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_timer(idev, idev->mc_maxdelay);
	}
	in6_dev_put(idev);
}

static void mld_ifc_event(struct inet6_dev *idev)
{
	if (mld_in_v1_mode(idev))
		return;
	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);
}

static void igmp6_timer_handler(struct timer_list *t)
{
	struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);

	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
	ma_put(ma);
}
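/* The entry points below are called from addrconf when a device changes
 * type, goes down, comes up, or is destroyed.  Ordering matters on the
 * down path: groups are dropped first and the MLD timers are stopped
 * afterwards, since dropping a group can re-arm the interface-change
 * timer via mld_ifc_event().
 */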
/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
}

void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}

/* Device going down */

void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);

	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);

	/* Should stop the timers after group drop, or we will
	 * start a timer again in mld_ifc_event().
	 */
	mld_ifc_stop_timer(idev);
	mld_gq_stop_timer(idev);
	mld_dad_stop_timer(idev);
	read_unlock_bh(&idev->lock);
}

static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_v1_seen = 0;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}

/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	ipv6_mc_reset(idev);
	for (i = idev->mc_list; i; i = i->next) {
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	read_unlock_bh(&idev->lock);
}

/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
	timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
	ipv6_mc_reset(idev);
	write_unlock_bh(&idev->lock);
}

/*
 * Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate timers */
	ipv6_mc_down(idev);
	mld_clear_delrec(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	write_lock_bh(&idev->lock);
	while ((i = idev->mc_list) != NULL) {
		idev->mc_list = i->next;

		write_unlock_bh(&idev->lock);
		ma_put(i);
		write_lock_bh(&idev->lock);
	}
	write_unlock_bh(&idev->lock);
}

static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc;

	ASSERT_RTNL();

	if (mld_in_v1_mode(idev)) {
		read_lock_bh(&idev->lock);
		for (pmc = idev->mc_list; pmc; pmc = pmc->next)
			igmp6_join_group(pmc);
		read_unlock_bh(&idev->lock);
	} else
		mld_send_report(idev, NULL);
}

static int ipv6_mc_netdev_event(struct notifier_block *this,
				unsigned long event,
				void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct inet6_dev *idev = __in6_dev_get(dev);

	switch (event) {
	case NETDEV_RESEND_IGMP:
		if (idev)
			ipv6_mc_rejoin_groups(idev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};

#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)

static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (im) {
			state->idev = idev;
			break;
		}
		read_unlock_bh(&idev->lock);
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = im->next;
	while (!im) {
		if (likely(state->idev))
			read_unlock_bh(&state->idev->lock);

		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		read_lock_bh(&state->idev->lock);
		im = state->idev->mc_list;
	}
	return im;
}
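/* Positioning helper for the /proc/net/igmp6 seq_file.  Each group is
 * shown as ifindex, device name, group address, user count, flags and
 * remaining timer ticks; a line resembles the following (values are
 * illustrative only):
 *
 *	1    lo              ff020000000000000000000000000001     1 0000000C 0
 */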
static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

	++*pos;
	return im;
}

static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_timer.expires - jiffies) : 0);
	return 0;
}

static const struct seq_operations igmp6_mc_seq_ops = {
	.start	= igmp6_mc_seq_start,
	.next	= igmp6_mc_seq_next,
	.stop	= igmp6_mc_seq_stop,
	.show	= igmp6_mc_seq_show,
};

static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp6_mc_seq_ops,
			    sizeof(struct igmp6_mc_iter_state));
}

static const struct file_operations igmp6_mc_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= igmp6_mc_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)

static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;
		read_lock_bh(&idev->lock);
		im = idev->mc_list;
		if (likely(im)) {
			spin_lock_bh(&im->mca_lock);
			psf = im->mca_sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->mca_lock);
		}
		read_unlock_bh(&idev->lock);
	}
	return psf;
}
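/* Advance the /proc/net/mcfilter6 iterator to the next source filter.
 * Three levels are walked (device, group, source); the iterator keeps
 * state->idev->lock read-held and state->im->mca_lock held while it is
 * positioned inside a device/group, dropping and re-taking them as it
 * crosses a boundary.
 */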
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = state->im->next;
		while (!state->im) {
			if (likely(state->idev))
				read_unlock_bh(&state->idev->lock);

			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			read_lock_bh(&state->idev->lock);
			state->im = state->idev->mc_list;
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->mca_lock);
		psf = state->im->mca_sources;
	}
out:
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	if (likely(state->im)) {
		spin_unlock_bh(&state->im->mca_lock);
		state->im = NULL;
	}
	if (likely(state->idev)) {
		read_unlock_bh(&state->idev->lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	= igmp6_mcf_seq_start,
	.next	= igmp6_mcf_seq_next,
	.stop	= igmp6_mcf_seq_stop,
	.show	= igmp6_mcf_seq_show,
};

static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp6_mcf_seq_ops,
			    sizeof(struct igmp6_mcf_iter_state));
}

static const struct file_operations igmp6_mcf_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= igmp6_mcf_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int __net_init igmp6_proc_init(struct net *net)
{
	int err;

	err = -ENOMEM;
	if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
		goto out;
	if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
			 &igmp6_mcf_seq_fops))
		goto out_proc_net_igmp6;

	err = 0;
out:
	return err;

out_proc_net_igmp6:
	remove_proc_entry("igmp6", net->proc_net);
	goto out;
}

static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif
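/* Per-namespace setup: create the two kernel control sockets used by
 * the MLD code.  igmp_sk transmits reports and done messages (hop
 * limit forced to 1, as all MLD traffic is link-local);
 * mc_autojoin_sk is used to join groups on behalf of the kernel,
 * e.g. for addresses flagged IFA_F_MCAUTOJOIN in addrconf.
 */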
static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}

static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}

static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};

int __init igmp6_init(void)
{
	return register_pernet_subsys(&igmp6_net_ops);
}

int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}

void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
}

void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}