1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux NET3: Internet Group Management Protocol [IGMP] 4 * 5 * This code implements the IGMP protocol as defined in RFC1112. There has 6 * been a further revision of this protocol since which is now supported. 7 * 8 * If you have trouble with this module be careful what gcc you have used, 9 * the older version didn't come out right using gcc 2.5.8, the newer one 10 * seems to fall out with gcc 2.6.2. 11 * 12 * Authors: 13 * Alan Cox <alan@lxorguk.ukuu.org.uk> 14 * 15 * Fixes: 16 * 17 * Alan Cox : Added lots of __inline__ to optimise 18 * the memory usage of all the tiny little 19 * functions. 20 * Alan Cox : Dumped the header building experiment. 21 * Alan Cox : Minor tweaks ready for multicast routing 22 * and extended IGMP protocol. 23 * Alan Cox : Removed a load of inline directives. Gcc 2.5.8 24 * writes utterly bogus code otherwise (sigh) 25 * fixed IGMP loopback to behave in the manner 26 * desired by mrouted, fixed the fact it has been 27 * broken since 1.3.6 and cleaned up a few minor 28 * points. 29 * 30 * Chih-Jen Chang : Tried to revise IGMP to Version 2 31 * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu 32 * The enhancements are mainly based on Steve Deering's 33 * ipmulti-3.5 source code. 34 * Chih-Jen Chang : Added the igmp_get_mrouter_info and 35 * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of 36 * the mrouted version on that device. 37 * Chih-Jen Chang : Added the max_resp_time parameter to 38 * Tsu-Sheng Tsao igmp_heard_query(). Using this parameter 39 * to identify the multicast router version 40 * and do what the IGMP version 2 specified. 41 * Chih-Jen Chang : Added a timer to revert to IGMP V2 router 42 * Tsu-Sheng Tsao if the specified time expired. 43 * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. 44 * Alan Cox : Use GFP_ATOMIC in the right places. 
45 * Christian Daudt : igmp timer wasn't set for local group 46 * memberships but was being deleted, 47 * which caused a "del_timer() called 48 * from %p with timer not initialized\n" 49 * message (960131). 50 * Christian Daudt : removed del_timer from 51 * igmp_timer_expire function (960205). 52 * Christian Daudt : igmp_heard_report now only calls 53 * igmp_timer_expire if tm->running is 54 * true (960216). 55 * Malcolm Beattie : ttl comparison wrong in igmp_rcv made 56 * igmp_heard_query never trigger. Expiry 57 * miscalculation fixed in igmp_heard_query 58 * and random() made to return unsigned to 59 * prevent negative expiry times. 60 * Alexey Kuznetsov: Wrong group leaving behaviour, backport 61 * fix from pending 2.1.x patches. 62 * Alan Cox: Forget to enable FDDI support earlier. 63 * Alexey Kuznetsov: Fixed leaving groups on device down. 64 * Alexey Kuznetsov: Accordance to igmp-v2-06 draft. 65 * David L Stevens: IGMPv3 support, with help from 66 * Vinay Kulkarni 67 */ 68 69 #include <linux/module.h> 70 #include <linux/slab.h> 71 #include <linux/uaccess.h> 72 #include <linux/types.h> 73 #include <linux/kernel.h> 74 #include <linux/jiffies.h> 75 #include <linux/string.h> 76 #include <linux/socket.h> 77 #include <linux/sockios.h> 78 #include <linux/in.h> 79 #include <linux/inet.h> 80 #include <linux/netdevice.h> 81 #include <linux/skbuff.h> 82 #include <linux/inetdevice.h> 83 #include <linux/igmp.h> 84 #include "igmp_internal.h" 85 #include <linux/if_arp.h> 86 #include <linux/rtnetlink.h> 87 #include <linux/times.h> 88 #include <linux/pkt_sched.h> 89 #include <linux/byteorder/generic.h> 90 91 #include <net/net_namespace.h> 92 #include <net/netlink.h> 93 #include <net/addrconf.h> 94 #include <net/arp.h> 95 #include <net/ip.h> 96 #include <net/protocol.h> 97 #include <net/route.h> 98 #include <net/sock.h> 99 #include <net/checksum.h> 100 #include <net/inet_common.h> 101 #include <linux/netfilter_ipv4.h> 102 #ifdef CONFIG_IP_MROUTE 103 #include <linux/mroute.h> 
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */

#define IGMP_QUERY_INTERVAL			(125*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL		(10*HZ)

#define IGMP_INITIAL_REPORT_DELAY		(1)

/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs!
 * IGMP specs require to report membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict to specs provided this delay is small enough.
 */

/* True when the interface must behave as an IGMPv1 host: either the
 * force_igmp_version sysctl (all-devices or per-device) is set to 1, or
 * a v1 query was heard recently (mr_v1_seen holds a deadline in jiffies).
 */
static bool IGMP_V1_SEEN(const struct in_device *in_dev)
{
	unsigned long seen;

	if (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1)
		return true;
	if (IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1)
		return true;
	seen = READ_ONCE(in_dev->mr_v1_seen);
	return seen && time_before(jiffies, seen);
}

/* Same as IGMP_V1_SEEN() but for IGMPv2 (sysctl value 2 / mr_v2_seen). */
static bool IGMP_V2_SEEN(const struct in_device *in_dev)
{
	unsigned long seen;

	if (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2)
		return true;
	if (IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2)
		return true;
	seen = READ_ONCE(in_dev->mr_v2_seen);
	return seen && time_before(jiffies, seen);
}

/* Per-device unsolicited report interval converted to jiffies (always
 * >= 1). The IGMPv2 or IGMPv3 sysctl is selected depending on which
 * IGMP version is currently in effect on the interface.
 */
static int unsolicited_report_interval(struct in_device *in_dev)
{
	int interval_ms, interval_jiffies;

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV2_UNSOLICITED_REPORT_INTERVAL);
	else /* v3 */
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV3_UNSOLICITED_REPORT_INTERVAL);

	interval_jiffies = msecs_to_jiffies(interval_ms);

	/* _timer functions can't handle a delay of 0 jiffies so ensure
	 * we always return a positive value.
	 */
	if (interval_jiffies <= 0)
		interval_jiffies = 1;
	return interval_jiffies;
}

static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp);
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta);

/* Drop a reference on a group record; on the final put, release the
 * interface reference and free the record after an RCU grace period.
 */
static void ip_ma_put(struct ip_mc_list *im)
{
	if (refcount_dec_and_test(&im->refcnt)) {
		in_dev_put(im->interface);
		kfree_rcu(im, rcu);
	}
}

#define for_each_pmc_rcu(in_dev, pmc)			\
	for (pmc = rcu_dereference(in_dev->mc_list);	\
	     pmc != NULL;				\
	     pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)			\
	for (pmc = rtnl_dereference(in_dev->mc_list);	\
	     pmc != NULL;				\
	     pmc = rtnl_dereference(pmc->next_rcu))

/* Free an entire source-filter list. */
static void ip_sf_list_clear_all(struct ip_sf_list *psf)
{
	struct ip_sf_list *next;

	while (psf) {
		next = psf->sf_next;
		kfree(psf);
		psf = next;
	}
}

#ifdef CONFIG_IP_MULTICAST

/*
 *	Timer management
 */

/* Stop a group's report timer; if the timer was pending, drop the
 * reference it held on @im.
 */
static void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	if (timer_delete(&im->timer))
		refcount_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}

/* It must be called with locked im->lock */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = get_random_u32_below(max_delay);

	im->tm_running = 1;
	if (refcount_inc_not_zero(&im->refcnt)) {
		/* mod_timer() returning nonzero means the timer was
		 * already pending and keeps its own reference, so drop
		 * the one we just took.
		 */
		if (mod_timer(&im->timer, jiffies + tv + 2))
			ip_ma_put(im);
	}
}

/* Arm the general-query response timer with a random delay below
 * mr_maxdelay, keeping an already-pending earlier expiry.
 */
static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = get_random_u32_below(READ_ONCE(in_dev->mr_maxdelay));
	unsigned long exp = jiffies + tv + 2;

	if (in_dev->mr_gq_running &&
	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
		return;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, exp))
		in_dev_hold(in_dev);
}

/* Arm the interface-change report timer with a random delay < @delay;
 * take a device reference only when the timer was not already pending.
 */
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = get_random_u32_below(delay);

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

/* (Re)arm a group's timer so it fires within @max_delay from now; an
 * already-pending earlier expiry is left in place.
 */
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (timer_delete(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			/* pending expiry was already sooner; re-add as-is */
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		refcount_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}


/*
 *	Send an IGMP report.
 */

#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)


/* Decide whether source @psf of group @pmc belongs in a group record of
 * @type. @gdeleted/@sdeleted indicate the group/source come from the
 * deleted ("tomb") lists rather than the live ones.
 */
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}

/* Count how many of @pmc's sources would be listed in a @type record. */
static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip_sf_list *psf;
	int scount = 0;

	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

/* source address selection per RFC 3376 section 4.2.13 */
static __be32 igmpv3_get_srcaddr(struct net_device *dev,
				 const struct flowi4 *fl4)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return htonl(INADDR_ANY);

	/* only use the routed source address if it is actually one of
	 * the device's local addresses; 0.0.0.0 otherwise
	 */
	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (fl4->saddr == ifa->ifa_local)
			return fl4->saddr;
	}

	return htonl(INADDR_ANY);
}

/* Allocate a report skb routed to 224.0.0.22 and pre-fill the IP header
 * (with the Router Alert option) plus an empty IGMPv3 report header.
 * Returns NULL on allocation or routing failure.
 */
static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);
	struct flowi4 fl4;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	unsigned int size;

	size = min(mtu, IP_MAX_MTU);
	while (1) {
		skb = alloc_skb(size + hlen + tlen,
				GFP_ATOMIC | __GFP_NOWARN);
		if (skb)
			break;
		/* halve the requested size until it fits or gets too small */
		size >>= 1;
		if (size < 256)
			return NULL;
	}
	skb->priority = TC_PRIO_CONTROL;

	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	pip->version  = 4;
	pip->ihl      = (sizeof(struct iphdr)+4)>>2;
	pip->tos      = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl      = 1;
	pip->daddr    = fl4.daddr;

	rcu_read_lock();
	pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
	rcu_read_unlock();

	pip->protocol = IPPROTO_IGMP;
	pip->tot_len  = 0;	/* filled in later */
	ip_select_ident(net, skb, NULL);
	/* IP Router Alert option (RFC 2113) */
	((u8 *)&pip[1])[0] = IPOPT_RA;
	((u8 *)&pip[1])[1] = 4;
	((u8 *)&pip[1])[2] = 0;
	((u8 *)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}

/* Checksum the finished report and hand it to the IP output path. */
static int igmpv3_sendpack(struct sk_buff *skb)
{
	struct igmphdr *pig = igmp_hdr(skb);
	const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);

	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);

	return ip_local_out(skb_dst_dev_net(skb), skb->sk, skb);
}

/* Bytes a @type group record for @pmc would occupy in a report. */
static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}

/* Append a fresh group-record header of @type for @pmc, allocating a new
 * packet when @skb is NULL; *ppgr points at the new record on return.
 */
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr;

	if (!skb) {
		skb = igmpv3_newpack(dev, mtu);
		if (!skb)
			return NULL;
	}
	pgr = skb_put(skb, sizeof(struct igmpv3_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->multiaddr;
	pih = igmpv3_report_hdr(skb);
	pih->ngrec = htons(ntohs(pih->ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)

/* Append a @type group record for @pmc (with its matching sources) to
 * @skb, flushing and reallocating packets as they fill up. Returns the
 * skb currently being filled (may differ from the one passed in).
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct net *net = dev_net(dev);
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;
	unsigned int mtu;

	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;
	if (ipv4_is_local_multicast(pmc->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV4_MIN_MTU)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		   type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3376 5.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
		    (!gdeleted && pmc->crcount)) &&
		    (type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			/* unlink fully-retransmitted deleted sources */
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}

/* Send current-state report(s) for group @pmc, or for every group on the
 * interface when @pmc is NULL (general query response).
 */
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	struct net *net = dev_net(in_dev->dev);
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			if (ipv4_is_local_multicast(pmc->multiaddr) &&
			    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
	struct ip_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

/* Free a group record together with both of its source lists. */
static void kfree_pmc(struct ip_mc_list *pmc)
{
	ip_sf_list_clear_all(pmc->sources);
	ip_sf_list_clear_all(pmc->tomb);
	kfree(pmc);
}

/* Build and send the pending IGMPv3 change reports for the interface:
 * records for deleted groups (the tomb list) plus source-list and
 * filter-mode change records for live groups. Fully-retransmitted tomb
 * entries are freed here.
 */
static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	rcu_read_lock();
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		/* nothing left to retransmit: unlink and free the entry */
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree_pmc(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for_each_pmc_rcu(in_dev, pmc) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	rcu_read_unlock();

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}

/* Send a single v1/v2 report or leave message of @type for group @pmc;
 * v3 report requests are delegated to igmpv3_send_report(). Returns 0 on
 * success (or suppressed send), negative on failure.
 */
static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32 group = pmc ?
		       pmc->multiaddr : 0;
	struct flowi4 fl4;
	__be32 dst;
	int hlen, tlen;

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);

	if (ipv4_is_local_multicast(group) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return 0;

	/* leaves go to all-routers (224.0.0.2), reports to the group */
	if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt))
		return -1;

	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
	if (!skb) {
		ip_rt_put(rt);
		return -1;
	}
	skb->priority = TC_PRIO_CONTROL;

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, hlen);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	iph->version  = 4;
	iph->ihl      = (sizeof(struct iphdr)+4)>>2;
	iph->tos      = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl      = 1;
	iph->daddr    = dst;
	iph->saddr    = fl4.saddr;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(net, skb, NULL);
	/* IP Router Alert option */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;

	ih = skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(net, skb->sk, skb);
}

/* General-query response timer: send a full current-state report and
 * drop the reference taken when the timer was armed.
 */
static void igmp_gq_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = timer_container_of(in_dev, t, mr_gq_timer);

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	in_dev_put(in_dev);
}

/* Interface-change timer: emit pending change records and re-arm while
 * retransmissions remain; cmpxchg guards against concurrent resets of
 * mr_ifc_count.
 */
static void igmp_ifc_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = timer_container_of(in_dev, t, mr_ifc_timer);
	u32 mr_ifc_count;

	igmpv3_send_cr(in_dev);
restart:
	mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);

	if (mr_ifc_count) {
		if (cmpxchg(&in_dev->mr_ifc_count,
			    mr_ifc_count,
			    mr_ifc_count - 1) != mr_ifc_count)
			goto restart;
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}
	in_dev_put(in_dev);
}

/* Schedule interface-change reports; a no-op while a v1/v2 querier is
 * present, since change records are IGMPv3-only.
 */
static void igmp_ifc_event(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
	igmp_ifc_start_timer(in_dev, 1);
}


/* Per-group report timer: send a membership report in whichever IGMP
 * version the network currently requires, rescheduling while unsolicited
 * retransmissions remain.
 */
static void igmp_timer_expire(struct timer_list *t)
{
	struct ip_mc_list *im = timer_container_of(im, t, timer);
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count && --im->unsolicit_count)
		igmp_start_timer(im, unsolicited_report_interval(in_dev));

	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	/* drop the reference taken when the timer was armed */
	ip_ma_put(im);
}

/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}

/* Mark which of the queried sources of @pmc must be answered; returns 0
 * when no group-and-source-specific response is needed.
 */
static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}

/* return true if packet was dropped */
static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;
	struct net *net = dev_net(in_dev->dev);

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return false;
	if (ipv4_is_local_multicast(group) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return false;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			/* another host already answered for this group,
			 * so suppress our own pending report
			 */
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
	return false;
}

/* return true if packet was dropped */
static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;
	struct net *net = dev_net(in_dev->dev);
	unsigned long seen;

	if (len == 8) {
		/* 8-byte query: a v1 or v2 querier; remember until when
		 * its presence forces the older protocol version
		 */
		seen = jiffies + READ_ONCE(in_dev->mr_qrv) * READ_ONCE(in_dev->mr_qi) +
		       READ_ONCE(in_dev->mr_qri);
		if (ih->code == 0) {
			/* Alas, old v1 router presents here. */

			max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
			WRITE_ONCE(in_dev->mr_v1_seen, seen);
			group = 0;
		} else {
			/* v2 router present */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			WRITE_ONCE(in_dev->mr_v2_seen, seen);
		}
		/* cancel the interface change timer */
		WRITE_ONCE(in_dev->mr_ifc_count, 0);
		if (timer_delete(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return true;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* This is a v3 query with v1 queriers present */
		max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Interpretation of the max_delay code is problematic here.
		 * A real v2 host would use ih_code directly, while v3 has a
		 * different encoding. We use the v3 encoding as more likely
		 * to be intended in a v3 query.
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
	} else { /* v3 */
		unsigned long mr_qi;

		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return true;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			/* re-read header: pskb_may_pull may relocate data */
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return true;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
		WRITE_ONCE(in_dev->mr_maxdelay, max_delay);

		/* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently
		 * received value was zero, use the default or statically
		 * configured value.
		 */
		WRITE_ONCE(in_dev->mr_qrv,
			   ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
		mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
		WRITE_ONCE(in_dev->mr_qi, mr_qi);
		/* RFC3376, 8.3. Query Response Interval:
		 * The number of seconds represented by the [Query Response
		 * Interval] must be less than the [Query Interval].
		 */
		if (READ_ONCE(in_dev->mr_qri) >= mr_qi)
			WRITE_ONCE(in_dev->mr_qri, (mr_qi/HZ - 1) * HZ);

		if (!group) { /* general query */
			if (ih3->nsrcs)
				return true;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return false;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/*
	 * - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			  igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
	return false;
}

/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	int len = skb->len;
	bool dropped = true;

	if (netif_is_l3_master(dev)) {
		/* VRF: process on the real ingress device */
		dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
		if (!dev)
			goto drop;
	}

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	if (skb_checksum_simple_validate(skb))
		goto drop;

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		dropped = igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			dropped = igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	if (dropped)
		kfree_skb(skb);
	else
		consume_skb(skb);
	return 0;
}

#endif


/*
 *	Add a filter to a device
 */

static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in ndo_set_rx_mode
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 *	Remove a filter from a device
 */

static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp)
{
	struct ip_mc_list *pmc;
	struct net *net = dev_net(in_dev->dev);

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used.
	 * In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc_obj(*pmc, gfp);
	if (!pmc)
		return;
	spin_lock_init(&pmc->lock);
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	/* remaining change reports: robustness variable learned from the
	 * querier if known, else the per-netns sysctl default
	 */
	pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		/* steal the source lists from the dying membership */
		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

/*
 * restore ip_mc_list deleted records
 */
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf;
	struct net *net = dev_net(in_dev->dev);
	__be32 multiaddr = im->multiaddr;

	/* unlink the tombstone for this group, if one exists */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	spin_lock_bh(&im->lock);
	if (pmc) {
		im->interface = pmc->interface;
		if (im->sfmode == MCAST_INCLUDE) {
			/* take back the tombstone's source lists */
			swap(im->tomb, pmc->tomb);
			swap(im->sources, pmc->sources);
			for (psf = im->sources; psf; psf = psf->sf_next)
				psf->sf_crcount = in_dev->mr_qrv ?:
					READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		} else {
			im->crcount = in_dev->mr_qrv ?:
				READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		}
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	spin_unlock_bh(&im->lock);
}

/*
 * flush ip_mc_list deleted records
 */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	/* detach the whole tomb list under the lock, free it outside */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		ip_sf_list_clear_all(psf);
	}
	rcu_read_unlock();
}
#endif

static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	/* remember whether we were the last reporter before stopping the
	 * timer; only then does IGMPv2 send a leave message
	 */
	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;	/* IGMPv1 has no leave message */
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im, gfp);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void
igmp_group_dropped(struct ip_mc_list *im)
{
	__igmp_group_dropped(im, GFP_KERNEL);
}

static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
#endif

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	if (in_dev->dead)
		return;

	im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		/* v1/v2: schedule an unsolicited membership report */
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	/* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
	 * not send filter-mode change record as the mode should be from
	 * IN() to IN(A).
	 */
	if (im->sfmode == MCAST_EXCLUDE)
		im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);

	igmp_ifc_event(in_dev);
#endif
}


/*
 *	Multicast list managers
 */

static u32 ip_mc_hash(const struct ip_mc_list *im)
{
	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}

static void ip_mc_hash_add(struct in_device *in_dev,
			   struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash;
	u32 hash;

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		/* table already exists: push the new entry on its chain */
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		rcu_assign_pointer(mc_hash[hash], im);
		return;
	}

	/* do not use a hash table for small number of items */
	if (in_dev->mc_count < 4)
		return;

	mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
			  GFP_KERNEL);
	if (!mc_hash)
		return;

	/* populate the fresh table with every existing membership */
	for_each_pmc_rtnl(in_dev, im) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		RCU_INIT_POINTER(mc_hash[hash], im);
	}

	rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}

static void ip_mc_hash_remove(struct in_device *in_dev,
			      struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
	struct ip_mc_list *aux;

	if (!mc_hash)
		return;
	mc_hash += ip_mc_hash(im);
	/* advance until *mc_hash is the link pointing at im, then splice */
	while ((aux = rtnl_dereference(*mc_hash)) != im)
		mc_hash = &aux->next_hash;
	*mc_hash = im->next_hash;
}

int inet_fill_ifmcaddr(struct sk_buff *skb, struct net_device *dev,
		       const struct ip_mc_list *im,
		       struct inet_fill_args *args)
{
	struct ifa_cacheinfo ci;
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
			sizeof(struct ifaddrmsg), args->flags);
	if (!nlh)
		return -EMSGSIZE;

	ifm =
	    nlmsg_data(nlh);
	ifm->ifa_family = AF_INET;
	ifm->ifa_prefixlen = 32;
	ifm->ifa_flags = IFA_F_PERMANENT;
	ifm->ifa_scope = RT_SCOPE_UNIVERSE;
	ifm->ifa_index = dev->ifindex;

	/* creation time, converted to hundredths of a second since boot */
	ci.cstamp = (READ_ONCE(im->mca_cstamp) - INITIAL_JIFFIES) * 100UL / HZ;
	ci.tstamp = ci.cstamp;
	ci.ifa_prefered = INFINITY_LIFE_TIME;
	ci.ifa_valid = INFINITY_LIFE_TIME;

	if (nla_put_in_addr(skb, IFA_MULTICAST, im->multiaddr) < 0 ||
	    nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci) < 0) {
		nlmsg_cancel(skb, nlh);
		return -EMSGSIZE;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

static void inet_ifmcaddr_notify(struct net_device *dev,
				 const struct ip_mc_list *im, int event)
{
	struct inet_fill_args fillargs = {
		.event = event,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOMEM;

	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
			nla_total_size(sizeof(__be32)) +
			nla_total_size(sizeof(struct ifa_cacheinfo)),
			GFP_KERNEL);
	if (!skb)
		goto error;

	err = inet_fill_ifmcaddr(skb, dev, im, &fillargs);
	if (err < 0) {
		/* the skb was sized for exactly this message, so an
		 * -EMSGSIZE here would be a sizing bug
		 */
		WARN_ON_ONCE(err == -EMSGSIZE);
		nlmsg_free(skb);
		goto error;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MCADDR, NULL, GFP_KERNEL);
	return;
error:
	rtnl_set_sk_err(net, RTNLGRP_IPV4_MCADDR, err);
}

/*
 *	A socket has joined a multicast group on device dev.
 */
static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
				unsigned int mode, gfp_t gfp)
{
	struct ip_mc_list __rcu **mc_hash;
	struct ip_mc_list *im;

	ASSERT_RTNL();

	/* look the group up, via the hash table when one exists */
	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)addr, MC_HASH_SZ_LOG);

		for (im = rtnl_dereference(mc_hash[hash]);
		     im;
		     im = rtnl_dereference(im->next_hash)) {
			if (im->multiaddr == addr)
				break;
		}
	} else {
		for_each_pmc_rtnl(in_dev, im) {
			if (im->multiaddr == addr)
				break;
		}
	}

	if (im) {
		/* existing membership: just bump the user count and merge
		 * the (mode, empty) source filter
		 */
		im->users++;
		ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
		goto out;
	}

	im = kzalloc_obj(*im, gfp);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	im->mca_cstamp = jiffies;
	im->mca_tstamp = im->mca_cstamp;
	/* initial mode is (EX, empty) */
	im->sfmode = mode;
	im->sfcount[mode] = 1;
	refcount_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	timer_setup(&im->timer, igmp_timer_expire, 0);
#endif

	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

	ip_mc_hash_add(in_dev, im);

#ifdef CONFIG_IP_MULTICAST
	/* revive any pending IGMPv3 delete record for this group */
	igmpv3_del_delrec(in_dev, im);
#endif
	igmp_group_added(im);
	inet_ifmcaddr_notify(in_dev->dev, im, RTM_NEWMULTICAST);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}

void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
}
EXPORT_SYMBOL(__ip_mc_inc_group);

void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	__ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
}

EXPORT_SYMBOL(ip_mc_inc_group);

static int ip_mc_check_iphdr(struct sk_buff *skb)
{
	const struct iphdr *iph;
	unsigned int len;
	unsigned int offset = skb_network_offset(skb) + sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
		return -EINVAL;

	/* account for IP options, then make the full header linear */
	offset += ip_hdrlen(skb) - sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	/* re-read: pskb_may_pull() may have reallocated the header */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	/* tot_len must fit in the skb and cover at least the header */
	len = skb_network_offset(skb) + ntohs(iph->tot_len);
	if (skb->len < len || len < offset)
		return -EINVAL;

	skb_set_transport_header(skb, offset);

	return 0;
}

static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb);

	len += sizeof(struct igmpv3_report);

	return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
}

static int ip_mc_check_igmp_query(struct sk_buff *skb)
{
	unsigned int transport_len = ip_transport_len(skb);
	unsigned int len;

	/* IGMPv{1,2}? */
	if (transport_len != sizeof(struct igmphdr)) {
		/* or IGMPv3? */
		if (transport_len < sizeof(struct igmpv3_query))
			return -EINVAL;

		len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;
	}

	/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
	 * all-systems destination addresses (224.0.0.1) for general queries
	 */
	if (!igmp_hdr(skb)->group &&
	    ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
		return -EINVAL;

	return 0;
}

static int ip_mc_check_igmp_msg(struct sk_buff *skb)
{
	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		return 0;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return ip_mc_check_igmp_reportv3(skb);
	case IGMP_HOST_MEMBERSHIP_QUERY:
		return ip_mc_check_igmp_query(skb);
	default:
		return -ENOMSG;
	}
}

static __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

static int ip_mc_check_igmp_csum(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
	unsigned int transport_len = ip_transport_len(skb);
	struct sk_buff *skb_chk;

	if (!ip_mc_may_pull(skb, len))
		return -EINVAL;

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       ip_mc_validate_checksum);
	if (!skb_chk)
		return -EINVAL;

	/* only the checksum verdict matters; drop any trimmed clone */
	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return 0;
}

/**
 * ip_mc_check_igmp - checks whether this is a sane IGMP packet
 * @skb: the skb to validate
 *
 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
 * skb transport header accordingly and returns zero.
 *
 * -EINVAL: A broken packet was detected, i.e.
 * it violates some internet
 * standard
 * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
 * -ENOMEM: A memory allocation failure happened.
 *
 * Caller needs to set the skb network header and free any returned skb if it
 * differs from the provided skb.
 */
int ip_mc_check_igmp(struct sk_buff *skb)
{
	int ret = ip_mc_check_iphdr(skb);

	if (ret < 0)
		return ret;

	if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
		return -ENOMSG;

	ret = ip_mc_check_igmp_csum(skb);
	if (ret < 0)
		return ret;

	return ip_mc_check_igmp_msg(skb);
}
EXPORT_SYMBOL(ip_mc_check_igmp);

/*
 *	Resend IGMP JOIN report; used by netdev notifier.
 */
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;
	struct net *net = dev_net(in_dev->dev);

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;

		/* a failover is happening and switches
		 * must be notified immediately
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}

/*
 *	A socket has left a multicast group on device dev
 */

void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	struct ip_mc_list *i;
	struct ip_mc_list __rcu **ip;

	ASSERT_RTNL();

	for (ip = &in_dev->mc_list;
	     (i = rtnl_dereference(*ip)) != NULL;
	     ip = &i->next_rcu) {
		if (i->multiaddr == addr) {
			if (--i->users == 0) {
				/* last user: unlink, send leave/notify,
				 * then drop the list's reference
				 */
				ip_mc_hash_remove(in_dev, i);
				*ip = i->next_rcu;
				in_dev->mc_count--;
				__igmp_group_dropped(i, gfp);
				inet_ifmcaddr_notify(in_dev->dev, i,
						     RTM_DELMULTICAST);
				ip_mc_clear_src(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);
				return;
			}
			break;
		}
	}
}
EXPORT_SYMBOL(__ip_mc_dec_group);

/* Device changing type */

void ip_mc_unmap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);
}

void ip_mc_remap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/* Device going down */

void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
	WRITE_ONCE(in_dev->mr_ifc_count, 0);
	/* a pending timer apparently holds an in_dev reference; release
	 * it when we cancel one
	 */
	if (timer_delete(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (timer_delete(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}

#ifdef CONFIG_IP_MULTICAST
static void ip_mc_reset(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	in_dev->mr_qi = IGMP_QUERY_INTERVAL;
	in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
	in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
}
#else
static void ip_mc_reset(struct in_device *in_dev)
{
}
#endif

void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

#ifdef CONFIG_IP_MULTICAST
	timer_setup(&in_dev->mr_gq_timer,
		    igmp_gq_timer_expire, 0);
	timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
#endif
	ip_mc_reset(in_dev);

	spin_lock_init(&in_dev->mc_tomb_lock);
}

/* Device going up */

void ip_mc_up(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	ip_mc_reset(in_dev);
	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/*
 *	Device is about to be destroyed: clean up.
 */

void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers */
	ip_mc_down(in_dev);
#ifdef CONFIG_IP_MULTICAST
	igmpv3_clear_delrec(in_dev);
#endif

	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
		in_dev->mc_list = i->next_rcu;
		in_dev->mc_count--;
		ip_mc_clear_src(i);
		ip_ma_put(i);
	}
}

/* RTNL is locked */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	/* an explicit ifindex wins over everything else */
	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		return idev;
	}
	/* next, try the supplied local interface address */
	if (imr->imr_address.s_addr) {
		dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
		if (!dev)
			return NULL;
	}

	/* finally, route towards the multicast address itself */
	if (!dev) {
		struct rtable *rt = ip_route_output(net,
						    imr->imr_multiaddr.s_addr,
						    0, 0, 0,
						    RT_SCOPE_UNIVERSE);
		if (!IS_ERR(rt)) {
			dev = rt->dst.dev;
			ip_rt_put(rt);
		}
	}
	if (dev) {
		/* report the resolved ifindex back to the caller */
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}

/*
 *	Join a socket to a group
 */

static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		/* IGMPv3: keep a tombstone so change reports can still
		 * announce the removal; rv = 1 tells the caller a change
		 * record is pending
		 */
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}

#ifndef CONFIG_IP_MULTICAST
#define igmp_ifc_event(x)	do { } while (0)
#endif

static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found??
		   bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	/* take pmc->lock before leaving the RCU section so the entry
	 * cannot go away under us
	 */
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		/* new source: append it to the list */
		psf = kzalloc_obj(*psf, GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

#ifdef CONFIG_IP_MULTICAST
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	/* record each source's current "included" state so sf_setstate()
	 * can detect transitions after the filter lists change
	 */
	for (psf = pmc->sources; psf; psf = psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				/* source became active: drop any stale
				 * "delete" record and schedule reports
				 */
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {

			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc_obj(*dpsf, GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the interface
 * list
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	/* pin the entry with its own lock before leaving the RCU section */
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* roll back the sources added before the failure */
		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(pmc->interface->dev);
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *tomb, *sources;

	spin_lock_bh(&pmc->lock);
	tomb = pmc->tomb;
	pmc->tomb = NULL;
	sources = pmc->sources;
	pmc->sources = NULL;
	/* reset to (EXCLUDE, empty), the default membership state */
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
	spin_unlock_bh(&pmc->lock);

	/* free the detached lists outside the lock */
	ip_sf_list_clear_all(tomb);
	ip_sf_list_clear_all(sources);
}

/* Join a multicast group
 */
static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
			      unsigned int mode)
{
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;
	int err;

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		/* joining the same group twice on one device is an error */
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (!iml)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = mode;
	rcu_assign_pointer(inet->mc_list, iml);
	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
	err = 0;
done:
	return err;
}

/* Join ASM (Any-Source Multicast) group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ip_mc_join_group);

/* Join SSM (Source-Specific Multicast) group
 */
int ip_mc_join_group_ssm(struct sock *sk, struct
			 ip_mreqn *imr,
			 unsigned int mode)
{
	return __ip_mc_join_group(sk, imr, mode);
}

/* Flush a socket membership's source filter down to the interface and
 * release the per-socket filter list.
 */
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (!psf) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
				     iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			    iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(struct_size(psf, sl_addr, psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}

int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	ASSERT_RTNL();

	in_dev = ip_mc_find_dev(net, imr);
	/* only fail outright when the caller named a specific interface
	 * that does not exist; otherwise still unlink the socket entry
	 */
	if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
		ret = -ENODEV;
		goto out;
	}
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
			   iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);

		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);

/* Add (add != 0) or remove one source address from the socket's
 * per-group source filter, keeping the interface filter in sync.
 */
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			      NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				    sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			      &mreqs->imr_sourceaddr, 1);

		/* shift the remaining entries down over the removed slot */
		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		/* grow the filter list by IP_SFBLOCK entries */
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
				   &sk->sk_omem_alloc);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		if (psl)
			kfree_rcu(psl, rcu);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			    sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	/* keep the list sorted: open a slot at position i */
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		      &mreqs->imr_sourceaddr, 1);
done:
	/* the (INCLUDE, empty) case is a full leave; done here so the
	 * normal error paths above can share the same exit
	 */
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}

/* Replace the socket's entire source-filter state for one group with the
 * mode and source list supplied in *msf (the IP_MSFILTER /
 * MCAST_MSFILTER full-state set operation).  The old per-socket list is
 * swapped out under RCU and the interface filter is updated to match.
 * Caller must hold RTNL.  Returns 0 or a negative errno.
 */
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		/* build the replacement list before touching the old one so
		 * failure leaves the existing filter intact
		 */
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
						      msf->imsf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist_flex,
		       flex_array_size(msf, imsf_slist_flex, msf->imsf_numsrc));
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
			msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl,
				     struct_size(newpsl, sl_addr,
						 newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	psl = rtnl_dereference(pmc->sflist);
	if (psl) {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
	} else {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			0, NULL, 0);
	}
	/* publish the new list, then RCU-free the old one */
	rcu_assign_pointer(pmc->sflist, newpsl);
	if (psl)
		kfree_rcu(psl, rcu);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}

/* Read back the socket's source-filter state for one group: the filter
 * mode and up to msf->imsf_numsrc source addresses are copied to user
 * space through optval/optlen; imsf_numsrc is rewritten to the full
 * source count.  Caller must hold RTNL.  Returns 0 or a negative errno.
 */
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
		 sockptr_t optval, sockptr_t optlen)
{
	int err, len, count, copycount, msf_size;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	if (!psl) {
		count = 0;
	} else {
		count = psl->sl_count;
	}
	/* copy at most as many sources as the caller's buffer can hold */
	copycount = count < msf->imsf_numsrc ?
		    count : msf->imsf_numsrc;
	len = flex_array_size(psl, sl_addr, copycount);
	msf->imsf_numsrc = count;
	msf_size = IP_MSFILTER_SIZE(copycount);
	if (copy_to_sockptr(optlen, &msf_size, sizeof(int)) ||
	    copy_to_sockptr(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* len > 0 implies count > 0, so psl is non-NULL here */
	if (len &&
	    copy_to_sockptr_offset(optval,
				   offsetof(struct ip_msfilter, imsf_slist_flex),
				   psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	return err;
}

/* Protocol-independent variant of ip_mc_msfget(): read back the filter
 * mode and up to gsf->gf_numsrc sources for the group in gsf->gf_group,
 * writing each source as a zeroed struct sockaddr_storage at successive
 * ss_offset positions in optval.  gf_numsrc is rewritten to the full
 * source count.  Caller must hold RTNL.  Returns 0 or a negative errno.
 */
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
		 sockptr_t optval, size_t ss_offset)
{
	int i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	ASSERT_RTNL();

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		return -EADDRNOTAVAIL;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	count = psl ? psl->sl_count : 0;
	/* copycount > 0 implies psl != NULL (count would be 0 otherwise) */
	copycount = count < gsf->gf_numsrc ?
		    count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	for (i = 0; i < copycount; i++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_sockptr_offset(optval, ss_offset,
					   &ss, sizeof(ss)))
			return -EFAULT;
		ss_offset += sizeof(ss);
	}
	return 0;
}

/*
 * check if a multicast source filter allows delivery for a given <src,dst,intf>
 */
int ip_mc_sf_allow(const struct sock *sk, __be32 loc_addr, __be32 rmt_addr,
		   int dif, int sdif)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	/* non-multicast destinations are not filtered here */
	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    (pmc->multi.imr_ifindex == dif ||
		     (sdif && pmc->multi.imr_ifindex == sdif)))
			break;
	}
	/* no membership on this interface: fall back to the socket's
	 * MC_ALL setting
	 */
	ret = inet_test_bit(MC_ALL, sk);
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	/* member with an empty filter: EXCLUDE {} allows, INCLUDE {} denies */
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i = 0; i < psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;	/* INCLUDE and source not listed: deny */
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;	/* EXCLUDE and source listed: deny */
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}

/*
 * A socket is closing.
 */

/* Drop every multicast membership held by a closing socket: unlink each
 * ip_mc_socklist entry, tear down its interface-level source filter and
 * group reference, and RCU-free the entry.  Takes RTNL itself.
 */
void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (!inet->mc_list)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		/* called even with in_dev == NULL (device already gone) */
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}

/* called with rcu_read_lock() */
/* Decide whether a packet for group mc_addr from src_addr may be
 * delivered on in_dev.  IGMP itself (proto == IPPROTO_IGMP) is always
 * accepted for joined groups; otherwise the group's per-source counters
 * arbitrate.  Returns non-zero to allow delivery.
 */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto)
{
	struct ip_mc_list *im;
	struct ip_mc_list __rcu **mc_hash;
	struct ip_sf_list *psf;
	int rv = 0;

	/* use the hash table when the device has grown one, otherwise
	 * walk the plain list
	 */
	mc_hash = rcu_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);

		for (im = rcu_dereference(mc_hash[hash]);
		     im != NULL;
		     im = rcu_dereference(im->next_hash)) {
			if (im->multiaddr == mc_addr)
				break;
		}
	} else {
		for_each_pmc_rcu(in_dev, im) {
			if (im->multiaddr == mc_addr)
				break;
		}
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			spin_lock_bh(&im->lock);
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			/* allow if some socket INCLUDEs this source, or if
			 * not every EXCLUDE-mode socket excludes it
			 */
			if (psf)
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
			spin_unlock_bh(&im->lock);
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	return rv;
}

#if defined(CONFIG_PROC_FS)
/* iterator state for /proc/net/igmp: current device and its in_device */
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

/* position on the first multicast entry of the first device that has one */
static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

/* advance to the next entry, crossing device boundaries as needed */
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

/* step forward pos entries from the start; NULL if pos runs past the end */
static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ?
		igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;
	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

/* render one /proc/net/igmp line; a per-device header line is emitted
 * when the entry is the first on its device's mc_list
 */
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char   *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name,
				   state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}

static const struct seq_operations igmp_mc_seq_ops = {
	.start	=	igmp_mc_seq_start,
	.next	=	igmp_mc_seq_next,
	.stop	=	igmp_mc_seq_stop,
	.show	=	igmp_mc_seq_show,
};

/* iterator state for /proc/net/mcfilter: device, in_device and the
 * group whose source list is being walked (held with im->lock)
 */
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

/* find the first source-filter entry; on success state->im is left with
 * its spin lock held (released by get_next or seq_stop)
 */
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(!idev))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

/* advance to the next source entry, dropping/taking im->lock as the walk
 * moves between groups and devices
 */
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
3057 } 3058 3059 static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos) 3060 { 3061 struct ip_sf_list *psf = igmp_mcf_get_first(seq); 3062 if (psf) 3063 while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL) 3064 --pos; 3065 return pos ? NULL : psf; 3066 } 3067 3068 static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos) 3069 __acquires(rcu) 3070 { 3071 rcu_read_lock(); 3072 return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 3073 } 3074 3075 static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3076 { 3077 struct ip_sf_list *psf; 3078 if (v == SEQ_START_TOKEN) 3079 psf = igmp_mcf_get_first(seq); 3080 else 3081 psf = igmp_mcf_get_next(seq, v); 3082 ++*pos; 3083 return psf; 3084 } 3085 3086 static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) 3087 __releases(rcu) 3088 { 3089 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); 3090 if (likely(state->im)) { 3091 spin_unlock_bh(&state->im->lock); 3092 state->im = NULL; 3093 } 3094 state->idev = NULL; 3095 state->dev = NULL; 3096 rcu_read_unlock(); 3097 } 3098 3099 static int igmp_mcf_seq_show(struct seq_file *seq, void *v) 3100 { 3101 struct ip_sf_list *psf = v; 3102 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); 3103 3104 if (v == SEQ_START_TOKEN) { 3105 seq_puts(seq, "Idx Device MCA SRC INC EXC\n"); 3106 } else { 3107 seq_printf(seq, 3108 "%3d %6.6s 0x%08x " 3109 "0x%08x %6lu %6lu\n", 3110 state->dev->ifindex, state->dev->name, 3111 ntohl(state->im->multiaddr), 3112 ntohl(psf->sf_inaddr), 3113 psf->sf_count[MCAST_INCLUDE], 3114 psf->sf_count[MCAST_EXCLUDE]); 3115 } 3116 return 0; 3117 } 3118 3119 static const struct seq_operations igmp_mcf_seq_ops = { 3120 .start = igmp_mcf_seq_start, 3121 .next = igmp_mcf_seq_next, 3122 .stop = igmp_mcf_seq_stop, 3123 .show = igmp_mcf_seq_show, 3124 }; 3125 3126 static int __net_init igmp_net_init(struct net *net) 3127 { 3128 struct proc_dir_entry *pde; 3129 int err; 3130 3131 
pde = proc_create_net("igmp", 0444, net->proc_net, &igmp_mc_seq_ops, 3132 sizeof(struct igmp_mc_iter_state)); 3133 if (!pde) 3134 goto out_igmp; 3135 pde = proc_create_net("mcfilter", 0444, net->proc_net, 3136 &igmp_mcf_seq_ops, sizeof(struct igmp_mcf_iter_state)); 3137 if (!pde) 3138 goto out_mcfilter; 3139 err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET, 3140 SOCK_DGRAM, 0, net); 3141 if (err < 0) { 3142 pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n", 3143 err); 3144 goto out_sock; 3145 } 3146 3147 return 0; 3148 3149 out_sock: 3150 remove_proc_entry("mcfilter", net->proc_net); 3151 out_mcfilter: 3152 remove_proc_entry("igmp", net->proc_net); 3153 out_igmp: 3154 return -ENOMEM; 3155 } 3156 3157 static void __net_exit igmp_net_exit(struct net *net) 3158 { 3159 remove_proc_entry("mcfilter", net->proc_net); 3160 remove_proc_entry("igmp", net->proc_net); 3161 inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk); 3162 } 3163 3164 static struct pernet_operations igmp_net_ops = { 3165 .init = igmp_net_init, 3166 .exit = igmp_net_exit, 3167 }; 3168 #endif 3169 3170 static int igmp_netdev_event(struct notifier_block *this, 3171 unsigned long event, void *ptr) 3172 { 3173 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3174 struct in_device *in_dev; 3175 3176 switch (event) { 3177 case NETDEV_RESEND_IGMP: 3178 in_dev = __in_dev_get_rtnl(dev); 3179 if (in_dev) 3180 ip_mc_rejoin_groups(in_dev); 3181 break; 3182 default: 3183 break; 3184 } 3185 return NOTIFY_DONE; 3186 } 3187 3188 static struct notifier_block igmp_notifier = { 3189 .notifier_call = igmp_netdev_event, 3190 }; 3191 3192 int __init igmp_mc_init(void) 3193 { 3194 #if defined(CONFIG_PROC_FS) 3195 int err; 3196 3197 err = register_pernet_subsys(&igmp_net_ops); 3198 if (err) 3199 return err; 3200 err = register_netdevice_notifier(&igmp_notifier); 3201 if (err) 3202 goto reg_notif_fail; 3203 return 0; 3204 3205 reg_notif_fail: 3206 unregister_pernet_subsys(&igmp_net_ops); 3207 
return err; 3208 #else 3209 return register_netdevice_notifier(&igmp_notifier); 3210 #endif 3211 } 3212