// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	Internet Group Management Protocol  [IGMP]
 *
 *	This code implements the IGMP protocol as defined in RFC1112. There has
 *	been a further revision of this protocol since which is now supported.
 *
 *	If you have trouble with this module be careful what gcc you have used,
 *	the older version didn't come out right using gcc 2.5.8, the newer one
 *	seems to fall out with gcc 2.6.2.
 *
 *	Authors:
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Fixes:
 *
 *		Alan Cox	:	Added lots of __inline__ to optimise
 *					the memory usage of all the tiny little
 *					functions.
 *		Alan Cox	:	Dumped the header building experiment.
 *		Alan Cox	:	Minor tweaks ready for multicast routing
 *					and extended IGMP protocol.
 *		Alan Cox	:	Removed a load of inline directives. Gcc 2.5.8
 *					writes utterly bogus code otherwise (sigh)
 *					fixed IGMP loopback to behave in the manner
 *					desired by mrouted, fixed the fact it has been
 *					broken since 1.3.6 and cleaned up a few minor
 *					points.
 *
 *		Chih-Jen Chang	:	Tried to revise IGMP to Version 2
 *		Tsu-Sheng Tsao		E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
 *					The enhancements are mainly based on Steve Deering's
 *					ipmulti-3.5 source code.
 *		Chih-Jen Chang	:	Added the igmp_get_mrouter_info and
 *		Tsu-Sheng Tsao		igmp_set_mrouter_info to keep track of
 *					the mrouted version on that device.
 *		Chih-Jen Chang	:	Added the max_resp_time parameter to
 *		Tsu-Sheng Tsao		igmp_heard_query(). Using this parameter
 *					to identify the multicast router version
 *					and do what the IGMP version 2 specified.
 *		Chih-Jen Chang	:	Added a timer to revert to IGMP V2 router
 *		Tsu-Sheng Tsao		if the specified time expired.
 *		Alan Cox	:	Stop IGMP from 0.0.0.0 being accepted.
 *		Alan Cox	:	Use GFP_ATOMIC in the right places.
 *		Christian Daudt	:	igmp timer wasn't set for local group
 *					memberships but was being deleted,
 *					which caused a "del_timer() called
 *					from %p with timer not initialized\n"
 *					message (960131).
 *		Christian Daudt	:	removed del_timer from
 *					igmp_timer_expire function (960205).
 *		Christian Daudt	:	igmp_heard_report now only calls
 *					igmp_timer_expire if tm->running is
 *					true (960216).
 *		Malcolm Beattie	:	ttl comparison wrong in igmp_rcv made
 *					igmp_heard_query never trigger. Expiry
 *					miscalculation fixed in igmp_heard_query
 *					and random() made to return unsigned to
 *					prevent negative expiry times.
 *		Alexey Kuznetsov:	Wrong group leaving behaviour, backport
 *					fix from pending 2.1.x patches.
 *		Alan Cox	:	Forget to enable FDDI support earlier.
 *		Alexey Kuznetsov:	Fixed leaving groups on device down.
 *		Alexey Kuznetsov:	Accordance to igmp-v2-06 draft.
 *		David L Stevens:	IGMPv3 support, with help from
 *					Vinay Kulkarni
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
#include <linux/pkt_sched.h>
#include <linux/byteorder/generic.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/inet_common.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */

#define IGMP_QUERY_INTERVAL			(125*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL		(10*HZ)

#define IGMP_INITIAL_REPORT_DELAY		(1)

/* IGMP_INITIAL_REPORT_DELAY is not from the IGMP specs!
 * The IGMP specs require reporting membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict the specs provided this delay is small enough.
 */

#define IGMP_V1_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
	 ((in_dev)->mr_v1_seen && \
	  time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
	 ((in_dev)->mr_v2_seen && \
	  time_before(jiffies, (in_dev)->mr_v2_seen)))
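
/* Note on version selection (a summary of the macros above; the sysctl
 * names are an assumption about how FORCE_IGMP_VERSION is exposed, i.e.
 * net.ipv4.conf.<dev>.force_igmp_version and its "all" variant): a forced
 * value pins the host to IGMPv1 or IGMPv2 outright, while mr_v1_seen and
 * mr_v2_seen hold jiffies deadlines implementing the Older Version
 * Querier Present timeout of RFC 3376 7.2.1.  Roughly:
 *
 *	if (forced == 1 || time_before(jiffies, mr_v1_seen))
 *		behave as an IGMPv1 host;
 *	else if (forced == 2 || time_before(jiffies, mr_v2_seen))
 *		behave as an IGMPv2 host;
 *	else
 *		behave as an IGMPv3 host;
 */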

static int unsolicited_report_interval(struct in_device *in_dev)
{
	int interval_ms, interval_jiffies;

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV2_UNSOLICITED_REPORT_INTERVAL);
	else /* v3 */
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV3_UNSOLICITED_REPORT_INTERVAL);

	interval_jiffies = msecs_to_jiffies(interval_ms);

	/* _timer functions can't handle a delay of 0 jiffies so ensure
	 * we always return a positive value.
	 */
	if (interval_jiffies <= 0)
		interval_jiffies = 1;
	return interval_jiffies;
}

static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp);
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta);

static void ip_ma_put(struct ip_mc_list *im)
{
	if (refcount_dec_and_test(&im->refcnt)) {
		in_dev_put(im->interface);
		kfree_rcu(im, rcu);
	}
}

#define for_each_pmc_rcu(in_dev, pmc)				\
	for (pmc = rcu_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)				\
	for (pmc = rtnl_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rtnl_dereference(pmc->next_rcu))

static void ip_sf_list_clear_all(struct ip_sf_list *psf)
{
	struct ip_sf_list *next;

	while (psf) {
		next = psf->sf_next;
		kfree(psf);
		psf = next;
	}
}

#ifdef CONFIG_IP_MULTICAST

/*
 *	Timer management
 */

static void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	if (del_timer(&im->timer))
		refcount_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}

/* Must be called with im->lock held */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = get_random_u32_below(max_delay);

	im->tm_running = 1;
	if (refcount_inc_not_zero(&im->refcnt)) {
		if (mod_timer(&im->timer, jiffies + tv + 2))
			ip_ma_put(im);
	}
}

static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = get_random_u32_below(in_dev->mr_maxdelay);
	unsigned long exp = jiffies + tv + 2;

	if (in_dev->mr_gq_running &&
	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
		return;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, exp))
		in_dev_hold(in_dev);
}

static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = get_random_u32_below(delay);

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (del_timer(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		refcount_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}
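
/* Note on the refcount discipline above (a reading of these helpers, not
 * a rule stated elsewhere in this file): every pending im->timer holds
 * one reference on the ip_mc_list.  igmp_start_timer() takes a reference
 * before arming and drops the extra one when mod_timer() reports the
 * timer was already pending; igmp_stop_timer() drops a reference only
 * when del_timer() confirms it deactivated a pending timer; and
 * igmp_mod_timer() re-adds a still-sooner timer without touching the
 * count.  The "+ 2" in the expiry keeps a random delay of 0 from
 * expiring immediately.
 */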

/*
 *	Send an IGMP report.
 */

#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)


static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
		 int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}

static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip_sf_list *psf;
	int scount = 0;

	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

/* source address selection per RFC 3376 section 4.2.13 */
static __be32 igmpv3_get_srcaddr(struct net_device *dev,
				 const struct flowi4 *fl4)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return htonl(INADDR_ANY);

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (fl4->saddr == ifa->ifa_local)
			return fl4->saddr;
	}

	return htonl(INADDR_ANY);
}

static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);
	struct flowi4 fl4;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	unsigned int size;

	size = min(mtu, IP_MAX_MTU);
	while (1) {
		skb = alloc_skb(size + hlen + tlen,
				GFP_ATOMIC | __GFP_NOWARN);
		if (skb)
			break;
		size >>= 1;
		if (size < 256)
			return NULL;
	}
	skb->priority = TC_PRIO_CONTROL;

	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	pip->version  = 4;
	pip->ihl      = (sizeof(struct iphdr)+4)>>2;
	pip->tos      = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl      = 1;
	pip->daddr    = fl4.daddr;

	rcu_read_lock();
	pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
	rcu_read_unlock();

	pip->protocol = IPPROTO_IGMP;
	pip->tot_len  = 0;	/* filled in later */
	ip_select_ident(net, skb, NULL);
	((u8 *)&pip[1])[0] = IPOPT_RA;
	((u8 *)&pip[1])[1] = 4;
	((u8 *)&pip[1])[2] = 0;
	((u8 *)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}

static int igmpv3_sendpack(struct sk_buff *skb)
{
	struct igmphdr *pig = igmp_hdr(skb);
	const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);

	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);

	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
}

static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}

static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr;

	if (!skb) {
		skb = igmpv3_newpack(dev, mtu);
		if (!skb)
			return NULL;
	}
	pgr = skb_put(skb, sizeof(struct igmpv3_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->multiaddr;
	pih = igmpv3_report_hdr(skb);
	pih->ngrec = htons(ntohs(pih->ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)
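
/* Note on packing (a summary of the helpers above and add_grec() below):
 * igmpv3_newpack() sizes one skb to the device MTU, AVAILABLE() is just
 * its remaining tailroom, and grec_size() is the worst-case record size,
 * sizeof(struct igmpv3_grec) plus four bytes per reportable source.
 * When the next record or source no longer fits, the current skb is
 * flushed with igmpv3_sendpack() and a fresh one is started, so a single
 * state change may legitimately emit several report packets.
 */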

static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct net *net = dev_net(dev);
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;
	unsigned int mtu;

	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;
	if (ipv4_is_local_multicast(pmc->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV4_MIN_MTU)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		   type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3376 5.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->crcount)) &&
		    (type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	/* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}

static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	struct net *net = dev_net(in_dev->dev);
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			if (ipv4_is_local_multicast(pmc->multiaddr) &&
			    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
	struct ip_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

static void kfree_pmc(struct ip_mc_list *pmc)
{
	ip_sf_list_clear_all(pmc->sources);
	ip_sf_list_clear_all(pmc->tomb);
	kfree(pmc);
}

static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	rcu_read_lock();
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree_pmc(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for_each_pmc_rcu(in_dev, pmc) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	rcu_read_unlock();

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}
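
/* Note: igmpv3_send_cr() above builds the *change* records (ALLOW_NEW/
 * BLOCK_OLD and CHANGE_TO_IN/EX) whose retransmission RFC 3376 drives
 * [Robustness Variable] times via crcount/sf_crcount and the
 * mr_ifc_timer, while igmpv3_send_report() is the *current state*
 * counterpart (MODE_IS_*) used for query responses.  Fully-left groups
 * survive on in_dev->mc_tomb just long enough for their remaining
 * BLOCK/TO_IN retransmissions, then are freed here.
 */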

static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
			    int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32 group = pmc ? pmc->multiaddr : 0;
	struct flowi4 fl4;
	__be32 dst;
	int hlen, tlen;

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);

	if (ipv4_is_local_multicast(group) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return 0;

	if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt))
		return -1;

	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
	if (!skb) {
		ip_rt_put(rt);
		return -1;
	}
	skb->priority = TC_PRIO_CONTROL;

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, hlen);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	iph->version  = 4;
	iph->ihl      = (sizeof(struct iphdr)+4)>>2;
	iph->tos      = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl      = 1;
	iph->daddr    = dst;
	iph->saddr    = fl4.saddr;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(net, skb, NULL);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;

	ih = skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(net, skb->sk, skb);
}

static void igmp_gq_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	in_dev_put(in_dev);
}

static void igmp_ifc_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
	u32 mr_ifc_count;

	igmpv3_send_cr(in_dev);
restart:
	mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);

	if (mr_ifc_count) {
		if (cmpxchg(&in_dev->mr_ifc_count,
			    mr_ifc_count,
			    mr_ifc_count - 1) != mr_ifc_count)
			goto restart;
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}
	in_dev_put(in_dev);
}

static void igmp_ifc_event(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
	igmp_ifc_start_timer(in_dev, 1);
}


static void igmp_timer_expire(struct timer_list *t)
{
	struct ip_mc_list *im = from_timer(im, t, timer);
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count && --im->unsolicit_count)
		igmp_start_timer(im, unsolicited_report_interval(in_dev));

	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}
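
/* Note on the per-group timer above: igmp_timer_expire() serves both
 * query responses and the unsolicited reports sent after a join.
 * unsolicit_count is preloaded with the robustness variable, so a new
 * membership is re-announced that many times, each after a random delay
 * of at most unsolicited_report_interval().  The report format is chosen
 * only at expiry, so a v1/v2 query heard in the meantime downgrades the
 * remaining retransmissions too.
 */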

/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}

static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}

/* return true if packet was dropped */
static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;
	struct net *net = dev_net(in_dev->dev);

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return false;
	if (ipv4_is_local_multicast(group) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return false;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
	return false;
}

/* return true if packet was dropped */
static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;
	struct net *net = dev_net(in_dev->dev);


	if (len == 8) {
		if (ih->code == 0) {
			/* Alas, an old v1 router is present here. */

			max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
			in_dev->mr_v1_seen = jiffies +
				(in_dev->mr_qrv * in_dev->mr_qi) +
				in_dev->mr_qri;
			group = 0;
		} else {
			/* v2 router present */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			in_dev->mr_v2_seen = jiffies +
				(in_dev->mr_qrv * in_dev->mr_qi) +
				in_dev->mr_qri;
		}
		/* cancel the interface change timer */
		WRITE_ONCE(in_dev->mr_ifc_count, 0);
		if (del_timer(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return true;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* This is a v3 query with v1 queriers present */
		max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Interpretation of the max_delay code is problematic here.
		 * A real v2 host would use ih_code directly, while v3 has a
		 * different encoding. We use the v3 encoding as more likely
		 * to be intended in a v3 query.
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
	} else { /* v3 */
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return true;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return true;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
		in_dev->mr_maxdelay = max_delay;

		/* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently
		 * received value was zero, use the default or statically
		 * configured value.
		 */
		in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;

		/* RFC3376, 8.3. Query Response Interval:
		 * The number of seconds represented by the [Query Response
		 * Interval] must be less than the [Query Interval].
		 */
		if (in_dev->mr_qri >= in_dev->mr_qi)
			in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;

		if (!group) { /* general query */
			if (ih3->nsrcs)
				return true;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return false;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/*
	 * - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
	return false;
}

/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	int len = skb->len;
	bool dropped = true;

	if (netif_is_l3_master(dev)) {
		dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
		if (!dev)
			goto drop;
	}

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	if (skb_checksum_simple_validate(skb))
		goto drop;

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		dropped = igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			dropped = igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	if (dropped)
		kfree_skb(skb);
	else
		consume_skb(skb);
	return 0;
}

#endif


/*
 *	Add a filter to a device
 */

static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in ndo_set_rx_mode
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 *	Remove a filter from a device
 */

static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp)
{
	struct ip_mc_list *pmc;
	struct net *net = dev_net(in_dev->dev);

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), gfp);
	if (!pmc)
		return;
	spin_lock_init(&pmc->lock);
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

/*
 * restore ip_mc_list deleted records
 */
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf;
	struct net *net = dev_net(in_dev->dev);
	__be32 multiaddr = im->multiaddr;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	spin_lock_bh(&im->lock);
	if (pmc) {
		im->interface = pmc->interface;
		if (im->sfmode == MCAST_INCLUDE) {
			swap(im->tomb, pmc->tomb);
			swap(im->sources, pmc->sources);
			for (psf = im->sources; psf; psf = psf->sf_next)
				psf->sf_crcount = in_dev->mr_qrv ?:
					READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		} else {
			im->crcount = in_dev->mr_qrv ?:
				READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		}
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	spin_unlock_bh(&im->lock);
}

/*
 * flush ip_mc_list deleted records
 */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		ip_sf_list_clear_all(psf);
	}
	rcu_read_unlock();
}
#endif

static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im, gfp);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void igmp_group_dropped(struct ip_mc_list *im)
{
	__igmp_group_dropped(im, GFP_KERNEL);
}

static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
#endif

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) &&
	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
		return;

	if (in_dev->dead)
		return;

	im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	/* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
	 * not send filter-mode change record as the mode should be from
	 * IN() to IN(A).
	 */
	if (im->sfmode == MCAST_EXCLUDE)
		im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);

	igmp_ifc_event(in_dev);
#endif
}


/*
 *	Multicast list managers
 */

static u32 ip_mc_hash(const struct ip_mc_list *im)
{
	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}

static void ip_mc_hash_add(struct in_device *in_dev,
			   struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash;
	u32 hash;

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		rcu_assign_pointer(mc_hash[hash], im);
		return;
	}

	/* do not use a hash table for small number of items */
	if (in_dev->mc_count < 4)
		return;

	mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
			  GFP_KERNEL);
	if (!mc_hash)
		return;

	for_each_pmc_rtnl(in_dev, im) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		RCU_INIT_POINTER(mc_hash[hash], im);
	}

	rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}

static void ip_mc_hash_remove(struct in_device *in_dev,
			      struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
	struct ip_mc_list *aux;

	if (!mc_hash)
		return;
	mc_hash += ip_mc_hash(im);
	while ((aux = rtnl_dereference(*mc_hash)) != im)
		mc_hash = &aux->next_hash;
	*mc_hash = im->next_hash;
}
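
/* Note on the lookup strategy above: mc_list is a plain singly linked
 * RCU list, and a hash table (2^MC_HASH_SZ_LOG buckets) is only grown
 * once a device holds four or more memberships; below that threshold
 * the linear walk is cheaper than the extra allocation.  Removal relies
 * on the RTNL lock for exclusion, so ip_mc_hash_remove() can relink
 * next_hash pointers in place without replacing the bucket array.
 */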

/*
 * A socket has joined a multicast group on device dev.
 */
static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
				unsigned int mode, gfp_t gfp)
{
	struct ip_mc_list __rcu **mc_hash;
	struct ip_mc_list *im;

	ASSERT_RTNL();

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)addr, MC_HASH_SZ_LOG);

		for (im = rtnl_dereference(mc_hash[hash]);
		     im;
		     im = rtnl_dereference(im->next_hash)) {
			if (im->multiaddr == addr)
				break;
		}
	} else {
		for_each_pmc_rtnl(in_dev, im) {
			if (im->multiaddr == addr)
				break;
		}
	}

	if (im) {
		im->users++;
		ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
		goto out;
	}

	im = kzalloc(sizeof(*im), gfp);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	/* initial mode is (EX, empty) */
	im->sfmode = mode;
	im->sfcount[mode] = 1;
	refcount_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	timer_setup(&im->timer, igmp_timer_expire, 0);
#endif

	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

	ip_mc_hash_add(in_dev, im);

#ifdef CONFIG_IP_MULTICAST
	igmpv3_del_delrec(in_dev, im);
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}

void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
}
EXPORT_SYMBOL(__ip_mc_inc_group);

void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	__ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
}
EXPORT_SYMBOL(ip_mc_inc_group);

static int ip_mc_check_iphdr(struct sk_buff *skb)
{
	const struct iphdr *iph;
	unsigned int len;
	unsigned int offset = skb_network_offset(skb) + sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
		return -EINVAL;

	offset += ip_hdrlen(skb) - sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	len = skb_network_offset(skb) + ntohs(iph->tot_len);
	if (skb->len < len || len < offset)
		return -EINVAL;

	skb_set_transport_header(skb, offset);

	return 0;
}

static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb);

	len += sizeof(struct igmpv3_report);

	return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
}

static int ip_mc_check_igmp_query(struct sk_buff *skb)
{
	unsigned int transport_len = ip_transport_len(skb);
	unsigned int len;

	/* IGMPv{1,2}? */
	if (transport_len != sizeof(struct igmphdr)) {
		/* or IGMPv3? */
		if (transport_len < sizeof(struct igmpv3_query))
			return -EINVAL;

		len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;
	}

	/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
	 * all-systems destination address (224.0.0.1) for general queries
	 */
	if (!igmp_hdr(skb)->group &&
	    ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
		return -EINVAL;

	return 0;
}

static int ip_mc_check_igmp_msg(struct sk_buff *skb)
{
	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		return 0;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return ip_mc_check_igmp_reportv3(skb);
	case IGMP_HOST_MEMBERSHIP_QUERY:
		return ip_mc_check_igmp_query(skb);
	default:
		return -ENOMSG;
	}
}

static __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

static int ip_mc_check_igmp_csum(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
	unsigned int transport_len = ip_transport_len(skb);
	struct sk_buff *skb_chk;

	if (!ip_mc_may_pull(skb, len))
		return -EINVAL;

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       ip_mc_validate_checksum);
	if (!skb_chk)
		return -EINVAL;

	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return 0;
}

/**
 * ip_mc_check_igmp - checks whether this is a sane IGMP packet
 * @skb: the skb to validate
 *
 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
 * the skb transport header accordingly and returns zero.
 *
 * -EINVAL: A broken packet was detected, i.e. it violates some internet
 *  standard
 * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
 * -ENOMEM: A memory allocation failure happened.
 *
 * Caller needs to set the skb network header and free any returned skb if it
 * differs from the provided skb.
 */
int ip_mc_check_igmp(struct sk_buff *skb)
{
	int ret = ip_mc_check_iphdr(skb);

	if (ret < 0)
		return ret;

	if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
		return -ENOMSG;

	ret = ip_mc_check_igmp_csum(skb);
	if (ret < 0)
		return ret;

	return ip_mc_check_igmp_msg(skb);
}
EXPORT_SYMBOL(ip_mc_check_igmp);
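
/* A minimal caller sketch for ip_mc_check_igmp() (the real users live
 * outside this file, e.g. multicast snooping paths; this is only an
 * illustration of the contract documented above):
 *
 *	skb_reset_network_header(skb);
 *	err = ip_mc_check_igmp(skb);
 *	if (err == -ENOMSG)
 *		... valid IPv4 but not IGMP: pass the packet through ...
 *	else if (err < 0)
 *		... malformed: drop ...
 *	else
 *		... IGMP, transport header set: inspect igmp_hdr(skb) ...
 */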

/*
 * Resend IGMP JOIN report; used by netdev notifier.
 */
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;
	struct net *net = dev_net(in_dev->dev);

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
			continue;

		/* a failover is happening and switches
		 * must be notified immediately
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}

/*
 * A socket has left a multicast group on device dev
 */

void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	struct ip_mc_list *i;
	struct ip_mc_list __rcu **ip;

	ASSERT_RTNL();

	for (ip = &in_dev->mc_list;
	     (i = rtnl_dereference(*ip)) != NULL;
	     ip = &i->next_rcu) {
		if (i->multiaddr == addr) {
			if (--i->users == 0) {
				ip_mc_hash_remove(in_dev, i);
				*ip = i->next_rcu;
				in_dev->mc_count--;
				__igmp_group_dropped(i, gfp);
				ip_mc_clear_src(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);
				return;
			}
			break;
		}
	}
}
EXPORT_SYMBOL(__ip_mc_dec_group);

/* Device changing type */

void ip_mc_unmap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);
}

void ip_mc_remap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/* Device going down */

void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
	WRITE_ONCE(in_dev->mr_ifc_count, 0);
	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (del_timer(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}

#ifdef CONFIG_IP_MULTICAST
static void ip_mc_reset(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	in_dev->mr_qi = IGMP_QUERY_INTERVAL;
	in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
	in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
}
#else
static void ip_mc_reset(struct in_device *in_dev)
{
}
#endif

void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

#ifdef CONFIG_IP_MULTICAST
	timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
	timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
#endif
	ip_mc_reset(in_dev);

	spin_lock_init(&in_dev->mc_tomb_lock);
}
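
/* Note on the device lifecycle (these hooks are driven from the inetdev
 * notifier paths, which are not part of this file): ip_mc_down() drops
 * every group and stops the interface timers but keeps the membership
 * list, so a later ip_mc_up() can replay it; ip_mc_unmap()/ip_mc_remap()
 * do the same dance when a device changes type.  Only ip_mc_destroy_dev()
 * actually frees the list.  ip_mc_up() also re-joins 224.0.0.1
 * (IGMP_ALL_HOSTS), the membership every multicast-capable interface
 * holds implicitly.
 */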

/* Device going up */

void ip_mc_up(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	ip_mc_reset(in_dev);
	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/*
 * Device is about to be destroyed: clean up.
 */

void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers */
	ip_mc_down(in_dev);
#ifdef CONFIG_IP_MULTICAST
	igmpv3_clear_delrec(in_dev);
#endif

	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
		in_dev->mc_list = i->next_rcu;
		in_dev->mc_count--;
		ip_mc_clear_src(i);
		ip_ma_put(i);
	}
}

/* RTNL is locked */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		return idev;
	}
	if (imr->imr_address.s_addr) {
		dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
		if (!dev)
			return NULL;
	}

	if (!dev) {
		struct rtable *rt = ip_route_output(net,
						    imr->imr_multiaddr.s_addr,
						    0, 0, 0,
						    RT_SCOPE_UNIVERSE);
		if (!IS_ERR(rt)) {
			dev = rt->dst.dev;
			ip_rt_put(rt);
		}
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}

/*
 * Join a socket to a group
 */

static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}

#ifndef CONFIG_IP_MULTICAST
#define igmp_ifc_event(x)	do { } while (0)
#endif
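
/* Note: when the last filter reference to a source goes away under
 * IGMPv3 (no v1 or v2 queriers seen), ip_mc_del1_src() above does not
 * free the ip_sf_list entry if the source was previously active
 * (sf_oldin); it parks it on pmc->tomb with sf_crcount preloaded, so the
 * retransmission machinery can still announce BLOCK_OLD_SOURCES for it.
 * The nonzero return value is how callers learn a change record is due.
 */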

static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

#ifdef CONFIG_IP_MULTICAST
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf = pmc->sources; psf; psf = psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}
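
/* Note: sf_markstate() above and sf_setstate() below bracket every filter
 * update.  The first records, per source, whether it was effectively in
 * the interface filter under the old state (for EXCLUDE mode: excluded by
 * every excluding socket and included by none); the second compares that
 * snapshot with the new state and charges sf_crcount retransmissions, via
 * the tomb list where needed, only for sources whose effective status
 * actually flipped.  The return value counts such flips.
 */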

static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {

			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the interface list
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(pmc->interface->dev);
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *tomb, *sources;

	spin_lock_bh(&pmc->lock);
	tomb = pmc->tomb;
	pmc->tomb = NULL;
	sources = pmc->sources;
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
	spin_unlock_bh(&pmc->lock);

	ip_sf_list_clear_all(tomb);
	ip_sf_list_clear_all(sources);
}
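
/* For orientation (a userspace sketch, not code from this file): the
 * socket-facing entry points below back the classic setsockopt() join,
 * e.g.
 *
 *	struct ip_mreqn mreq = {
 *		.imr_multiaddr.s_addr = inet_addr("239.1.1.1"),
 *		.imr_ifindex = if_nametoindex("eth0"),
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * which reaches ip_mc_join_group() and records the membership both on
 * the socket (inet->mc_list) and on the interface (in_dev->mc_list).
 */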
/* Join a multicast group
 */
static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
			      unsigned int mode)
{
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;
	int err;

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (!iml)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = mode;
	rcu_assign_pointer(inet->mc_list, iml);
	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
	err = 0;
done:
	return err;
}

/* Join ASM (Any-Source Multicast) group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ip_mc_join_group);

/* Join SSM (Source-Specific Multicast) group
 */
int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
			 unsigned int mode)
{
	return __ip_mc_join_group(sk, imr, mode);
}

static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (!psf) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
				     iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			    iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(struct_size(psf, sl_addr, psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}

int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	ASSERT_RTNL();

	in_dev = ip_mc_find_dev(net, imr);
	if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
		ret = -ENODEV;
		goto out;
	}
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
				iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);

		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);
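/*
 * Per-source filter updates: backend for IP_{ADD,DROP}_SOURCE_MEMBERSHIP
 * and IP_{,UN}BLOCK_SOURCE.  Illustrative userspace view (not part of
 * this file):
 *
 *	struct ip_mreq_source mreqs = {
 *		.imr_multiaddr.s_addr	= inet_addr("232.1.1.1"),
 *		.imr_sourceaddr.s_addr	= inet_addr("192.0.2.1"),
 *		.imr_interface.s_addr	= htonl(INADDR_ANY),
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
 *		   &mreqs, sizeof(mreqs));
 *
 * "add" selects insert vs. delete; "omode" is the filter mode implied by
 * the option (MCAST_INCLUDE for the *_SOURCE_MEMBERSHIP pair,
 * MCAST_EXCLUDE for the BLOCK/UNBLOCK pair).
 */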
int ip_mc_source(int add, int omode, struct sock *sk,
		 struct ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			      NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				    sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			      &mreqs->imr_sourceaddr, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
				   &sk->sk_omem_alloc);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		if (psl)
			kfree_rcu(psl, rcu);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			    sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		      &mreqs->imr_sourceaddr, 1);
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}
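/*
 * Full-state filter replacement: backend for setsockopt(IP_MSFILTER).
 * Unlike the per-source deltas handled by ip_mc_source() above, the
 * caller hands in the complete (mode, source list) state for one group
 * in a single call.
 */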
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
						      msf->imsf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist_flex,
		       flex_array_size(msf, imsf_slist_flex, msf->imsf_numsrc));
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				    msf->imsf_fmode, newpsl->sl_count,
				    newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl,
				     struct_size(newpsl, sl_addr,
						 newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	psl = rtnl_dereference(pmc->sflist);
	if (psl) {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
				     psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
	} else {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
				     0, NULL, 0);
	}
	rcu_assign_pointer(pmc->sflist, newpsl);
	if (psl)
		kfree_rcu(psl, rcu);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}
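/*
 * getsockopt(IP_MSFILTER) counterpart of ip_mc_msfilter().  At most the
 * caller-supplied imsf_numsrc sources are copied out, but imsf_numsrc is
 * rewritten with the full source count so userspace can size a bigger
 * buffer and retry.
 */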
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
		 sockptr_t optval, sockptr_t optlen)
{
	int err, len, count, copycount, msf_size;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	if (!psl) {
		count = 0;
	} else {
		count = psl->sl_count;
	}
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = flex_array_size(psl, sl_addr, copycount);
	msf->imsf_numsrc = count;
	msf_size = IP_MSFILTER_SIZE(copycount);
	if (copy_to_sockptr(optlen, &msf_size, sizeof(int)) ||
	    copy_to_sockptr(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	if (len &&
	    copy_to_sockptr_offset(optval,
				   offsetof(struct ip_msfilter, imsf_slist_flex),
				   psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	return err;
}

int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
		 sockptr_t optval, size_t ss_offset)
{
	int i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	ASSERT_RTNL();

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		return -EADDRNOTAVAIL;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	count = psl ? psl->sl_count : 0;
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	for (i = 0; i < copycount; i++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_sockptr_offset(optval, ss_offset,
					   &ss, sizeof(ss)))
			return -EFAULT;
		ss_offset += sizeof(ss);
	}
	return 0;
}
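/*
 * Receive-path decision summary for ip_mc_sf_allow() below: with no
 * matching membership the verdict is the socket's IP_MULTICAST_ALL
 * flag; with a membership but no source list, EXCLUDE mode accepts and
 * INCLUDE rejects; otherwise INCLUDE accepts only the listed sources
 * while EXCLUDE drops exactly the listed ones.
 */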
/*
 *	check if a multicast source filter allows delivery for a given <src,dst,intf>
 */
int ip_mc_sf_allow(const struct sock *sk, __be32 loc_addr, __be32 rmt_addr,
		   int dif, int sdif)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    (pmc->multi.imr_ifindex == dif ||
		     (sdif && pmc->multi.imr_ifindex == sdif)))
			break;
	}
	ret = inet_test_bit(MC_ALL, sk);
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i = 0; i < psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}

/*
 *	A socket is closing.
 */

void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (!inet->mc_list)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}

/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto)
{
	struct ip_mc_list *im;
	struct ip_mc_list __rcu **mc_hash;
	struct ip_sf_list *psf;
	int rv = 0;

	mc_hash = rcu_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);

		for (im = rcu_dereference(mc_hash[hash]);
		     im != NULL;
		     im = rcu_dereference(im->next_hash)) {
			if (im->multiaddr == mc_addr)
				break;
		}
	} else {
		for_each_pmc_rcu(in_dev, im) {
			if (im->multiaddr == mc_addr)
				break;
		}
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			spin_lock_bh(&im->lock);
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				rv = psf->sf_count[MCAST_INCLUDE] ||
				     psf->sf_count[MCAST_EXCLUDE] !=
				     im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
			spin_unlock_bh(&im->lock);
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	return rv;
}
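/*
 * /proc/net/igmp and /proc/net/mcfilter seq_file support.  Both
 * iterators walk every in_device's mc_list under rcu_read_lock(); the
 * mcfilter iterator additionally holds im->lock while a group's source
 * list is being traversed.
 */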
#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define	igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);

	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;

	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name,
				   state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}

static const struct seq_operations igmp_mc_seq_ops = {
	.start	= igmp_mc_seq_start,
	.next	= igmp_mc_seq_next,
	.stop	= igmp_mc_seq_stop,
	.show	= igmp_mc_seq_show,
};
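/*
 * Sample /proc/net/igmp output (illustrative values):
 *
 *	Idx	Device    : Count Querier	Group    Users Timer	Reporter
 *	1	lo        :     1      V3
 *				010000E0     1 0:00000000		0
 *
 * The group is the raw __be32 printed with %08X, so 224.0.0.1 reads as
 * 010000E0 on a little-endian host.
 */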
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;

		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(!idev))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_sf_list *psf = igmp_mcf_get_first(seq);

	if (psf)
		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;

	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (likely(state->im)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device        MCA        SRC    INC    EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x 0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp_mcf_seq_ops = {
	.start	= igmp_mcf_seq_start,
	.next	= igmp_mcf_seq_next,
	.stop	= igmp_mcf_seq_stop,
	.show	= igmp_mcf_seq_show,
};

static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;
	int err;

	pde = proc_create_net("igmp", 0444, net->proc_net, &igmp_mc_seq_ops,
			      sizeof(struct igmp_mc_iter_state));
	if (!pde)
		goto out_igmp;
	pde = proc_create_net("mcfilter", 0444, net->proc_net,
			      &igmp_mcf_seq_ops, sizeof(struct igmp_mcf_iter_state));
	if (!pde)
		goto out_mcfilter;
	err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
				   SOCK_DGRAM, 0, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
		       err);
		goto out_sock;
	}

	return 0;

out_sock:
	remove_proc_entry("mcfilter", net->proc_net);
out_mcfilter:
	remove_proc_entry("igmp", net->proc_net);
out_igmp:
	return -ENOMEM;
}

static void __net_exit igmp_net_exit(struct net *net)
{
	remove_proc_entry("mcfilter", net->proc_net);
	remove_proc_entry("igmp", net->proc_net);
	inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};
#endif
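/*
 * NETDEV_RESEND_IGMP is raised when membership state must be re-sent on
 * a new underlying link (bonding failover, for example); rejoin every
 * group on the affected in_device.
 */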
static int igmp_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;

	switch (event) {
	case NETDEV_RESEND_IGMP:
		in_dev = __in_dev_get_rtnl(dev);
		if (in_dev)
			ip_mc_rejoin_groups(in_dev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block igmp_notifier = {
	.notifier_call = igmp_netdev_event,
};

int __init igmp_mc_init(void)
{
#if defined(CONFIG_PROC_FS)
	int err;

	err = register_pernet_subsys(&igmp_net_ops);
	if (err)
		return err;
	err = register_netdevice_notifier(&igmp_notifier);
	if (err)
		goto reg_notif_fail;
	return 0;

reg_notif_fail:
	unregister_pernet_subsys(&igmp_net_ops);
	return err;
#else
	return register_netdevice_notifier(&igmp_notifier);
#endif
}