1 /*- 2 * Copyright (c) 2007-2009 Bruce Simpson. 3 * Copyright (c) 1988 Stephen Deering. 4 * Copyright (c) 1992, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Stephen Deering of Stanford University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93 35 */ 36 37 /* 38 * Internet Group Management Protocol (IGMP) routines. 39 * [RFC1112, RFC2236, RFC3376] 40 * 41 * Written by Steve Deering, Stanford, May 1988. 42 * Modified by Rosen Sharma, Stanford, Aug 1994. 43 * Modified by Bill Fenner, Xerox PARC, Feb 1995. 44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995. 45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson. 
46 * 47 * MULTICAST Revision: 3.5.1.4 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/module.h> 56 #include <sys/malloc.h> 57 #include <sys/mbuf.h> 58 #include <sys/socket.h> 59 #include <sys/protosw.h> 60 #include <sys/kernel.h> 61 #include <sys/sysctl.h> 62 #include <sys/ktr.h> 63 #include <sys/condvar.h> 64 65 #include <net/if.h> 66 #include <net/netisr.h> 67 #include <net/vnet.h> 68 69 #include <netinet/in.h> 70 #include <netinet/in_var.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/ip_var.h> 74 #include <netinet/ip_options.h> 75 #include <netinet/igmp.h> 76 #include <netinet/igmp_var.h> 77 78 #include <machine/in_cksum.h> 79 80 #include <security/mac/mac_framework.h> 81 82 #ifndef KTR_IGMPV3 83 #define KTR_IGMPV3 KTR_INET 84 #endif 85 86 static struct igmp_ifinfo * 87 igi_alloc_locked(struct ifnet *); 88 static void igi_delete_locked(const struct ifnet *); 89 static void igmp_dispatch_queue(struct ifqueue *, int, const int); 90 static void igmp_fasttimo_vnet(void); 91 static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *); 92 static int igmp_handle_state_change(struct in_multi *, 93 struct igmp_ifinfo *); 94 static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *); 95 static int igmp_input_v1_query(struct ifnet *, const struct ip *, 96 const struct igmp *); 97 static int igmp_input_v2_query(struct ifnet *, const struct ip *, 98 const struct igmp *); 99 static int igmp_input_v3_query(struct ifnet *, const struct ip *, 100 /*const*/ struct igmpv3 *); 101 static int igmp_input_v3_group_query(struct in_multi *, 102 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *); 103 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *, 104 /*const*/ struct igmp *); 105 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *, 106 /*const*/ struct igmp *); 107 static void igmp_intr(struct mbuf *); 108 static int igmp_isgroupreported(const struct in_addr); 109 static struct mbuf * 110 igmp_ra_alloc(void); 111 #ifdef KTR 112 static char * igmp_rec_type_to_str(const int); 113 #endif 114 static void igmp_set_version(struct igmp_ifinfo *, const int); 115 static void igmp_slowtimo_vnet(void); 116 static int igmp_v1v2_queue_report(struct in_multi *, const int); 117 static void igmp_v1v2_process_group_timer(struct in_multi *, const int); 118 static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *); 119 static void igmp_v2_update_group(struct in_multi *, const int); 120 static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *); 121 static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *); 122 static struct mbuf * 123 igmp_v3_encap_report(struct ifnet *, struct mbuf *); 124 static int igmp_v3_enqueue_group_record(struct ifqueue *, 125 struct in_multi *, const int, const int, const int); 126 static int igmp_v3_enqueue_filter_change(struct ifqueue *, 127 struct in_multi *); 128 static void igmp_v3_process_group_timers(struct igmp_ifinfo *, 129 struct ifqueue *, struct ifqueue *, struct in_multi *, 130 const int); 131 static int igmp_v3_merge_state_changes(struct in_multi *, 132 struct ifqueue *); 133 static void igmp_v3_suppress_group_record(struct in_multi *); 134 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS); 135 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS); 136 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS); 137 138 static const struct netisr_handler igmp_nh = { 139 .nh_name = "igmp", 140 
.nh_handler = igmp_intr, 141 .nh_proto = NETISR_IGMP, 142 .nh_policy = NETISR_POLICY_SOURCE, 143 }; 144 145 /* 146 * System-wide globals. 147 * 148 * Unlocked access to these is OK, except for the global IGMP output 149 * queue. The IGMP subsystem lock ends up being system-wide for the moment, 150 * because all VIMAGEs have to share a global output queue, as netisrs 151 * themselves are not virtualized. 152 * 153 * Locking: 154 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK. 155 * Any may be taken independently; if any are held at the same 156 * time, the above lock order must be followed. 157 * * All output is delegated to the netisr. 158 * Now that Giant has been eliminated, the netisr may be inlined. 159 * * IN_MULTI_LOCK covers in_multi. 160 * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file, 161 * including the output queue. 162 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of 163 * per-link state iterators. 164 * * igmp_ifinfo is valid as long as PF_INET is attached to the interface, 165 * therefore it is not refcounted. 166 * We allow unlocked reads of igmp_ifinfo when accessed via in_multi. 167 * 168 * Reference counting 169 * * IGMP acquires its own reference every time an in_multi is passed to 170 * it and the group is being joined for the first time. 171 * * IGMP releases its reference(s) on in_multi in a deferred way, 172 * because the operations which process the release run as part of 173 * a loop whose control variables are directly affected by the release 174 * (that, and not recursing on the IF_ADDR_LOCK). 175 * 176 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds 177 * to a vnet in ifp->if_vnet. 178 * 179 * SMPng: XXX We may potentially race operations on ifma_protospec. 180 * The problem is that we currently lack a clean way of taking the 181 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing, 182 * as anything which modifies ifma needs to be covered by that lock. 183 * So check for ifma_protospec being NULL before proceeding. 184 */ 185 struct mtx igmp_mtx; 186 187 struct mbuf *m_raopt; /* Router Alert option */ 188 static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state"); 189 190 /* 191 * VIMAGE-wide globals. 192 * 193 * The IGMPv3 timers themselves need to run per-image, however, 194 * protosw timers run globally (see tcp). 195 * An ifnet can only be in one vimage at a time, and the loopback 196 * ifnet, loif, is itself virtualized. 197 * It would otherwise be possible to seriously hose IGMP state, 198 * and create inconsistencies in upstream multicast routing, if you have 199 * multiple VIMAGEs running on the same link joining different multicast 200 * groups, UNLESS the "primary IP address" is different. This is because 201 * IGMP for IPv4 does not force link-local addresses to be used for each 202 * node, unlike MLD for IPv6. 203 * Obviously the IGMPv3 per-interface state has per-vimage granularity 204 * also as a result. 205 * 206 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection 207 * policy to control the address used by IGMP on the link. 
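 *
 * For example, code paths in this file which need more than one of these
 * locks acquire and release them in the order documented above:
 *
 *	IN_MULTI_LOCK();
 *	IGMP_LOCK();
 *	...
 *	IGMP_UNLOCK();
 *	IN_MULTI_UNLOCK();
 *
 * as done in igmp_input_v1_query() and igmp_fasttimo_vnet() below.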
208 */ 209 static VNET_DEFINE(int, interface_timers_running); /* IGMPv3 general 210 * query response */ 211 static VNET_DEFINE(int, state_change_timers_running); /* IGMPv3 state-change 212 * retransmit */ 213 static VNET_DEFINE(int, current_state_timers_running); /* IGMPv1/v2 host 214 * report; IGMPv3 g/sg 215 * query response */ 216 217 #define V_interface_timers_running VNET(interface_timers_running) 218 #define V_state_change_timers_running VNET(state_change_timers_running) 219 #define V_current_state_timers_running VNET(current_state_timers_running) 220 221 static VNET_DEFINE(LIST_HEAD(, igmp_ifinfo), igi_head); 222 static VNET_DEFINE(struct igmpstat, igmpstat) = { 223 .igps_version = IGPS_VERSION_3, 224 .igps_len = sizeof(struct igmpstat), 225 }; 226 static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0}; 227 228 #define V_igi_head VNET(igi_head) 229 #define V_igmpstat VNET(igmpstat) 230 #define V_igmp_gsrdelay VNET(igmp_gsrdelay) 231 232 static VNET_DEFINE(int, igmp_recvifkludge) = 1; 233 static VNET_DEFINE(int, igmp_sendra) = 1; 234 static VNET_DEFINE(int, igmp_sendlocal) = 1; 235 static VNET_DEFINE(int, igmp_v1enable) = 1; 236 static VNET_DEFINE(int, igmp_v2enable) = 1; 237 static VNET_DEFINE(int, igmp_legacysupp); 238 static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3; 239 240 #define V_igmp_recvifkludge VNET(igmp_recvifkludge) 241 #define V_igmp_sendra VNET(igmp_sendra) 242 #define V_igmp_sendlocal VNET(igmp_sendlocal) 243 #define V_igmp_v1enable VNET(igmp_v1enable) 244 #define V_igmp_v2enable VNET(igmp_v2enable) 245 #define V_igmp_legacysupp VNET(igmp_legacysupp) 246 #define V_igmp_default_version VNET(igmp_default_version) 247 248 /* 249 * Virtualized sysctls. 250 */ 251 SYSCTL_VNET_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RW, 252 &VNET_NAME(igmpstat), igmpstat, ""); 253 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_RW, 254 &VNET_NAME(igmp_recvifkludge), 0, 255 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address"); 256 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_RW, 257 &VNET_NAME(igmp_sendra), 0, 258 "Send IP Router Alert option in IGMPv2/v3 messages"); 259 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_RW, 260 &VNET_NAME(igmp_sendlocal), 0, 261 "Send IGMP membership reports for 224.0.0.0/24 groups"); 262 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_RW, 263 &VNET_NAME(igmp_v1enable), 0, 264 "Enable backwards compatibility with IGMPv1"); 265 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_RW, 266 &VNET_NAME(igmp_v2enable), 0, 267 "Enable backwards compatibility with IGMPv2"); 268 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW, 269 &VNET_NAME(igmp_legacysupp), 0, 270 "Allow v1/v2 reports to suppress v3 group responses"); 271 SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, default_version, 272 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 273 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I", 274 "Default version of IGMP to run on each interface"); 275 SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, gsrdelay, 276 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 277 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I", 278 "Rate limit for IGMPv3 Group-and-Source queries in seconds"); 279 280 /* 281 * Non-virtualized sysctls. 
282 */ 283 static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, 284 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo, 285 "Per-interface IGMPv3 state"); 286 287 static __inline void 288 igmp_save_context(struct mbuf *m, struct ifnet *ifp) 289 { 290 291 #ifdef VIMAGE 292 m->m_pkthdr.header = ifp->if_vnet; 293 #endif /* VIMAGE */ 294 m->m_pkthdr.flowid = ifp->if_index; 295 } 296 297 static __inline void 298 igmp_scrub_context(struct mbuf *m) 299 { 300 301 m->m_pkthdr.header = NULL; 302 m->m_pkthdr.flowid = 0; 303 } 304 305 #ifdef KTR 306 static __inline char * 307 inet_ntoa_haddr(in_addr_t haddr) 308 { 309 struct in_addr ia; 310 311 ia.s_addr = htonl(haddr); 312 return (inet_ntoa(ia)); 313 } 314 #endif 315 316 /* 317 * Restore context from a queued IGMP output chain. 318 * Return saved ifindex. 319 * 320 * VIMAGE: The assertion is there to make sure that we 321 * actually called CURVNET_SET() with what's in the mbuf chain. 322 */ 323 static __inline uint32_t 324 igmp_restore_context(struct mbuf *m) 325 { 326 327 #ifdef notyet 328 #if defined(VIMAGE) && defined(INVARIANTS) 329 KASSERT(curvnet == (m->m_pkthdr.header), 330 ("%s: called when curvnet was not restored", __func__)); 331 #endif 332 #endif 333 return (m->m_pkthdr.flowid); 334 } 335 336 /* 337 * Retrieve or set default IGMP version. 338 * 339 * VIMAGE: Assume curvnet set by caller. 340 * SMPng: NOTE: Serialized by IGMP lock. 341 */ 342 static int 343 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS) 344 { 345 int error; 346 int new; 347 348 error = sysctl_wire_old_buffer(req, sizeof(int)); 349 if (error) 350 return (error); 351 352 IGMP_LOCK(); 353 354 new = V_igmp_default_version; 355 356 error = sysctl_handle_int(oidp, &new, 0, req); 357 if (error || !req->newptr) 358 goto out_locked; 359 360 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) { 361 error = EINVAL; 362 goto out_locked; 363 } 364 365 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d", 366 V_igmp_default_version, new); 367 368 V_igmp_default_version = new; 369 370 out_locked: 371 IGMP_UNLOCK(); 372 return (error); 373 } 374 375 /* 376 * Retrieve or set threshold between group-source queries in seconds. 377 * 378 * VIMAGE: Assume curvnet set by caller. 379 * SMPng: NOTE: Serialized by IGMP lock. 380 */ 381 static int 382 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS) 383 { 384 int error; 385 int i; 386 387 error = sysctl_wire_old_buffer(req, sizeof(int)); 388 if (error) 389 return (error); 390 391 IGMP_LOCK(); 392 393 i = V_igmp_gsrdelay.tv_sec; 394 395 error = sysctl_handle_int(oidp, &i, 0, req); 396 if (error || !req->newptr) 397 goto out_locked; 398 399 if (i < -1 || i >= 60) { 400 error = EINVAL; 401 goto out_locked; 402 } 403 404 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d", 405 V_igmp_gsrdelay.tv_sec, i); 406 V_igmp_gsrdelay.tv_sec = i; 407 408 out_locked: 409 IGMP_UNLOCK(); 410 return (error); 411 } 412 413 /* 414 * Expose struct igmp_ifinfo to userland, keyed by ifindex. 415 * For use by ifmcstat(8). 416 * 417 * SMPng: NOTE: Does an unlocked ifindex space read. 418 * VIMAGE: Assume curvnet set by caller. The node handler itself 419 * is not directly virtualized. 
420 */ 421 static int 422 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS) 423 { 424 int *name; 425 int error; 426 u_int namelen; 427 struct ifnet *ifp; 428 struct igmp_ifinfo *igi; 429 430 name = (int *)arg1; 431 namelen = arg2; 432 433 if (req->newptr != NULL) 434 return (EPERM); 435 436 if (namelen != 1) 437 return (EINVAL); 438 439 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo)); 440 if (error) 441 return (error); 442 443 IN_MULTI_LOCK(); 444 IGMP_LOCK(); 445 446 if (name[0] <= 0 || name[0] > V_if_index) { 447 error = ENOENT; 448 goto out_locked; 449 } 450 451 error = ENOENT; 452 453 ifp = ifnet_byindex(name[0]); 454 if (ifp == NULL) 455 goto out_locked; 456 457 LIST_FOREACH(igi, &V_igi_head, igi_link) { 458 if (ifp == igi->igi_ifp) { 459 error = SYSCTL_OUT(req, igi, 460 sizeof(struct igmp_ifinfo)); 461 break; 462 } 463 } 464 465 out_locked: 466 IGMP_UNLOCK(); 467 IN_MULTI_UNLOCK(); 468 return (error); 469 } 470 471 /* 472 * Dispatch an entire queue of pending packet chains 473 * using the netisr. 474 * VIMAGE: Assumes the vnet pointer has been set. 475 */ 476 static void 477 igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop) 478 { 479 struct mbuf *m; 480 481 for (;;) { 482 _IF_DEQUEUE(ifq, m); 483 if (m == NULL) 484 break; 485 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m); 486 if (loop) 487 m->m_flags |= M_IGMP_LOOP; 488 netisr_dispatch(NETISR_IGMP, m); 489 if (--limit == 0) 490 break; 491 } 492 } 493 494 /* 495 * Filter outgoing IGMP report state by group. 496 * 497 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1). 498 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are 499 * disabled for all groups in the 224.0.0.0/24 link-local scope. However, 500 * this may break certain IGMP snooping switches which rely on the old 501 * report behaviour. 502 * 503 * Return zero if the given group is one for which IGMP reports 504 * should be suppressed, or non-zero if reports should be issued. 505 */ 506 static __inline int 507 igmp_isgroupreported(const struct in_addr addr) 508 { 509 510 if (in_allhosts(addr) || 511 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) 512 return (0); 513 514 return (1); 515 } 516 517 /* 518 * Construct a Router Alert option to use in outgoing packets. 519 */ 520 static struct mbuf * 521 igmp_ra_alloc(void) 522 { 523 struct mbuf *m; 524 struct ipoption *p; 525 526 m = m_get(M_WAITOK, MT_DATA); 527 p = mtod(m, struct ipoption *); 528 p->ipopt_dst.s_addr = INADDR_ANY; 529 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */ 530 p->ipopt_list[1] = 0x04; /* 4 bytes long */ 531 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */ 532 p->ipopt_list[3] = 0x00; /* pad byte */ 533 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1]; 534 535 return (m); 536 } 537 538 /* 539 * Attach IGMP when PF_INET is attached to an interface. 540 */ 541 struct igmp_ifinfo * 542 igmp_domifattach(struct ifnet *ifp) 543 { 544 struct igmp_ifinfo *igi; 545 546 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 547 __func__, ifp, ifp->if_xname); 548 549 IGMP_LOCK(); 550 551 igi = igi_alloc_locked(ifp); 552 if (!(ifp->if_flags & IFF_MULTICAST)) 553 igi->igi_flags |= IGIF_SILENT; 554 555 IGMP_UNLOCK(); 556 557 return (igi); 558 } 559 560 /* 561 * VIMAGE: assume curvnet set by caller. 
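 * The returned igmp_ifinfo is linked onto the per-vnet V_igi_head list;
 * it is reclaimed by igi_delete_locked() when the interface is detached.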
562 */ 563 static struct igmp_ifinfo * 564 igi_alloc_locked(/*const*/ struct ifnet *ifp) 565 { 566 struct igmp_ifinfo *igi; 567 568 IGMP_LOCK_ASSERT(); 569 570 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO); 571 if (igi == NULL) 572 goto out; 573 574 igi->igi_ifp = ifp; 575 igi->igi_version = V_igmp_default_version; 576 igi->igi_flags = 0; 577 igi->igi_rv = IGMP_RV_INIT; 578 igi->igi_qi = IGMP_QI_INIT; 579 igi->igi_qri = IGMP_QRI_INIT; 580 igi->igi_uri = IGMP_URI_INIT; 581 582 SLIST_INIT(&igi->igi_relinmhead); 583 584 /* 585 * Responses to general queries are subject to bounds. 586 */ 587 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS); 588 589 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link); 590 591 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)", 592 ifp, ifp->if_xname); 593 594 out: 595 return (igi); 596 } 597 598 /* 599 * Hook for ifdetach. 600 * 601 * NOTE: Some finalization tasks need to run before the protocol domain 602 * is detached, but also before the link layer does its cleanup. 603 * 604 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK(). 605 * XXX This is also bitten by unlocked ifma_protospec access. 606 */ 607 void 608 igmp_ifdetach(struct ifnet *ifp) 609 { 610 struct igmp_ifinfo *igi; 611 struct ifmultiaddr *ifma; 612 struct in_multi *inm, *tinm; 613 614 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp, 615 ifp->if_xname); 616 617 IGMP_LOCK(); 618 619 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 620 if (igi->igi_version == IGMP_VERSION_3) { 621 IF_ADDR_RLOCK(ifp); 622 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 623 if (ifma->ifma_addr->sa_family != AF_INET || 624 ifma->ifma_protospec == NULL) 625 continue; 626 #if 0 627 KASSERT(ifma->ifma_protospec != NULL, 628 ("%s: ifma_protospec is NULL", __func__)); 629 #endif 630 inm = (struct in_multi *)ifma->ifma_protospec; 631 if (inm->inm_state == IGMP_LEAVING_MEMBER) { 632 SLIST_INSERT_HEAD(&igi->igi_relinmhead, 633 inm, inm_nrele); 634 } 635 inm_clear_recorded(inm); 636 } 637 IF_ADDR_RUNLOCK(ifp); 638 /* 639 * Free the in_multi reference(s) for this IGMP lifecycle. 640 */ 641 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, 642 tinm) { 643 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele); 644 inm_release_locked(inm); 645 } 646 } 647 648 IGMP_UNLOCK(); 649 } 650 651 /* 652 * Hook for domifdetach. 653 */ 654 void 655 igmp_domifdetach(struct ifnet *ifp) 656 { 657 struct igmp_ifinfo *igi; 658 659 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 660 __func__, ifp, ifp->if_xname); 661 662 IGMP_LOCK(); 663 664 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 665 igi_delete_locked(ifp); 666 667 IGMP_UNLOCK(); 668 } 669 670 static void 671 igi_delete_locked(const struct ifnet *ifp) 672 { 673 struct igmp_ifinfo *igi, *tigi; 674 675 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)", 676 __func__, ifp, ifp->if_xname); 677 678 IGMP_LOCK_ASSERT(); 679 680 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) { 681 if (igi->igi_ifp == ifp) { 682 /* 683 * Free deferred General Query responses. 684 */ 685 _IF_DRAIN(&igi->igi_gq); 686 687 LIST_REMOVE(igi, igi_link); 688 689 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead), 690 ("%s: there are dangling in_multi references", 691 __func__)); 692 693 free(igi, M_IGMP); 694 return; 695 } 696 } 697 698 #ifdef INVARIANTS 699 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp); 700 #endif 701 } 702 703 /* 704 * Process a received IGMPv1 query. 705 * Return non-zero if the message should be dropped. 
 *
 * VIMAGE: The curvnet pointer is derived from the input ifp.
 */
static int
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifinfo *igi;
	struct in_multi *inm;

	/*
	 * IGMPv1 Host Membership Queries SHOULD always be addressed to
	 * 224.0.0.1. They are always treated as General Queries.
	 * igmp_group is always ignored. Do not drop it as a userland
	 * daemon may wish to see it.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
		IGMPSTAT_INC(igps_rcv_badqueries);
		return (0);
	}
	IGMPSTAT_INC(igps_rcv_gen_queries);

	IN_MULTI_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Switch to IGMPv1 host compatibility mode.
	 */
	igmp_set_version(igi, IGMP_VERSION_1);

	CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);

	/*
	 * Start the timers in all of our group records
	 * for the interface on which the query arrived,
	 * except those which are already running.
	 */
	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET ||
		    ifma->ifma_protospec == NULL)
			continue;
		inm = (struct in_multi *)ifma->ifma_protospec;
		if (inm->inm_timer != 0)
			continue;
		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_SLEEPING_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			inm->inm_state = IGMP_REPORTING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(
			    IGMP_V1V2_MAX_RI * PR_FASTHZ);
			V_current_state_timers_running = 1;
			break;
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv2 general or group-specific query.
 */
static int
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifinfo *igi;
	struct in_multi *inm;
	int is_general_query;
	uint16_t timer;

	is_general_query = 0;

	/*
	 * Validate address fields upfront.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (in_nullhost(igmp->igmp_group)) {
		/*
		 * IGMPv2 General Query.
		 * If this was not sent to the all-hosts group, ignore it.
		 */
		if (!in_allhosts(ip->ip_dst))
			return (0);
		IGMPSTAT_INC(igps_rcv_gen_queries);
		is_general_query = 1;
	} else {
		/* IGMPv2 Group-Specific Query.
*/ 821 IGMPSTAT_INC(igps_rcv_group_queries); 822 } 823 824 IN_MULTI_LOCK(); 825 IGMP_LOCK(); 826 827 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 828 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 829 830 if (igi->igi_flags & IGIF_LOOPBACK) { 831 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)", 832 ifp, ifp->if_xname); 833 goto out_locked; 834 } 835 836 /* 837 * Ignore v2 query if in v1 Compatibility Mode. 838 */ 839 if (igi->igi_version == IGMP_VERSION_1) 840 goto out_locked; 841 842 igmp_set_version(igi, IGMP_VERSION_2); 843 844 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE; 845 if (timer == 0) 846 timer = 1; 847 848 if (is_general_query) { 849 /* 850 * For each reporting group joined on this 851 * interface, kick the report timer. 852 */ 853 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)", 854 ifp, ifp->if_xname); 855 IF_ADDR_RLOCK(ifp); 856 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 857 if (ifma->ifma_addr->sa_family != AF_INET || 858 ifma->ifma_protospec == NULL) 859 continue; 860 inm = (struct in_multi *)ifma->ifma_protospec; 861 igmp_v2_update_group(inm, timer); 862 } 863 IF_ADDR_RUNLOCK(ifp); 864 } else { 865 /* 866 * Group-specific IGMPv2 query, we need only 867 * look up the single group to process it. 868 */ 869 inm = inm_lookup(ifp, igmp->igmp_group); 870 if (inm != NULL) { 871 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)", 872 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 873 igmp_v2_update_group(inm, timer); 874 } 875 } 876 877 out_locked: 878 IGMP_UNLOCK(); 879 IN_MULTI_UNLOCK(); 880 881 return (0); 882 } 883 884 /* 885 * Update the report timer on a group in response to an IGMPv2 query. 886 * 887 * If we are becoming the reporting member for this group, start the timer. 888 * If we already are the reporting member for this group, and timer is 889 * below the threshold, reset it. 890 * 891 * We may be updating the group for the first time since we switched 892 * to IGMPv3. If we are, then we must clear any recorded source lists, 893 * and transition to REPORTING state; the group timer is overloaded 894 * for group and group-source query responses. 895 * 896 * Unlike IGMPv3, the delay per group should be jittered 897 * to avoid bursts of IGMPv2 reports. 898 */ 899 static void 900 igmp_v2_update_group(struct in_multi *inm, const int timer) 901 { 902 903 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__, 904 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer); 905 906 IN_MULTI_LOCK_ASSERT(); 907 908 switch (inm->inm_state) { 909 case IGMP_NOT_MEMBER: 910 case IGMP_SILENT_MEMBER: 911 break; 912 case IGMP_REPORTING_MEMBER: 913 if (inm->inm_timer != 0 && 914 inm->inm_timer <= timer) { 915 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, " 916 "skipping.", __func__); 917 break; 918 } 919 /* FALLTHROUGH */ 920 case IGMP_SG_QUERY_PENDING_MEMBER: 921 case IGMP_G_QUERY_PENDING_MEMBER: 922 case IGMP_IDLE_MEMBER: 923 case IGMP_LAZY_MEMBER: 924 case IGMP_AWAKENING_MEMBER: 925 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__); 926 inm->inm_state = IGMP_REPORTING_MEMBER; 927 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 928 V_current_state_timers_running = 1; 929 break; 930 case IGMP_SLEEPING_MEMBER: 931 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__); 932 inm->inm_state = IGMP_AWAKENING_MEMBER; 933 break; 934 case IGMP_LEAVING_MEMBER: 935 break; 936 } 937 } 938 939 /* 940 * Process a received IGMPv3 general, group-specific or 941 * group-and-source-specific query. 
942 * Assumes m has already been pulled up to the full IGMP message length. 943 * Return 0 if successful, otherwise an appropriate error code is returned. 944 */ 945 static int 946 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, 947 /*const*/ struct igmpv3 *igmpv3) 948 { 949 struct igmp_ifinfo *igi; 950 struct in_multi *inm; 951 int is_general_query; 952 uint32_t maxresp, nsrc, qqi; 953 uint16_t timer; 954 uint8_t qrv; 955 956 is_general_query = 0; 957 958 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname); 959 960 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */ 961 if (maxresp >= 128) { 962 maxresp = IGMP_MANT(igmpv3->igmp_code) << 963 (IGMP_EXP(igmpv3->igmp_code) + 3); 964 } 965 966 /* 967 * Robustness must never be less than 2 for on-wire IGMPv3. 968 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make 969 * an exception for interfaces whose IGMPv3 state changes 970 * are redirected to loopback (e.g. MANET). 971 */ 972 qrv = IGMP_QRV(igmpv3->igmp_misc); 973 if (qrv < 2) { 974 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__, 975 qrv, IGMP_RV_INIT); 976 qrv = IGMP_RV_INIT; 977 } 978 979 qqi = igmpv3->igmp_qqi; 980 if (qqi >= 128) { 981 qqi = IGMP_MANT(igmpv3->igmp_qqi) << 982 (IGMP_EXP(igmpv3->igmp_qqi) + 3); 983 } 984 985 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE; 986 if (timer == 0) 987 timer = 1; 988 989 nsrc = ntohs(igmpv3->igmp_numsrc); 990 991 /* 992 * Validate address fields and versions upfront before 993 * accepting v3 query. 994 * XXX SMPng: Unlocked access to igmpstat counters here. 995 */ 996 if (in_nullhost(igmpv3->igmp_group)) { 997 /* 998 * IGMPv3 General Query. 999 * 1000 * General Queries SHOULD be directed to 224.0.0.1. 1001 * A general query with a source list has undefined 1002 * behaviour; discard it. 1003 */ 1004 IGMPSTAT_INC(igps_rcv_gen_queries); 1005 if (!in_allhosts(ip->ip_dst) || nsrc > 0) { 1006 IGMPSTAT_INC(igps_rcv_badqueries); 1007 return (0); 1008 } 1009 is_general_query = 1; 1010 } else { 1011 /* Group or group-source specific query. */ 1012 if (nsrc == 0) 1013 IGMPSTAT_INC(igps_rcv_group_queries); 1014 else 1015 IGMPSTAT_INC(igps_rcv_gsr_queries); 1016 } 1017 1018 IN_MULTI_LOCK(); 1019 IGMP_LOCK(); 1020 1021 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 1022 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 1023 1024 if (igi->igi_flags & IGIF_LOOPBACK) { 1025 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)", 1026 ifp, ifp->if_xname); 1027 goto out_locked; 1028 } 1029 1030 /* 1031 * Discard the v3 query if we're in Compatibility Mode. 1032 * The RFC is not obviously worded that hosts need to stay in 1033 * compatibility mode until the Old Version Querier Present 1034 * timer expires. 1035 */ 1036 if (igi->igi_version != IGMP_VERSION_3) { 1037 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)", 1038 igi->igi_version, ifp, ifp->if_xname); 1039 goto out_locked; 1040 } 1041 1042 igmp_set_version(igi, IGMP_VERSION_3); 1043 igi->igi_rv = qrv; 1044 igi->igi_qi = qqi; 1045 igi->igi_qri = maxresp; 1046 1047 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi, 1048 maxresp); 1049 1050 if (is_general_query) { 1051 /* 1052 * Schedule a current-state report on this ifp for 1053 * all groups, possibly containing source lists. 1054 * If there is a pending General Query response 1055 * scheduled earlier than the selected delay, do 1056 * not schedule any other reports. 1057 * Otherwise, reset the interface timer. 
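		 * (This mirrors the response-scheduling rules of RFC 3376,
		 * Section 5.2: the pending General Query response timer is
		 * only ever moved earlier, never pushed later.)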
		 */
		CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
			igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
			V_interface_timers_running = 1;
		}
	} else {
		/*
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		inm = inm_lookup(ifp, igmpv3->igmp_group);
		if (inm == NULL)
			goto out_locked;
		if (nsrc > 0) {
			if (!ratecheck(&inm->inm_lastgsrtv,
			    &V_igmp_gsrdelay)) {
				CTR1(KTR_IGMPV3, "%s: GS query throttled.",
				    __func__);
				IGMPSTAT_INC(igps_drop_gsr_queries);
				goto out_locked;
			}
		}
		CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
		    inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
			igmp_input_v3_group_query(inm, igi, timer, igmpv3);
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
	int retval;
	uint16_t nsrc;

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	retval = 0;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LEAVING_MEMBER:
		return (retval);
		break;
	case IGMP_REPORTING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
		    inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
			inm_clear_recorded(inm);
			timer = min(inm->inm_timer, timer);
		}
		inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->inm_timer, timer);
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 * FIXME: Handling source lists larger than 1 mbuf requires that
	 * we pass the mbuf chain pointer down to this function, and use
	 * m_getptr() to walk the chain.
	 */
	if (inm->inm_nsrc > 0) {
		const struct in_addr *ap;
		int i, nrecorded;

		ap = (const struct in_addr *)(igmpv3 + 1);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++, ap++) {
			retval = inm_record_source(inm, ap->s_addr);
			if (retval < 0)
				break;
			nrecorded += retval;
		}
		if (nrecorded > 0) {
			CTR1(KTR_IGMPV3,
			    "%s: schedule response to SG query", __func__);
			inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(timer);
			V_current_state_timers_running = 1;
		}
	}

	return (retval);
}

/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct in_ifaddr *ia;
	struct in_multi *inm;

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		IFP_TO_IA(ifp, ia);
		if (ia != NULL) {
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
			ifa_free(&ia->ia_ifa);
		}
	}

	CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
	    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);

	/*
	 * IGMPv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, stop our group timer and transition to the 'lazy' state.
	 */
	IN_MULTI_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifinfo *igi;

		igi = inm->inm_igi;
		if (igi == NULL) {
			KASSERT(igi != NULL,
			    ("%s: no igi for ifp %p", __func__, ifp));
			goto out_locked;
		}

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv1 report to suppress our reports
		 * unless explicitly configured to do so.
1270 */ 1271 if (igi->igi_version == IGMP_VERSION_3) { 1272 if (V_igmp_legacysupp) 1273 igmp_v3_suppress_group_record(inm); 1274 goto out_locked; 1275 } 1276 1277 inm->inm_timer = 0; 1278 1279 switch (inm->inm_state) { 1280 case IGMP_NOT_MEMBER: 1281 case IGMP_SILENT_MEMBER: 1282 break; 1283 case IGMP_IDLE_MEMBER: 1284 case IGMP_LAZY_MEMBER: 1285 case IGMP_AWAKENING_MEMBER: 1286 CTR3(KTR_IGMPV3, 1287 "report suppressed for %s on ifp %p(%s)", 1288 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1289 case IGMP_SLEEPING_MEMBER: 1290 inm->inm_state = IGMP_SLEEPING_MEMBER; 1291 break; 1292 case IGMP_REPORTING_MEMBER: 1293 CTR3(KTR_IGMPV3, 1294 "report suppressed for %s on ifp %p(%s)", 1295 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1296 if (igi->igi_version == IGMP_VERSION_1) 1297 inm->inm_state = IGMP_LAZY_MEMBER; 1298 else if (igi->igi_version == IGMP_VERSION_2) 1299 inm->inm_state = IGMP_SLEEPING_MEMBER; 1300 break; 1301 case IGMP_G_QUERY_PENDING_MEMBER: 1302 case IGMP_SG_QUERY_PENDING_MEMBER: 1303 case IGMP_LEAVING_MEMBER: 1304 break; 1305 } 1306 } 1307 1308 out_locked: 1309 IN_MULTI_UNLOCK(); 1310 1311 return (0); 1312 } 1313 1314 /* 1315 * Process a received IGMPv2 host membership report. 1316 * 1317 * NOTE: 0.0.0.0 workaround breaks const correctness. 1318 */ 1319 static int 1320 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip, 1321 /*const*/ struct igmp *igmp) 1322 { 1323 struct in_ifaddr *ia; 1324 struct in_multi *inm; 1325 1326 /* 1327 * Make sure we don't hear our own membership report. Fast 1328 * leave requires knowing that we are the only member of a 1329 * group. 1330 */ 1331 IFP_TO_IA(ifp, ia); 1332 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) { 1333 ifa_free(&ia->ia_ifa); 1334 return (0); 1335 } 1336 1337 IGMPSTAT_INC(igps_rcv_reports); 1338 1339 if (ifp->if_flags & IFF_LOOPBACK) { 1340 if (ia != NULL) 1341 ifa_free(&ia->ia_ifa); 1342 return (0); 1343 } 1344 1345 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) || 1346 !in_hosteq(igmp->igmp_group, ip->ip_dst)) { 1347 if (ia != NULL) 1348 ifa_free(&ia->ia_ifa); 1349 IGMPSTAT_INC(igps_rcv_badreports); 1350 return (EINVAL); 1351 } 1352 1353 /* 1354 * RFC 3376, Section 4.2.13, 9.2, 9.3: 1355 * Booting clients may use the source address 0.0.0.0. Some 1356 * IGMP daemons may not know how to use IP_RECVIF to determine 1357 * the interface upon which this message was received. 1358 * Replace 0.0.0.0 with the subnet address if told to do so. 1359 */ 1360 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) { 1361 if (ia != NULL) 1362 ip->ip_src.s_addr = htonl(ia->ia_subnet); 1363 } 1364 if (ia != NULL) 1365 ifa_free(&ia->ia_ifa); 1366 1367 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)", 1368 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1369 1370 /* 1371 * IGMPv2 report suppression. 1372 * If we are a member of this group, and our membership should be 1373 * reported, and our group timer is pending or about to be reset, 1374 * stop our group timer by transitioning to the 'lazy' state. 1375 */ 1376 IN_MULTI_LOCK(); 1377 inm = inm_lookup(ifp, igmp->igmp_group); 1378 if (inm != NULL) { 1379 struct igmp_ifinfo *igi; 1380 1381 igi = inm->inm_igi; 1382 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp)); 1383 1384 IGMPSTAT_INC(igps_rcv_ourreports); 1385 1386 /* 1387 * If we are in IGMPv3 host mode, do not allow the 1388 * other host's IGMPv1 report to suppress our reports 1389 * unless explicitly configured to do so. 
1390 */ 1391 if (igi->igi_version == IGMP_VERSION_3) { 1392 if (V_igmp_legacysupp) 1393 igmp_v3_suppress_group_record(inm); 1394 goto out_locked; 1395 } 1396 1397 inm->inm_timer = 0; 1398 1399 switch (inm->inm_state) { 1400 case IGMP_NOT_MEMBER: 1401 case IGMP_SILENT_MEMBER: 1402 case IGMP_SLEEPING_MEMBER: 1403 break; 1404 case IGMP_REPORTING_MEMBER: 1405 case IGMP_IDLE_MEMBER: 1406 case IGMP_AWAKENING_MEMBER: 1407 CTR3(KTR_IGMPV3, 1408 "report suppressed for %s on ifp %p(%s)", 1409 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1410 case IGMP_LAZY_MEMBER: 1411 inm->inm_state = IGMP_LAZY_MEMBER; 1412 break; 1413 case IGMP_G_QUERY_PENDING_MEMBER: 1414 case IGMP_SG_QUERY_PENDING_MEMBER: 1415 case IGMP_LEAVING_MEMBER: 1416 break; 1417 } 1418 } 1419 1420 out_locked: 1421 IN_MULTI_UNLOCK(); 1422 1423 return (0); 1424 } 1425 1426 void 1427 igmp_input(struct mbuf *m, int off) 1428 { 1429 int iphlen; 1430 struct ifnet *ifp; 1431 struct igmp *igmp; 1432 struct ip *ip; 1433 int igmplen; 1434 int minlen; 1435 int queryver; 1436 1437 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off); 1438 1439 ifp = m->m_pkthdr.rcvif; 1440 1441 IGMPSTAT_INC(igps_rcv_total); 1442 1443 ip = mtod(m, struct ip *); 1444 iphlen = off; 1445 igmplen = ntohs(ip->ip_len) - off; 1446 1447 /* 1448 * Validate lengths. 1449 */ 1450 if (igmplen < IGMP_MINLEN) { 1451 IGMPSTAT_INC(igps_rcv_tooshort); 1452 m_freem(m); 1453 return; 1454 } 1455 1456 /* 1457 * Always pullup to the minimum size for v1/v2 or v3 1458 * to amortize calls to m_pullup(). 1459 */ 1460 minlen = iphlen; 1461 if (igmplen >= IGMP_V3_QUERY_MINLEN) 1462 minlen += IGMP_V3_QUERY_MINLEN; 1463 else 1464 minlen += IGMP_MINLEN; 1465 if ((m->m_flags & M_EXT || m->m_len < minlen) && 1466 (m = m_pullup(m, minlen)) == 0) { 1467 IGMPSTAT_INC(igps_rcv_tooshort); 1468 return; 1469 } 1470 ip = mtod(m, struct ip *); 1471 1472 /* 1473 * Validate checksum. 1474 */ 1475 m->m_data += iphlen; 1476 m->m_len -= iphlen; 1477 igmp = mtod(m, struct igmp *); 1478 if (in_cksum(m, igmplen)) { 1479 IGMPSTAT_INC(igps_rcv_badsum); 1480 m_freem(m); 1481 return; 1482 } 1483 m->m_data -= iphlen; 1484 m->m_len += iphlen; 1485 1486 /* 1487 * IGMP control traffic is link-scope, and must have a TTL of 1. 1488 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception; 1489 * probe packets may come from beyond the LAN. 
1490 */ 1491 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) { 1492 IGMPSTAT_INC(igps_rcv_badttl); 1493 m_freem(m); 1494 return; 1495 } 1496 1497 switch (igmp->igmp_type) { 1498 case IGMP_HOST_MEMBERSHIP_QUERY: 1499 if (igmplen == IGMP_MINLEN) { 1500 if (igmp->igmp_code == 0) 1501 queryver = IGMP_VERSION_1; 1502 else 1503 queryver = IGMP_VERSION_2; 1504 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) { 1505 queryver = IGMP_VERSION_3; 1506 } else { 1507 IGMPSTAT_INC(igps_rcv_tooshort); 1508 m_freem(m); 1509 return; 1510 } 1511 1512 switch (queryver) { 1513 case IGMP_VERSION_1: 1514 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1515 if (!V_igmp_v1enable) 1516 break; 1517 if (igmp_input_v1_query(ifp, ip, igmp) != 0) { 1518 m_freem(m); 1519 return; 1520 } 1521 break; 1522 1523 case IGMP_VERSION_2: 1524 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1525 if (!V_igmp_v2enable) 1526 break; 1527 if (igmp_input_v2_query(ifp, ip, igmp) != 0) { 1528 m_freem(m); 1529 return; 1530 } 1531 break; 1532 1533 case IGMP_VERSION_3: { 1534 struct igmpv3 *igmpv3; 1535 uint16_t igmpv3len; 1536 uint16_t srclen; 1537 int nsrc; 1538 1539 IGMPSTAT_INC(igps_rcv_v3_queries); 1540 igmpv3 = (struct igmpv3 *)igmp; 1541 /* 1542 * Validate length based on source count. 1543 */ 1544 nsrc = ntohs(igmpv3->igmp_numsrc); 1545 srclen = sizeof(struct in_addr) * nsrc; 1546 if (nsrc * sizeof(in_addr_t) > srclen) { 1547 IGMPSTAT_INC(igps_rcv_tooshort); 1548 return; 1549 } 1550 /* 1551 * m_pullup() may modify m, so pullup in 1552 * this scope. 1553 */ 1554 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN + 1555 srclen; 1556 if ((m->m_flags & M_EXT || 1557 m->m_len < igmpv3len) && 1558 (m = m_pullup(m, igmpv3len)) == NULL) { 1559 IGMPSTAT_INC(igps_rcv_tooshort); 1560 return; 1561 } 1562 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *) 1563 + iphlen); 1564 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) { 1565 m_freem(m); 1566 return; 1567 } 1568 } 1569 break; 1570 } 1571 break; 1572 1573 case IGMP_v1_HOST_MEMBERSHIP_REPORT: 1574 if (!V_igmp_v1enable) 1575 break; 1576 if (igmp_input_v1_report(ifp, ip, igmp) != 0) { 1577 m_freem(m); 1578 return; 1579 } 1580 break; 1581 1582 case IGMP_v2_HOST_MEMBERSHIP_REPORT: 1583 if (!V_igmp_v2enable) 1584 break; 1585 if (!ip_checkrouteralert(m)) 1586 IGMPSTAT_INC(igps_rcv_nora); 1587 if (igmp_input_v2_report(ifp, ip, igmp) != 0) { 1588 m_freem(m); 1589 return; 1590 } 1591 break; 1592 1593 case IGMP_v3_HOST_MEMBERSHIP_REPORT: 1594 /* 1595 * Hosts do not need to process IGMPv3 membership reports, 1596 * as report suppression is no longer required. 1597 */ 1598 if (!ip_checkrouteralert(m)) 1599 IGMPSTAT_INC(igps_rcv_nora); 1600 break; 1601 1602 default: 1603 break; 1604 } 1605 1606 /* 1607 * Pass all valid IGMP packets up to any process(es) listening on a 1608 * raw IGMP socket. 1609 */ 1610 rip_input(m, off); 1611 } 1612 1613 1614 /* 1615 * Fast timeout handler (global). 1616 * VIMAGE: Timeout handlers are expected to service all vimages. 1617 */ 1618 void 1619 igmp_fasttimo(void) 1620 { 1621 VNET_ITERATOR_DECL(vnet_iter); 1622 1623 VNET_LIST_RLOCK_NOSLEEP(); 1624 VNET_FOREACH(vnet_iter) { 1625 CURVNET_SET(vnet_iter); 1626 igmp_fasttimo_vnet(); 1627 CURVNET_RESTORE(); 1628 } 1629 VNET_LIST_RUNLOCK_NOSLEEP(); 1630 } 1631 1632 /* 1633 * Fast timeout handler (per-vnet). 1634 * Sends are shuffled off to a netisr to deal with Giant. 1635 * 1636 * VIMAGE: Assume caller has set up our curvnet. 
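 * Runs from igmp_fasttimo(), i.e. nominally PR_FASTHZ (5) times per
 * second; the group, state-change and interface timers in this file are
 * therefore counted in fast-timeout ticks.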
1637 */ 1638 static void 1639 igmp_fasttimo_vnet(void) 1640 { 1641 struct ifqueue scq; /* State-change packets */ 1642 struct ifqueue qrq; /* Query response packets */ 1643 struct ifnet *ifp; 1644 struct igmp_ifinfo *igi; 1645 struct ifmultiaddr *ifma; 1646 struct in_multi *inm; 1647 int loop, uri_fasthz; 1648 1649 loop = 0; 1650 uri_fasthz = 0; 1651 1652 /* 1653 * Quick check to see if any work needs to be done, in order to 1654 * minimize the overhead of fasttimo processing. 1655 * SMPng: XXX Unlocked reads. 1656 */ 1657 if (!V_current_state_timers_running && 1658 !V_interface_timers_running && 1659 !V_state_change_timers_running) 1660 return; 1661 1662 IN_MULTI_LOCK(); 1663 IGMP_LOCK(); 1664 1665 /* 1666 * IGMPv3 General Query response timer processing. 1667 */ 1668 if (V_interface_timers_running) { 1669 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__); 1670 1671 V_interface_timers_running = 0; 1672 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1673 if (igi->igi_v3_timer == 0) { 1674 /* Do nothing. */ 1675 } else if (--igi->igi_v3_timer == 0) { 1676 igmp_v3_dispatch_general_query(igi); 1677 } else { 1678 V_interface_timers_running = 1; 1679 } 1680 } 1681 } 1682 1683 if (!V_current_state_timers_running && 1684 !V_state_change_timers_running) 1685 goto out_locked; 1686 1687 V_current_state_timers_running = 0; 1688 V_state_change_timers_running = 0; 1689 1690 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__); 1691 1692 /* 1693 * IGMPv1/v2/v3 host report and state-change timer processing. 1694 * Note: Processing a v3 group timer may remove a node. 1695 */ 1696 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1697 ifp = igi->igi_ifp; 1698 1699 if (igi->igi_version == IGMP_VERSION_3) { 1700 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 1701 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri * 1702 PR_FASTHZ); 1703 1704 memset(&qrq, 0, sizeof(struct ifqueue)); 1705 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS); 1706 1707 memset(&scq, 0, sizeof(struct ifqueue)); 1708 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS); 1709 } 1710 1711 IF_ADDR_RLOCK(ifp); 1712 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1713 if (ifma->ifma_addr->sa_family != AF_INET || 1714 ifma->ifma_protospec == NULL) 1715 continue; 1716 inm = (struct in_multi *)ifma->ifma_protospec; 1717 switch (igi->igi_version) { 1718 case IGMP_VERSION_1: 1719 case IGMP_VERSION_2: 1720 igmp_v1v2_process_group_timer(inm, 1721 igi->igi_version); 1722 break; 1723 case IGMP_VERSION_3: 1724 igmp_v3_process_group_timers(igi, &qrq, 1725 &scq, inm, uri_fasthz); 1726 break; 1727 } 1728 } 1729 IF_ADDR_RUNLOCK(ifp); 1730 1731 if (igi->igi_version == IGMP_VERSION_3) { 1732 struct in_multi *tinm; 1733 1734 igmp_dispatch_queue(&qrq, 0, loop); 1735 igmp_dispatch_queue(&scq, 0, loop); 1736 1737 /* 1738 * Free the in_multi reference(s) for this 1739 * IGMP lifecycle. 1740 */ 1741 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, 1742 inm_nrele, tinm) { 1743 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, 1744 inm_nrele); 1745 inm_release_locked(inm); 1746 } 1747 } 1748 } 1749 1750 out_locked: 1751 IGMP_UNLOCK(); 1752 IN_MULTI_UNLOCK(); 1753 } 1754 1755 /* 1756 * Update host report group timer for IGMPv1/v2. 1757 * Will update the global pending timer flags. 
1758 */ 1759 static void 1760 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version) 1761 { 1762 int report_timer_expired; 1763 1764 IN_MULTI_LOCK_ASSERT(); 1765 IGMP_LOCK_ASSERT(); 1766 1767 if (inm->inm_timer == 0) { 1768 report_timer_expired = 0; 1769 } else if (--inm->inm_timer == 0) { 1770 report_timer_expired = 1; 1771 } else { 1772 V_current_state_timers_running = 1; 1773 return; 1774 } 1775 1776 switch (inm->inm_state) { 1777 case IGMP_NOT_MEMBER: 1778 case IGMP_SILENT_MEMBER: 1779 case IGMP_IDLE_MEMBER: 1780 case IGMP_LAZY_MEMBER: 1781 case IGMP_SLEEPING_MEMBER: 1782 case IGMP_AWAKENING_MEMBER: 1783 break; 1784 case IGMP_REPORTING_MEMBER: 1785 if (report_timer_expired) { 1786 inm->inm_state = IGMP_IDLE_MEMBER; 1787 (void)igmp_v1v2_queue_report(inm, 1788 (version == IGMP_VERSION_2) ? 1789 IGMP_v2_HOST_MEMBERSHIP_REPORT : 1790 IGMP_v1_HOST_MEMBERSHIP_REPORT); 1791 } 1792 break; 1793 case IGMP_G_QUERY_PENDING_MEMBER: 1794 case IGMP_SG_QUERY_PENDING_MEMBER: 1795 case IGMP_LEAVING_MEMBER: 1796 break; 1797 } 1798 } 1799 1800 /* 1801 * Update a group's timers for IGMPv3. 1802 * Will update the global pending timer flags. 1803 * Note: Unlocked read from igi. 1804 */ 1805 static void 1806 igmp_v3_process_group_timers(struct igmp_ifinfo *igi, 1807 struct ifqueue *qrq, struct ifqueue *scq, 1808 struct in_multi *inm, const int uri_fasthz) 1809 { 1810 int query_response_timer_expired; 1811 int state_change_retransmit_timer_expired; 1812 1813 IN_MULTI_LOCK_ASSERT(); 1814 IGMP_LOCK_ASSERT(); 1815 1816 query_response_timer_expired = 0; 1817 state_change_retransmit_timer_expired = 0; 1818 1819 /* 1820 * During a transition from v1/v2 compatibility mode back to v3, 1821 * a group record in REPORTING state may still have its group 1822 * timer active. This is a no-op in this function; it is easier 1823 * to deal with it here than to complicate the slow-timeout path. 1824 */ 1825 if (inm->inm_timer == 0) { 1826 query_response_timer_expired = 0; 1827 } else if (--inm->inm_timer == 0) { 1828 query_response_timer_expired = 1; 1829 } else { 1830 V_current_state_timers_running = 1; 1831 } 1832 1833 if (inm->inm_sctimer == 0) { 1834 state_change_retransmit_timer_expired = 0; 1835 } else if (--inm->inm_sctimer == 0) { 1836 state_change_retransmit_timer_expired = 1; 1837 } else { 1838 V_state_change_timers_running = 1; 1839 } 1840 1841 /* We are in fasttimo, so be quick about it. */ 1842 if (!state_change_retransmit_timer_expired && 1843 !query_response_timer_expired) 1844 return; 1845 1846 switch (inm->inm_state) { 1847 case IGMP_NOT_MEMBER: 1848 case IGMP_SILENT_MEMBER: 1849 case IGMP_SLEEPING_MEMBER: 1850 case IGMP_LAZY_MEMBER: 1851 case IGMP_AWAKENING_MEMBER: 1852 case IGMP_IDLE_MEMBER: 1853 break; 1854 case IGMP_G_QUERY_PENDING_MEMBER: 1855 case IGMP_SG_QUERY_PENDING_MEMBER: 1856 /* 1857 * Respond to a previously pending Group-Specific 1858 * or Group-and-Source-Specific query by enqueueing 1859 * the appropriate Current-State report for 1860 * immediate transmission. 1861 */ 1862 if (query_response_timer_expired) { 1863 int retval; 1864 1865 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1, 1866 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)); 1867 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 1868 __func__, retval); 1869 inm->inm_state = IGMP_REPORTING_MEMBER; 1870 /* XXX Clear recorded sources for next time. 
			 */
			inm_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case IGMP_REPORTING_MEMBER:
	case IGMP_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->inm_scrv > 0) {
				inm->inm_sctimer = uri_fasthz;
				V_state_change_timers_running = 1;
			}
			/*
			 * Retransmit the previously computed state-change
			 * report. If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void)igmp_v3_merge_state_changes(inm, scq);

			inm_commit(inm);
			CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
			    inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);

			/*
			 * If we are leaving the group for good, make sure
			 * we release IGMP's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
			 */
			if (inm->inm_state == IGMP_LEAVING_MEMBER &&
			    inm->inm_scrv == 0) {
				inm->inm_state = IGMP_NOT_MEMBER;
				SLIST_INSERT_HEAD(&igi->igi_relinmhead,
				    inm, inm_nrele);
			}
		}
		break;
	}
}


/*
 * Suppress a group's pending response to a group or source/group query.
 *
 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
 * Do NOT update ST1/ST0 as this operation merely suppresses
 * the currently pending group record.
 * Do NOT suppress the response to a general query. It is possible but
 * it would require adding another state or flag.
 */
static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{

	IN_MULTI_LOCK_ASSERT();

	KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
	    ("%s: not IGMPv3 mode on link", __func__));

	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
		return;

	if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
		inm_clear_recorded(inm);

	inm->inm_timer = 0;
	inm->inm_state = IGMP_REPORTING_MEMBER;
}

/*
 * Switch to a different IGMP version on the given interface,
 * as per Section 7.2.1.
 */
static void
igmp_set_version(struct igmp_ifinfo *igi, const int version)
{
	int old_version_timer;

	IGMP_LOCK_ASSERT();

	CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
	    version, igi->igi_ifp, igi->igi_ifp->if_xname);

	if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 8.12.
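		 * That is:
		 *
		 *	(Robustness Variable * Query Interval) +
		 *	    Query Response Interval
		 *
		 * converted below into PR_SLOWHZ ticks for the slow timer.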
1966 */ 1967 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri; 1968 old_version_timer *= PR_SLOWHZ; 1969 1970 if (version == IGMP_VERSION_1) { 1971 igi->igi_v1_timer = old_version_timer; 1972 igi->igi_v2_timer = 0; 1973 } else if (version == IGMP_VERSION_2) { 1974 igi->igi_v1_timer = 0; 1975 igi->igi_v2_timer = old_version_timer; 1976 } 1977 } 1978 1979 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 1980 if (igi->igi_version != IGMP_VERSION_2) { 1981 igi->igi_version = IGMP_VERSION_2; 1982 igmp_v3_cancel_link_timers(igi); 1983 } 1984 } else if (igi->igi_v1_timer > 0) { 1985 if (igi->igi_version != IGMP_VERSION_1) { 1986 igi->igi_version = IGMP_VERSION_1; 1987 igmp_v3_cancel_link_timers(igi); 1988 } 1989 } 1990 } 1991 1992 /* 1993 * Cancel pending IGMPv3 timers for the given link and all groups 1994 * joined on it; state-change, general-query, and group-query timers. 1995 * 1996 * Only ever called on a transition from v3 to Compatibility mode. Kill 1997 * the timers stone dead (this may be expensive for large N groups), they 1998 * will be restarted if Compatibility Mode deems that they must be due to 1999 * query processing. 2000 */ 2001 static void 2002 igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) 2003 { 2004 struct ifmultiaddr *ifma; 2005 struct ifnet *ifp; 2006 struct in_multi *inm, *tinm; 2007 2008 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__, 2009 igi->igi_ifp, igi->igi_ifp->if_xname); 2010 2011 IN_MULTI_LOCK_ASSERT(); 2012 IGMP_LOCK_ASSERT(); 2013 2014 /* 2015 * Stop the v3 General Query Response on this link stone dead. 2016 * If fasttimo is woken up due to V_interface_timers_running, 2017 * the flag will be cleared if there are no pending link timers. 2018 */ 2019 igi->igi_v3_timer = 0; 2020 2021 /* 2022 * Now clear the current-state and state-change report timers 2023 * for all memberships scoped to this link. 2024 */ 2025 ifp = igi->igi_ifp; 2026 IF_ADDR_RLOCK(ifp); 2027 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2028 if (ifma->ifma_addr->sa_family != AF_INET || 2029 ifma->ifma_protospec == NULL) 2030 continue; 2031 inm = (struct in_multi *)ifma->ifma_protospec; 2032 switch (inm->inm_state) { 2033 case IGMP_NOT_MEMBER: 2034 case IGMP_SILENT_MEMBER: 2035 case IGMP_IDLE_MEMBER: 2036 case IGMP_LAZY_MEMBER: 2037 case IGMP_SLEEPING_MEMBER: 2038 case IGMP_AWAKENING_MEMBER: 2039 /* 2040 * These states are either not relevant in v3 mode, 2041 * or are unreported. Do nothing. 2042 */ 2043 break; 2044 case IGMP_LEAVING_MEMBER: 2045 /* 2046 * If we are leaving the group and switching to 2047 * compatibility mode, we need to release the final 2048 * reference held for issuing the INCLUDE {}, and 2049 * transition to REPORTING to ensure the host leave 2050 * message is sent upstream to the old querier -- 2051 * transition to NOT would lose the leave and race. 2052 */ 2053 SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele); 2054 /* FALLTHROUGH */ 2055 case IGMP_G_QUERY_PENDING_MEMBER: 2056 case IGMP_SG_QUERY_PENDING_MEMBER: 2057 inm_clear_recorded(inm); 2058 /* FALLTHROUGH */ 2059 case IGMP_REPORTING_MEMBER: 2060 inm->inm_state = IGMP_REPORTING_MEMBER; 2061 break; 2062 } 2063 /* 2064 * Always clear state-change and group report timers. 2065 * Free any pending IGMPv3 state-change records. 
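		 * (Any queued state-change records would only be meaningful
		 * to an IGMPv3 querier, so they are dropped here rather than
		 * transmitted.)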
2066 */ 2067 inm->inm_sctimer = 0; 2068 inm->inm_timer = 0; 2069 _IF_DRAIN(&inm->inm_scq); 2070 } 2071 IF_ADDR_RUNLOCK(ifp); 2072 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) { 2073 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele); 2074 inm_release_locked(inm); 2075 } 2076 } 2077 2078 /* 2079 * Update the Older Version Querier Present timers for a link. 2080 * See Section 7.2.1 of RFC 3376. 2081 */ 2082 static void 2083 igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi) 2084 { 2085 2086 IGMP_LOCK_ASSERT(); 2087 2088 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) { 2089 /* 2090 * IGMPv1 and IGMPv2 Querier Present timers expired. 2091 * 2092 * Revert to IGMPv3. 2093 */ 2094 if (igi->igi_version != IGMP_VERSION_3) { 2095 CTR5(KTR_IGMPV3, 2096 "%s: transition from v%d -> v%d on %p(%s)", 2097 __func__, igi->igi_version, IGMP_VERSION_3, 2098 igi->igi_ifp, igi->igi_ifp->if_xname); 2099 igi->igi_version = IGMP_VERSION_3; 2100 } 2101 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 2102 /* 2103 * IGMPv1 Querier Present timer expired, 2104 * IGMPv2 Querier Present timer running. 2105 * If IGMPv2 was disabled since last timeout, 2106 * revert to IGMPv3. 2107 * If IGMPv2 is enabled, revert to IGMPv2. 2108 */ 2109 if (!V_igmp_v2enable) { 2110 CTR5(KTR_IGMPV3, 2111 "%s: transition from v%d -> v%d on %p(%s)", 2112 __func__, igi->igi_version, IGMP_VERSION_3, 2113 igi->igi_ifp, igi->igi_ifp->if_xname); 2114 igi->igi_v2_timer = 0; 2115 igi->igi_version = IGMP_VERSION_3; 2116 } else { 2117 --igi->igi_v2_timer; 2118 if (igi->igi_version != IGMP_VERSION_2) { 2119 CTR5(KTR_IGMPV3, 2120 "%s: transition from v%d -> v%d on %p(%s)", 2121 __func__, igi->igi_version, IGMP_VERSION_2, 2122 igi->igi_ifp, igi->igi_ifp->if_xname); 2123 igi->igi_version = IGMP_VERSION_2; 2124 } 2125 } 2126 } else if (igi->igi_v1_timer > 0) { 2127 /* 2128 * IGMPv1 Querier Present timer running. 2129 * Stop IGMPv2 timer if running. 2130 * 2131 * If IGMPv1 was disabled since last timeout, 2132 * revert to IGMPv3. 2133 * If IGMPv1 is enabled, reset IGMPv2 timer if running. 2134 */ 2135 if (!V_igmp_v1enable) { 2136 CTR5(KTR_IGMPV3, 2137 "%s: transition from v%d -> v%d on %p(%s)", 2138 __func__, igi->igi_version, IGMP_VERSION_3, 2139 igi->igi_ifp, igi->igi_ifp->if_xname); 2140 igi->igi_v1_timer = 0; 2141 igi->igi_version = IGMP_VERSION_3; 2142 } else { 2143 --igi->igi_v1_timer; 2144 } 2145 if (igi->igi_v2_timer > 0) { 2146 CTR3(KTR_IGMPV3, 2147 "%s: cancel v2 timer on %p(%s)", 2148 __func__, igi->igi_ifp, igi->igi_ifp->if_xname); 2149 igi->igi_v2_timer = 0; 2150 } 2151 } 2152 } 2153 2154 /* 2155 * Global slowtimo handler. 2156 * VIMAGE: Timeout handlers are expected to service all vimages. 2157 */ 2158 void 2159 igmp_slowtimo(void) 2160 { 2161 VNET_ITERATOR_DECL(vnet_iter); 2162 2163 VNET_LIST_RLOCK_NOSLEEP(); 2164 VNET_FOREACH(vnet_iter) { 2165 CURVNET_SET(vnet_iter); 2166 igmp_slowtimo_vnet(); 2167 CURVNET_RESTORE(); 2168 } 2169 VNET_LIST_RUNLOCK_NOSLEEP(); 2170 } 2171 2172 /* 2173 * Per-vnet slowtimo handler. 2174 */ 2175 static void 2176 igmp_slowtimo_vnet(void) 2177 { 2178 struct igmp_ifinfo *igi; 2179 2180 IGMP_LOCK(); 2181 2182 LIST_FOREACH(igi, &V_igi_head, igi_link) { 2183 igmp_v1v2_process_querier_timers(igi); 2184 } 2185 2186 IGMP_UNLOCK(); 2187 } 2188 2189 /* 2190 * Dispatch an IGMPv1/v2 host report or leave message. 2191 * These are always small enough to fit inside a single mbuf. 
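 * (An IGMPv1/v2 message is sizeof(struct ip) + sizeof(struct igmp),
 * i.e. 20 + 8 = 28 bytes, comfortably within MHLEN.)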
2192 */ 2193 static int 2194 igmp_v1v2_queue_report(struct in_multi *inm, const int type) 2195 { 2196 struct ifnet *ifp; 2197 struct igmp *igmp; 2198 struct ip *ip; 2199 struct mbuf *m; 2200 2201 IN_MULTI_LOCK_ASSERT(); 2202 IGMP_LOCK_ASSERT(); 2203 2204 ifp = inm->inm_ifp; 2205 2206 m = m_gethdr(M_NOWAIT, MT_DATA); 2207 if (m == NULL) 2208 return (ENOMEM); 2209 MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp)); 2210 2211 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp); 2212 2213 m->m_data += sizeof(struct ip); 2214 m->m_len = sizeof(struct igmp); 2215 2216 igmp = mtod(m, struct igmp *); 2217 igmp->igmp_type = type; 2218 igmp->igmp_code = 0; 2219 igmp->igmp_group = inm->inm_addr; 2220 igmp->igmp_cksum = 0; 2221 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp)); 2222 2223 m->m_data -= sizeof(struct ip); 2224 m->m_len += sizeof(struct ip); 2225 2226 ip = mtod(m, struct ip *); 2227 ip->ip_tos = 0; 2228 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp)); 2229 ip->ip_off = 0; 2230 ip->ip_p = IPPROTO_IGMP; 2231 ip->ip_src.s_addr = INADDR_ANY; 2232 2233 if (type == IGMP_HOST_LEAVE_MESSAGE) 2234 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP); 2235 else 2236 ip->ip_dst = inm->inm_addr; 2237 2238 igmp_save_context(m, ifp); 2239 2240 m->m_flags |= M_IGMPV2; 2241 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK) 2242 m->m_flags |= M_IGMP_LOOP; 2243 2244 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m); 2245 netisr_dispatch(NETISR_IGMP, m); 2246 2247 return (0); 2248 } 2249 2250 /* 2251 * Process a state change from the upper layer for the given IPv4 group. 2252 * 2253 * Each socket holds a reference on the in_multi in its own ip_moptions. 2254 * The socket layer will have made the necessary updates to the group 2255 * state; it is now up to IGMP to issue a state change report if there 2256 * has been any change between T0 (when the last state-change was issued) 2257 * and T1 (now). 2258 * 2259 * We use the IGMPv3 state machine at group level. The IGMP module 2260 * however makes the decision as to which IGMP protocol version to speak. 2261 * A state change *from* INCLUDE {} always means an initial join. 2262 * A state change *to* INCLUDE {} always means a final leave. 2263 * 2264 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can 2265 * save ourselves a bunch of work; any exclusive mode groups need not 2266 * compute source filter lists. 2267 * 2268 * VIMAGE: curvnet should have been set by caller, as this routine 2269 * is called from the socket option handlers. 2270 */ 2271 int 2272 igmp_change_state(struct in_multi *inm) 2273 { 2274 struct igmp_ifinfo *igi; 2275 struct ifnet *ifp; 2276 int error; 2277 2278 IN_MULTI_LOCK_ASSERT(); 2279 2280 error = 0; 2281 2282 /* 2283 * Try to detect if the upper layer just asked us to change state 2284 * for an interface which has now gone away. 2285 */ 2286 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); 2287 ifp = inm->inm_ifma->ifma_ifp; 2288 /* 2289 * Sanity check that netinet's notion of ifp is the 2290 * same as net's. 2291 */ 2292 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); 2293 2294 IGMP_LOCK(); 2295 2296 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 2297 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 2298 2299 /* 2300 * If we detect a state transition to or from MCAST_UNDEFINED 2301 * for this group, then we are starting or finishing an IGMP 2302 * life cycle for this group.
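	 * For example, the first ASM join on a group from userland,
	 * e.g. (illustrative sketch only, not part of this file):
	 *
	 *	struct ip_mreq mreq;
	 *	mreq.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
	 *	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	 *	setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP,
	 *	    &mreq, sizeof(mreq));
	 *
	 * takes the group's filter mode from MCAST_UNDEFINED to
	 * MCAST_EXCLUDE and is treated as an initial join; the last drop
	 * takes it back to MCAST_UNDEFINED and is treated as a final leave.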
2303 */ 2304 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) { 2305 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__, 2306 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode); 2307 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) { 2308 CTR1(KTR_IGMPV3, "%s: initial join", __func__); 2309 error = igmp_initial_join(inm, igi); 2310 goto out_locked; 2311 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) { 2312 CTR1(KTR_IGMPV3, "%s: final leave", __func__); 2313 igmp_final_leave(inm, igi); 2314 goto out_locked; 2315 } 2316 } else { 2317 CTR1(KTR_IGMPV3, "%s: filter set change", __func__); 2318 } 2319 2320 error = igmp_handle_state_change(inm, igi); 2321 2322 out_locked: 2323 IGMP_UNLOCK(); 2324 return (error); 2325 } 2326 2327 /* 2328 * Perform the initial join for an IGMP group. 2329 * 2330 * When joining a group: 2331 * If the group should have its IGMP traffic suppressed, do nothing. 2332 * IGMPv1 starts sending IGMPv1 host membership reports. 2333 * IGMPv2 starts sending IGMPv2 host membership reports. 2334 * IGMPv3 will schedule an IGMPv3 state-change report containing the 2335 * initial state of the membership. 2336 */ 2337 static int 2338 igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi) 2339 { 2340 struct ifnet *ifp; 2341 struct ifqueue *ifq; 2342 int error, retval, syncstates; 2343 2344 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)", 2345 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2346 inm->inm_ifp->if_xname); 2347 2348 error = 0; 2349 syncstates = 1; 2350 2351 ifp = inm->inm_ifp; 2352 2353 IN_MULTI_LOCK_ASSERT(); 2354 IGMP_LOCK_ASSERT(); 2355 2356 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2357 2358 /* 2359 * Groups joined on loopback or marked as 'not reported', 2360 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and 2361 * are never reported in any IGMP protocol exchanges. 2362 * All other groups enter the appropriate IGMP state machine 2363 * for the version in use on this link. 2364 * A link marked as IGIF_SILENT causes IGMP to be completely 2365 * disabled for the link. 2366 */ 2367 if ((ifp->if_flags & IFF_LOOPBACK) || 2368 (igi->igi_flags & IGIF_SILENT) || 2369 !igmp_isgroupreported(inm->inm_addr)) { 2370 CTR1(KTR_IGMPV3, 2371 "%s: not kicking state machine for silent group", __func__); 2372 inm->inm_state = IGMP_SILENT_MEMBER; 2373 inm->inm_timer = 0; 2374 } else { 2375 /* 2376 * Deal with overlapping in_multi lifecycle. 2377 * If this group was LEAVING, then make sure 2378 * we drop the reference we picked up to keep the 2379 * group around for the final INCLUDE {} enqueue. 2380 */ 2381 if (igi->igi_version == IGMP_VERSION_3 && 2382 inm->inm_state == IGMP_LEAVING_MEMBER) 2383 inm_release_locked(inm); 2384 2385 inm->inm_state = IGMP_REPORTING_MEMBER; 2386 2387 switch (igi->igi_version) { 2388 case IGMP_VERSION_1: 2389 case IGMP_VERSION_2: 2390 inm->inm_state = IGMP_IDLE_MEMBER; 2391 error = igmp_v1v2_queue_report(inm, 2392 (igi->igi_version == IGMP_VERSION_2) ? 2393 IGMP_v2_HOST_MEMBERSHIP_REPORT : 2394 IGMP_v1_HOST_MEMBERSHIP_REPORT); 2395 if (error == 0) { 2396 inm->inm_timer = IGMP_RANDOM_DELAY( 2397 IGMP_V1V2_MAX_RI * PR_FASTHZ); 2398 V_current_state_timers_running = 1; 2399 } 2400 break; 2401 2402 case IGMP_VERSION_3: 2403 /* 2404 * Defer update of T0 to T1, until the first copy 2405 * of the state change has been transmitted. 2406 */ 2407 syncstates = 0; 2408 2409 /* 2410 * Immediately enqueue a State-Change Report for 2411 * this interface, freeing any previous reports. 
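			 * (For an initial join the record type will be
			 * CHANGE_TO_EXCLUDE_MODE or CHANGE_TO_INCLUDE_MODE,
			 * as the filter mode is transitioning out of
			 * MCAST_UNDEFINED.)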
2412 * Don't kick the timers if there is nothing to do, 2413 * or if an error occurred. 2414 */ 2415 ifq = &inm->inm_scq; 2416 _IF_DRAIN(ifq); 2417 retval = igmp_v3_enqueue_group_record(ifq, inm, 1, 2418 0, 0); 2419 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 2420 __func__, retval); 2421 if (retval <= 0) { 2422 error = retval * -1; 2423 break; 2424 } 2425 2426 /* 2427 * Schedule transmission of pending state-change 2428 * report up to RV times for this link. The timer 2429 * will fire at the next igmp_fasttimo (~200ms), 2430 * giving us an opportunity to merge the reports. 2431 */ 2432 if (igi->igi_flags & IGIF_LOOPBACK) { 2433 inm->inm_scrv = 1; 2434 } else { 2435 KASSERT(igi->igi_rv > 1, 2436 ("%s: invalid robustness %d", __func__, 2437 igi->igi_rv)); 2438 inm->inm_scrv = igi->igi_rv; 2439 } 2440 inm->inm_sctimer = 1; 2441 V_state_change_timers_running = 1; 2442 2443 error = 0; 2444 break; 2445 } 2446 } 2447 2448 /* 2449 * Only update the T0 state if state change is atomic, 2450 * i.e. we don't need to wait for a timer to fire before we 2451 * can consider the state change to have been communicated. 2452 */ 2453 if (syncstates) { 2454 inm_commit(inm); 2455 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2456 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2457 } 2458 2459 return (error); 2460 } 2461 2462 /* 2463 * Issue an intermediate state change during the IGMP life-cycle. 2464 */ 2465 static int 2466 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi) 2467 { 2468 struct ifnet *ifp; 2469 int retval; 2470 2471 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)", 2472 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2473 inm->inm_ifp->if_xname); 2474 2475 ifp = inm->inm_ifp; 2476 2477 IN_MULTI_LOCK_ASSERT(); 2478 IGMP_LOCK_ASSERT(); 2479 2480 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2481 2482 if ((ifp->if_flags & IFF_LOOPBACK) || 2483 (igi->igi_flags & IGIF_SILENT) || 2484 !igmp_isgroupreported(inm->inm_addr) || 2485 (igi->igi_version != IGMP_VERSION_3)) { 2486 if (!igmp_isgroupreported(inm->inm_addr)) { 2487 CTR1(KTR_IGMPV3, 2488 "%s: not kicking state machine for silent group", __func__); 2489 } 2490 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__); 2491 inm_commit(inm); 2492 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2493 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2494 return (0); 2495 } 2496 2497 _IF_DRAIN(&inm->inm_scq); 2498 2499 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0); 2500 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval); 2501 if (retval <= 0) 2502 return (-retval); 2503 2504 /* 2505 * If record(s) were enqueued, start the state-change 2506 * report timer for this group. 2507 */ 2508 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv); 2509 inm->inm_sctimer = 1; 2510 V_state_change_timers_running = 1; 2511 2512 return (0); 2513 } 2514 2515 /* 2516 * Perform the final leave for an IGMP group. 2517 * 2518 * When leaving a group: 2519 * IGMPv1 does nothing. 2520 * IGMPv2 sends a host leave message, if and only if we are the reporter. 2521 * IGMPv3 enqueues a state-change report containing a transition 2522 * to INCLUDE {} for immediate transmission. 
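 * (IGMPv3 has no explicit Leave message; the TO_IN {} record conveys
 * that this host no longer wants any sources for the group.)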
2523 */ 2524 static void 2525 igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi) 2526 { 2527 int syncstates; 2528 2529 syncstates = 1; 2530 2531 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)", 2532 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2533 inm->inm_ifp->if_xname); 2534 2535 IN_MULTI_LOCK_ASSERT(); 2536 IGMP_LOCK_ASSERT(); 2537 2538 switch (inm->inm_state) { 2539 case IGMP_NOT_MEMBER: 2540 case IGMP_SILENT_MEMBER: 2541 case IGMP_LEAVING_MEMBER: 2542 /* Already leaving or left; do nothing. */ 2543 CTR1(KTR_IGMPV3, 2544 "%s: not kicking state machine for silent group", __func__); 2545 break; 2546 case IGMP_REPORTING_MEMBER: 2547 case IGMP_IDLE_MEMBER: 2548 case IGMP_G_QUERY_PENDING_MEMBER: 2549 case IGMP_SG_QUERY_PENDING_MEMBER: 2550 if (igi->igi_version == IGMP_VERSION_2) { 2551 #ifdef INVARIANTS 2552 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER || 2553 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) 2554 panic("%s: IGMPv3 state reached, not IGMPv3 mode", 2555 __func__); 2556 #endif 2557 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE); 2558 inm->inm_state = IGMP_NOT_MEMBER; 2559 } else if (igi->igi_version == IGMP_VERSION_3) { 2560 /* 2561 * Stop group timer and all pending reports. 2562 * Immediately enqueue a state-change report 2563 * TO_IN {} to be sent on the next fast timeout, 2564 * giving us an opportunity to merge reports. 2565 */ 2566 _IF_DRAIN(&inm->inm_scq); 2567 inm->inm_timer = 0; 2568 if (igi->igi_flags & IGIF_LOOPBACK) { 2569 inm->inm_scrv = 1; 2570 } else { 2571 inm->inm_scrv = igi->igi_rv; 2572 } 2573 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d " 2574 "pending retransmissions.", __func__, 2575 inet_ntoa(inm->inm_addr), 2576 inm->inm_ifp->if_xname, inm->inm_scrv); 2577 if (inm->inm_scrv == 0) { 2578 inm->inm_state = IGMP_NOT_MEMBER; 2579 inm->inm_sctimer = 0; 2580 } else { 2581 int retval; 2582 2583 inm_acquire_locked(inm); 2584 2585 retval = igmp_v3_enqueue_group_record( 2586 &inm->inm_scq, inm, 1, 0, 0); 2587 KASSERT(retval != 0, 2588 ("%s: enqueue record = %d", __func__, 2589 retval)); 2590 2591 inm->inm_state = IGMP_LEAVING_MEMBER; 2592 inm->inm_sctimer = 1; 2593 V_state_change_timers_running = 1; 2594 syncstates = 0; 2595 } 2596 break; 2597 } 2598 break; 2599 case IGMP_LAZY_MEMBER: 2600 case IGMP_SLEEPING_MEMBER: 2601 case IGMP_AWAKENING_MEMBER: 2602 /* Our reports are suppressed; do nothing. */ 2603 break; 2604 } 2605 2606 if (syncstates) { 2607 inm_commit(inm); 2608 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2609 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2610 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 2611 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s", 2612 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2613 } 2614 } 2615 2616 /* 2617 * Enqueue an IGMPv3 group record to the given output queue. 2618 * 2619 * XXX This function could do with having the allocation code 2620 * split out, and the multiple-tree-walks coalesced into a single 2621 * routine as has been done in igmp_v3_enqueue_filter_change(). 2622 * 2623 * If is_state_change is zero, a current-state record is appended. 2624 * If is_state_change is non-zero, a state-change report is appended. 2625 * 2626 * If is_group_query is non-zero, an mbuf packet chain is allocated. 2627 * If is_group_query is zero, and if there is a packet with free space 2628 * at the tail of the queue, it will be appended to providing there 2629 * is enough free space. 2630 * Otherwise a new mbuf packet chain is allocated. 
2631 * 2632 * If is_source_query is non-zero, each source is checked to see if 2633 * it was recorded for a Group-Source query, and will be omitted if 2634 * it is not both in-mode and recorded. 2635 * 2636 * The function will attempt to allocate leading space in the packet 2637 * for the IP/IGMP header to be prepended without fragmenting the chain. 2638 * 2639 * If successful the size of all data appended to the queue is returned, 2640 * otherwise an error code less than zero is returned, or zero if 2641 * no record(s) were appended. 2642 */ 2643 static int 2644 igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, 2645 const int is_state_change, const int is_group_query, 2646 const int is_source_query) 2647 { 2648 struct igmp_grouprec ig; 2649 struct igmp_grouprec *pig; 2650 struct ifnet *ifp; 2651 struct ip_msource *ims, *nims; 2652 struct mbuf *m0, *m, *md; 2653 int error, is_filter_list_change; 2654 int minrec0len, m0srcs, msrcs, nbytes, off; 2655 int record_has_sources; 2656 int now; 2657 int type; 2658 in_addr_t naddr; 2659 uint8_t mode; 2660 2661 IN_MULTI_LOCK_ASSERT(); 2662 2663 error = 0; 2664 ifp = inm->inm_ifp; 2665 is_filter_list_change = 0; 2666 m = NULL; 2667 m0 = NULL; 2668 m0srcs = 0; 2669 msrcs = 0; 2670 nbytes = 0; 2671 nims = NULL; 2672 record_has_sources = 1; 2673 pig = NULL; 2674 type = IGMP_DO_NOTHING; 2675 mode = inm->inm_st[1].iss_fmode; 2676 2677 /* 2678 * If we did not transition out of ASM mode during t0->t1, 2679 * and there are no source nodes to process, we can skip 2680 * the generation of source records. 2681 */ 2682 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 && 2683 inm->inm_nsrc == 0) 2684 record_has_sources = 0; 2685 2686 if (is_state_change) { 2687 /* 2688 * Queue a state change record. 2689 * If the mode did not change, and there are non-ASM 2690 * listeners or source filters present, 2691 * we potentially need to issue two records for the group. 2692 * If we are transitioning to MCAST_UNDEFINED, we need 2693 * not send any sources. 2694 * If there are ASM listeners, and there was no filter 2695 * mode transition of any kind, do nothing. 2696 */ 2697 if (mode != inm->inm_st[0].iss_fmode) { 2698 if (mode == MCAST_EXCLUDE) { 2699 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE", 2700 __func__); 2701 type = IGMP_CHANGE_TO_EXCLUDE_MODE; 2702 } else { 2703 CTR1(KTR_IGMPV3, "%s: change to INCLUDE", 2704 __func__); 2705 type = IGMP_CHANGE_TO_INCLUDE_MODE; 2706 if (mode == MCAST_UNDEFINED) 2707 record_has_sources = 0; 2708 } 2709 } else { 2710 if (record_has_sources) { 2711 is_filter_list_change = 1; 2712 } else { 2713 type = IGMP_DO_NOTHING; 2714 } 2715 } 2716 } else { 2717 /* 2718 * Queue a current state record. 2719 */ 2720 if (mode == MCAST_EXCLUDE) { 2721 type = IGMP_MODE_IS_EXCLUDE; 2722 } else if (mode == MCAST_INCLUDE) { 2723 type = IGMP_MODE_IS_INCLUDE; 2724 KASSERT(inm->inm_st[1].iss_asm == 0, 2725 ("%s: inm %p is INCLUDE but ASM count is %d", 2726 __func__, inm, inm->inm_st[1].iss_asm)); 2727 } 2728 } 2729 2730 /* 2731 * Generate the filter list changes using a separate function. 2732 */ 2733 if (is_filter_list_change) 2734 return (igmp_v3_enqueue_filter_change(ifq, inm)); 2735 2736 if (type == IGMP_DO_NOTHING) { 2737 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s", 2738 __func__, inet_ntoa(inm->inm_addr), 2739 inm->inm_ifp->if_xname); 2740 return (0); 2741 } 2742 2743 /* 2744 * If any sources are present, we must be able to fit at least 2745 * one in the trailing space of the tail packet's mbuf, 2746 * ideally more. 
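	 * (That minimum is sizeof(struct igmp_grouprec) +
	 * sizeof(in_addr_t), i.e. 8 + 4 = 12 bytes.)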
2747 */ 2748 minrec0len = sizeof(struct igmp_grouprec); 2749 if (record_has_sources) 2750 minrec0len += sizeof(in_addr_t); 2751 2752 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__, 2753 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr), 2754 inm->inm_ifp->if_xname); 2755 2756 /* 2757 * Check if we have a packet in the tail of the queue for this 2758 * group into which the first group record for this group will fit. 2759 * Otherwise allocate a new packet. 2760 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT. 2761 * Note: Group records for G/GSR query responses MUST be sent 2762 * in their own packet. 2763 */ 2764 m0 = ifq->ifq_tail; 2765 if (!is_group_query && 2766 m0 != NULL && 2767 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && 2768 (m0->m_pkthdr.len + minrec0len) < 2769 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 2770 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2771 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2772 m = m0; 2773 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__); 2774 } else { 2775 if (_IF_QFULL(ifq)) { 2776 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2777 return (-ENOMEM); 2778 } 2779 m = NULL; 2780 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2781 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2782 if (!is_state_change && !is_group_query) { 2783 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2784 if (m) 2785 m->m_data += IGMP_LEADINGSPACE; 2786 } 2787 if (m == NULL) { 2788 m = m_gethdr(M_NOWAIT, MT_DATA); 2789 if (m) 2790 MH_ALIGN(m, IGMP_LEADINGSPACE); 2791 } 2792 if (m == NULL) 2793 return (-ENOMEM); 2794 2795 igmp_save_context(m, ifp); 2796 2797 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__); 2798 } 2799 2800 /* 2801 * Append group record. 2802 * If we have sources, we don't know how many yet. 2803 */ 2804 ig.ig_type = type; 2805 ig.ig_datalen = 0; 2806 ig.ig_numsrc = 0; 2807 ig.ig_group = inm->inm_addr; 2808 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2809 if (m != m0) 2810 m_freem(m); 2811 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2812 return (-ENOMEM); 2813 } 2814 nbytes += sizeof(struct igmp_grouprec); 2815 2816 /* 2817 * Append as many sources as will fit in the first packet. 2818 * If we are appending to a new packet, the chain allocation 2819 * may potentially use clusters; use m_getptr() in this case. 2820 * If we are appending to an existing packet, we need to obtain 2821 * a pointer to the group record after m_append(), in case a new 2822 * mbuf was allocated. 2823 * Only append sources which are in-mode at t1. If we are 2824 * transitioning to MCAST_UNDEFINED state on the group, do not 2825 * include source entries. 2826 * Only report recorded sources in our filter set when responding 2827 * to a group-source query. 
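	 * ("In-mode" means the source's own filter mode at t1 matches the
	 * group's filter mode, e.g. an EXCLUDE-mode source on an
	 * EXCLUDE-mode group.)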
2828 */ 2829 if (record_has_sources) { 2830 if (m == m0) { 2831 md = m_last(m); 2832 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2833 md->m_len - nbytes); 2834 } else { 2835 md = m_getptr(m, 0, &off); 2836 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2837 off); 2838 } 2839 msrcs = 0; 2840 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) { 2841 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2842 inet_ntoa_haddr(ims->ims_haddr)); 2843 now = ims_get_mode(inm, ims, 1); 2844 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now); 2845 if ((now != mode) || 2846 (now == mode && mode == MCAST_UNDEFINED)) { 2847 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2848 continue; 2849 } 2850 if (is_source_query && ims->ims_stp == 0) { 2851 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2852 __func__); 2853 continue; 2854 } 2855 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2856 naddr = htonl(ims->ims_haddr); 2857 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2858 if (m != m0) 2859 m_freem(m); 2860 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2861 __func__); 2862 return (-ENOMEM); 2863 } 2864 nbytes += sizeof(in_addr_t); 2865 ++msrcs; 2866 if (msrcs == m0srcs) 2867 break; 2868 } 2869 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__, 2870 msrcs); 2871 pig->ig_numsrc = htons(msrcs); 2872 nbytes += (msrcs * sizeof(in_addr_t)); 2873 } 2874 2875 if (is_source_query && msrcs == 0) { 2876 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__); 2877 if (m != m0) 2878 m_freem(m); 2879 return (0); 2880 } 2881 2882 /* 2883 * We are good to go with first packet. 2884 */ 2885 if (m != m0) { 2886 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__); 2887 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2888 _IF_ENQUEUE(ifq, m); 2889 } else 2890 m->m_pkthdr.PH_vt.vt_nrecs++; 2891 2892 /* 2893 * No further work needed if no source list in packet(s). 2894 */ 2895 if (!record_has_sources) 2896 return (nbytes); 2897 2898 /* 2899 * Whilst sources remain to be announced, we need to allocate 2900 * a new packet and fill out as many sources as will fit. 2901 * Always try for a cluster first. 
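	 * (nims still points at the first source not yet emitted by the
	 * walk above, so the RB_FOREACH_FROM() below resumes exactly
	 * where the first packet left off.)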
2902 */ 2903 while (nims != NULL) { 2904 if (_IF_QFULL(ifq)) { 2905 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2906 return (-ENOMEM); 2907 } 2908 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2909 if (m) 2910 m->m_data += IGMP_LEADINGSPACE; 2911 if (m == NULL) { 2912 m = m_gethdr(M_NOWAIT, MT_DATA); 2913 if (m) 2914 MH_ALIGN(m, IGMP_LEADINGSPACE); 2915 } 2916 if (m == NULL) 2917 return (-ENOMEM); 2918 igmp_save_context(m, ifp); 2919 md = m_getptr(m, 0, &off); 2920 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off); 2921 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__); 2922 2923 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2924 if (m != m0) 2925 m_freem(m); 2926 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2927 return (-ENOMEM); 2928 } 2929 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2930 nbytes += sizeof(struct igmp_grouprec); 2931 2932 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2933 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2934 2935 msrcs = 0; 2936 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 2937 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2938 inet_ntoa_haddr(ims->ims_haddr)); 2939 now = ims_get_mode(inm, ims, 1); 2940 if ((now != mode) || 2941 (now == mode && mode == MCAST_UNDEFINED)) { 2942 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2943 continue; 2944 } 2945 if (is_source_query && ims->ims_stp == 0) { 2946 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2947 __func__); 2948 continue; 2949 } 2950 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2951 naddr = htonl(ims->ims_haddr); 2952 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2953 if (m != m0) 2954 m_freem(m); 2955 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2956 __func__); 2957 return (-ENOMEM); 2958 } 2959 ++msrcs; 2960 if (msrcs == m0srcs) 2961 break; 2962 } 2963 pig->ig_numsrc = htons(msrcs); 2964 nbytes += (msrcs * sizeof(in_addr_t)); 2965 2966 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__); 2967 _IF_ENQUEUE(ifq, m); 2968 } 2969 2970 return (nbytes); 2971 } 2972 2973 /* 2974 * Type used to mark record pass completion. 2975 * We exploit the fact we can cast to this easily from the 2976 * current filter modes on each ip_msource node. 2977 */ 2978 typedef enum { 2979 REC_NONE = 0x00, /* MCAST_UNDEFINED */ 2980 REC_ALLOW = 0x01, /* MCAST_INCLUDE */ 2981 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ 2982 REC_FULL = REC_ALLOW | REC_BLOCK 2983 } rectype_t; 2984 2985 /* 2986 * Enqueue an IGMPv3 filter list change to the given output queue. 2987 * 2988 * Source list filter state is held in an RB-tree. When the filter list 2989 * for a group is changed without changing its mode, we need to compute 2990 * the deltas between T0 and T1 for each source in the filter set, 2991 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. 2992 * 2993 * As we may potentially queue two record types, and the entire R-B tree 2994 * needs to be walked at once, we break this out into its own function 2995 * so we can generate a tightly packed queue of packets. 2996 * 2997 * XXX This could be written to only use one tree walk, although that makes 2998 * serializing into the mbuf chains a bit harder. For now we do two walks 2999 * which makes things easier on us, and it may or may not be harder on 3000 * the L2 cache. 3001 * 3002 * If successful the size of all data appended to the queue is returned, 3003 * otherwise an error code less than zero is returned, or zero if 3004 * no record(s) were appended. 
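 *
 * For example, changing a group's filter from INCLUDE {A, B} to
 * INCLUDE {B, C} without a mode change yields an ALLOW_NEW_SOURCES {C}
 * record and a BLOCK_OLD_SOURCES {A} record, in whichever order the
 * tree walk encounters the deltas.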
3005 */ 3006 static int 3007 igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) 3008 { 3009 static const int MINRECLEN = 3010 sizeof(struct igmp_grouprec) + sizeof(in_addr_t); 3011 struct ifnet *ifp; 3012 struct igmp_grouprec ig; 3013 struct igmp_grouprec *pig; 3014 struct ip_msource *ims, *nims; 3015 struct mbuf *m, *m0, *md; 3016 in_addr_t naddr; 3017 int m0srcs, nbytes, npbytes, off, rsrcs, schanged; 3018 int nallow, nblock; 3019 uint8_t mode, now, then; 3020 rectype_t crt, drt, nrt; 3021 3022 IN_MULTI_LOCK_ASSERT(); 3023 3024 if (inm->inm_nsrc == 0 || 3025 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) 3026 return (0); 3027 3028 ifp = inm->inm_ifp; /* interface */ 3029 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */ 3030 crt = REC_NONE; /* current group record type */ 3031 drt = REC_NONE; /* mask of completed group record types */ 3032 nrt = REC_NONE; /* record type for current node */ 3033 m0srcs = 0; /* # source which will fit in current mbuf chain */ 3034 nbytes = 0; /* # of bytes appended to group's state-change queue */ 3035 npbytes = 0; /* # of bytes appended this packet */ 3036 rsrcs = 0; /* # sources encoded in current record */ 3037 schanged = 0; /* # nodes encoded in overall filter change */ 3038 nallow = 0; /* # of source entries in ALLOW_NEW */ 3039 nblock = 0; /* # of source entries in BLOCK_OLD */ 3040 nims = NULL; /* next tree node pointer */ 3041 3042 /* 3043 * For each possible filter record mode. 3044 * The first kind of source we encounter tells us which 3045 * is the first kind of record we start appending. 3046 * If a node transitioned to UNDEFINED at t1, its mode is treated 3047 * as the inverse of the group's filter mode. 3048 */ 3049 while (drt != REC_FULL) { 3050 do { 3051 m0 = ifq->ifq_tail; 3052 if (m0 != NULL && 3053 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= 3054 IGMP_V3_REPORT_MAXRECS) && 3055 (m0->m_pkthdr.len + MINRECLEN) < 3056 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 3057 m = m0; 3058 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 3059 sizeof(struct igmp_grouprec)) / 3060 sizeof(in_addr_t); 3061 CTR1(KTR_IGMPV3, 3062 "%s: use previous packet", __func__); 3063 } else { 3064 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 3065 if (m) 3066 m->m_data += IGMP_LEADINGSPACE; 3067 if (m == NULL) { 3068 m = m_gethdr(M_NOWAIT, MT_DATA); 3069 if (m) 3070 MH_ALIGN(m, IGMP_LEADINGSPACE); 3071 } 3072 if (m == NULL) { 3073 CTR1(KTR_IGMPV3, 3074 "%s: m_get*() failed", __func__); 3075 return (-ENOMEM); 3076 } 3077 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3078 igmp_save_context(m, ifp); 3079 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 3080 sizeof(struct igmp_grouprec)) / 3081 sizeof(in_addr_t); 3082 npbytes = 0; 3083 CTR1(KTR_IGMPV3, 3084 "%s: allocated new packet", __func__); 3085 } 3086 /* 3087 * Append the IGMP group record header to the 3088 * current packet's data area. 3089 * Recalculate pointer to free space for next 3090 * group record, in case m_append() allocated 3091 * a new mbuf or cluster. 
3092 */ 3093 memset(&ig, 0, sizeof(ig)); 3094 ig.ig_group = inm->inm_addr; 3095 if (!m_append(m, sizeof(ig), (void *)&ig)) { 3096 if (m != m0) 3097 m_freem(m); 3098 CTR1(KTR_IGMPV3, 3099 "%s: m_append() failed", __func__); 3100 return (-ENOMEM); 3101 } 3102 npbytes += sizeof(struct igmp_grouprec); 3103 if (m != m0) { 3104 /* new packet; offset in chain */ 3105 md = m_getptr(m, npbytes - 3106 sizeof(struct igmp_grouprec), &off); 3107 pig = (struct igmp_grouprec *)(mtod(md, 3108 uint8_t *) + off); 3109 } else { 3110 /* current packet; offset from last append */ 3111 md = m_last(m); 3112 pig = (struct igmp_grouprec *)(mtod(md, 3113 uint8_t *) + md->m_len - 3114 sizeof(struct igmp_grouprec)); 3115 } 3116 /* 3117 * Begin walking the tree for this record type 3118 * pass, or continue from where we left off 3119 * previously if we had to allocate a new packet. 3120 * Only report deltas in-mode at t1. 3121 * We need not report included sources as allowed 3122 * if we are in inclusive mode on the group, 3123 * however the converse is not true. 3124 */ 3125 rsrcs = 0; 3126 if (nims == NULL) 3127 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs); 3128 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 3129 CTR2(KTR_IGMPV3, "%s: visit node %s", 3130 __func__, inet_ntoa_haddr(ims->ims_haddr)); 3131 now = ims_get_mode(inm, ims, 1); 3132 then = ims_get_mode(inm, ims, 0); 3133 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d", 3134 __func__, then, now); 3135 if (now == then) { 3136 CTR1(KTR_IGMPV3, 3137 "%s: skip unchanged", __func__); 3138 continue; 3139 } 3140 if (mode == MCAST_EXCLUDE && 3141 now == MCAST_INCLUDE) { 3142 CTR1(KTR_IGMPV3, 3143 "%s: skip IN src on EX group", 3144 __func__); 3145 continue; 3146 } 3147 nrt = (rectype_t)now; 3148 if (nrt == REC_NONE) 3149 nrt = (rectype_t)(~mode & REC_FULL); 3150 if (schanged++ == 0) { 3151 crt = nrt; 3152 } else if (crt != nrt) 3153 continue; 3154 naddr = htonl(ims->ims_haddr); 3155 if (!m_append(m, sizeof(in_addr_t), 3156 (void *)&naddr)) { 3157 if (m != m0) 3158 m_freem(m); 3159 CTR1(KTR_IGMPV3, 3160 "%s: m_append() failed", 3161 __func__); 3161 return (-ENOMEM); 3162 } 3163 nallow += !!(crt == REC_ALLOW); 3164 nblock += !!(crt == REC_BLOCK); 3165 if (++rsrcs == m0srcs) 3166 break; 3167 } 3168 /* 3169 * If we did not append any tree nodes on this 3170 * pass, back out of allocations. 3171 */ 3172 if (rsrcs == 0) { 3173 npbytes -= sizeof(struct igmp_grouprec); 3174 if (m != m0) { 3175 CTR1(KTR_IGMPV3, 3176 "%s: m_free(m)", __func__); 3177 m_freem(m); 3178 } else { 3179 CTR1(KTR_IGMPV3, 3180 "%s: m_adj(m, -ig)", __func__); 3181 m_adj(m, -((int)sizeof( 3182 struct igmp_grouprec))); 3183 } 3184 continue; 3185 } 3186 npbytes += (rsrcs * sizeof(in_addr_t)); 3187 if (crt == REC_ALLOW) 3188 pig->ig_type = IGMP_ALLOW_NEW_SOURCES; 3189 else if (crt == REC_BLOCK) 3190 pig->ig_type = IGMP_BLOCK_OLD_SOURCES; 3191 pig->ig_numsrc = htons(rsrcs); 3192 /* 3193 * Count the new group record, and enqueue this 3194 * packet if it wasn't already queued.
3195 */ 3196 m->m_pkthdr.PH_vt.vt_nrecs++; 3197 if (m != m0) 3198 _IF_ENQUEUE(ifq, m); 3199 nbytes += npbytes; 3200 } while (nims != NULL); 3201 drt |= crt; 3202 crt = (~crt & REC_FULL); 3203 } 3204 3205 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__, 3206 nallow, nblock); 3207 3208 return (nbytes); 3209 } 3210 3211 static int 3212 igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) 3213 { 3214 struct ifqueue *gq; 3215 struct mbuf *m; /* pending state-change */ 3216 struct mbuf *m0; /* copy of pending state-change */ 3217 struct mbuf *mt; /* last state-change in packet */ 3218 int docopy, domerge; 3219 u_int recslen; 3220 3221 docopy = 0; 3222 domerge = 0; 3223 recslen = 0; 3224 3225 IN_MULTI_LOCK_ASSERT(); 3226 IGMP_LOCK_ASSERT(); 3227 3228 /* 3229 * If there are further pending retransmissions, make a writable 3230 * copy of each queued state-change message before merging. 3231 */ 3232 if (inm->inm_scrv > 0) 3233 docopy = 1; 3234 3235 gq = &inm->inm_scq; 3236 #ifdef KTR 3237 if (gq->ifq_head == NULL) { 3238 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty", 3239 __func__, inm); 3240 } 3241 #endif 3242 3243 m = gq->ifq_head; 3244 while (m != NULL) { 3245 /* 3246 * Only merge the report into the current packet if 3247 * there is sufficient space to do so; an IGMPv3 report 3248 * packet may only contain 65,535 group records. 3249 * Always use a simple mbuf chain concatenation to do this, 3250 * as large state changes for single groups may have 3251 * allocated clusters. 3252 */ 3253 domerge = 0; 3254 mt = ifscq->ifq_tail; 3255 if (mt != NULL) { 3256 recslen = m_length(m, NULL); 3257 3258 if ((mt->m_pkthdr.PH_vt.vt_nrecs + 3259 m->m_pkthdr.PH_vt.vt_nrecs <= 3260 IGMP_V3_REPORT_MAXRECS) && 3261 (mt->m_pkthdr.len + recslen <= 3262 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE))) 3263 domerge = 1; 3264 } 3265 3266 if (!domerge && _IF_QFULL(gq)) { 3267 CTR2(KTR_IGMPV3, 3268 "%s: outbound queue full, skipping whole packet %p", 3269 __func__, m); 3270 mt = m->m_nextpkt; 3271 if (!docopy) 3272 m_freem(m); 3273 m = mt; 3274 continue; 3275 } 3276 3277 if (!docopy) { 3278 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m); 3279 _IF_DEQUEUE(gq, m0); 3280 m = m0->m_nextpkt; 3281 } else { 3282 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m); 3283 m0 = m_dup(m, M_NOWAIT); 3284 if (m0 == NULL) 3285 return (ENOMEM); 3286 m0->m_nextpkt = NULL; 3287 m = m->m_nextpkt; 3288 } 3289 3290 if (!domerge) { 3291 CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)", 3292 __func__, m0, ifscq); 3293 _IF_ENQUEUE(ifscq, m0); 3294 } else { 3295 struct mbuf *mtl; /* last mbuf of packet mt */ 3296 3297 CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)", 3298 __func__, m0, mt); 3299 3300 mtl = m_last(mt); 3301 m0->m_flags &= ~M_PKTHDR; 3302 mt->m_pkthdr.len += recslen; 3303 mt->m_pkthdr.PH_vt.vt_nrecs += 3304 m0->m_pkthdr.PH_vt.vt_nrecs; 3305 3306 mtl->m_next = m0; 3307 } 3308 } 3309 3310 return (0); 3311 } 3312 3313 /* 3314 * Respond to a pending IGMPv3 General Query.
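 *
 * Current-State records are queued for every reportable membership on
 * the link; up to IGMP_MAX_RESPONSE_BURST packets are sent immediately
 * and any remainder is paced by the per-interface timer below.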
3315 */ 3316 static void 3317 igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi) 3318 { 3319 struct ifmultiaddr *ifma; 3320 struct ifnet *ifp; 3321 struct in_multi *inm; 3322 int retval, loop; 3323 3324 IN_MULTI_LOCK_ASSERT(); 3325 IGMP_LOCK_ASSERT(); 3326 3327 KASSERT(igi->igi_version == IGMP_VERSION_3, 3328 ("%s: called when version %d", __func__, igi->igi_version)); 3329 3330 ifp = igi->igi_ifp; 3331 3332 IF_ADDR_RLOCK(ifp); 3333 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3334 if (ifma->ifma_addr->sa_family != AF_INET || 3335 ifma->ifma_protospec == NULL) 3336 continue; 3337 3338 inm = (struct in_multi *)ifma->ifma_protospec; 3339 KASSERT(ifp == inm->inm_ifp, 3340 ("%s: inconsistent ifp", __func__)); 3341 3342 switch (inm->inm_state) { 3343 case IGMP_NOT_MEMBER: 3344 case IGMP_SILENT_MEMBER: 3345 break; 3346 case IGMP_REPORTING_MEMBER: 3347 case IGMP_IDLE_MEMBER: 3348 case IGMP_LAZY_MEMBER: 3349 case IGMP_SLEEPING_MEMBER: 3350 case IGMP_AWAKENING_MEMBER: 3351 inm->inm_state = IGMP_REPORTING_MEMBER; 3352 retval = igmp_v3_enqueue_group_record(&igi->igi_gq, 3353 inm, 0, 0, 0); 3354 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 3355 __func__, retval); 3356 break; 3357 case IGMP_G_QUERY_PENDING_MEMBER: 3358 case IGMP_SG_QUERY_PENDING_MEMBER: 3359 case IGMP_LEAVING_MEMBER: 3360 break; 3361 } 3362 } 3363 IF_ADDR_RUNLOCK(ifp); 3364 3365 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 3366 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop); 3367 3368 /* 3369 * Slew transmission of bursts over 500ms intervals. 3370 */ 3371 if (igi->igi_gq.ifq_head != NULL) { 3372 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY( 3373 IGMP_RESPONSE_BURST_INTERVAL); 3374 V_interface_timers_running = 1; 3375 } 3376 } 3377 3378 /* 3379 * Transmit the next pending IGMP message in the output queue. 3380 * 3381 * We get called from netisr_processqueue(). A mutex private to igmpoq 3382 * will be acquired and released around this routine. 3383 * 3384 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis. 3385 * MRT: Nothing needs to be done, as IGMP traffic is always local to 3386 * a link and uses a link-scope multicast address. 3387 */ 3388 static void 3389 igmp_intr(struct mbuf *m) 3390 { 3391 struct ip_moptions imo; 3392 struct ifnet *ifp; 3393 struct mbuf *ipopts, *m0; 3394 int error; 3395 uint32_t ifindex; 3396 3397 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m); 3398 3399 /* 3400 * Set VNET image pointer from enqueued mbuf chain 3401 * before doing anything else. Whilst we use interface 3402 * indexes to guard against interface detach, they are 3403 * unique to each VIMAGE and must be retrieved. 3404 */ 3405 CURVNET_SET((struct vnet *)(m->m_pkthdr.header)); 3406 ifindex = igmp_restore_context(m); 3407 3408 /* 3409 * Check if the ifnet still exists. This limits the scope of 3410 * any race in the absence of a global ifp lock for low cost 3411 * (an array lookup). 3412 */ 3413 ifp = ifnet_byindex(ifindex); 3414 if (ifp == NULL) { 3415 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.", 3416 __func__, m, ifindex); 3417 m_freem(m); 3418 IPSTAT_INC(ips_noroute); 3419 goto out; 3420 } 3421 3422 ipopts = V_igmp_sendra ? m_raopt : NULL; 3423 3424 imo.imo_multicast_ttl = 1; 3425 imo.imo_multicast_vif = -1; 3426 imo.imo_multicast_loop = (V_ip_mrouter != NULL); 3427 3428 /* 3429 * If the user requested that IGMP traffic be explicitly 3430 * redirected to the loopback interface (e.g. 
they are running a 3431 * MANET interface and the routing protocol needs to see the 3432 * updates), handle this now. 3433 */ 3434 if (m->m_flags & M_IGMP_LOOP) 3435 imo.imo_multicast_ifp = V_loif; 3436 else 3437 imo.imo_multicast_ifp = ifp; 3438 3439 if (m->m_flags & M_IGMPV2) { 3440 m0 = m; 3441 } else { 3442 m0 = igmp_v3_encap_report(ifp, m); 3443 if (m0 == NULL) { 3444 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m); 3445 m_freem(m); 3446 IPSTAT_INC(ips_odropped); 3447 goto out; 3448 } 3449 } 3450 3451 igmp_scrub_context(m0); 3452 m->m_flags &= ~(M_PROTOFLAGS); 3453 m0->m_pkthdr.rcvif = V_loif; 3454 #ifdef MAC 3455 mac_netinet_igmp_send(ifp, m0); 3456 #endif 3457 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL); 3458 if (error) { 3459 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error); 3460 goto out; 3461 } 3462 3463 IGMPSTAT_INC(igps_snd_reports); 3464 3465 out: 3466 /* 3467 * We must restore the existing vnet pointer before 3468 * continuing as we are run from netisr context. 3469 */ 3470 CURVNET_RESTORE(); 3471 } 3472 3473 /* 3474 * Encapsulate an IGMPv3 report. 3475 * 3476 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf 3477 * chain has already had its IP/IGMPv3 header prepended. In this case 3478 * the function will not attempt to prepend; the lengths and checksums 3479 * will however be re-computed. 3480 * 3481 * Returns a pointer to the new mbuf chain head, or NULL if the 3482 * allocation failed. 3483 */ 3484 static struct mbuf * 3485 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) 3486 { 3487 struct igmp_report *igmp; 3488 struct ip *ip; 3489 int hdrlen, igmpreclen; 3490 3491 KASSERT((m->m_flags & M_PKTHDR), 3492 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m)); 3493 3494 igmpreclen = m_length(m, NULL); 3495 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report); 3496 3497 if (m->m_flags & M_IGMPV3_HDR) { 3498 igmpreclen -= hdrlen; 3499 } else { 3500 M_PREPEND(m, hdrlen, M_NOWAIT); 3501 if (m == NULL) 3502 return (NULL); 3503 m->m_flags |= M_IGMPV3_HDR; 3504 } 3505 3506 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen); 3507 3508 m->m_data += sizeof(struct ip); 3509 m->m_len -= sizeof(struct ip); 3510 3511 igmp = mtod(m, struct igmp_report *); 3512 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT; 3513 igmp->ir_rsv1 = 0; 3514 igmp->ir_rsv2 = 0; 3515 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs); 3516 igmp->ir_cksum = 0; 3517 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen); 3518 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3519 3520 m->m_data -= sizeof(struct ip); 3521 m->m_len += sizeof(struct ip); 3522 3523 ip = mtod(m, struct ip *); 3524 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL; 3525 ip->ip_len = htons(hdrlen + igmpreclen); 3526 ip->ip_off = htons(IP_DF); 3527 ip->ip_p = IPPROTO_IGMP; 3528 ip->ip_sum = 0; 3529 3530 ip->ip_src.s_addr = INADDR_ANY; 3531 3532 if (m->m_flags & M_IGMP_LOOP) { 3533 struct in_ifaddr *ia; 3534 3535 IFP_TO_IA(ifp, ia); 3536 if (ia != NULL) { 3537 ip->ip_src = ia->ia_addr.sin_addr; 3538 ifa_free(&ia->ia_ifa); 3539 } 3540 } 3541 3542 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP); 3543 3544 return (m); 3545 } 3546 3547 #ifdef KTR 3548 static char * 3549 igmp_rec_type_to_str(const int type) 3550 { 3551 3552 switch (type) { 3553 case IGMP_CHANGE_TO_EXCLUDE_MODE: 3554 return "TO_EX"; 3555 break; 3556 case IGMP_CHANGE_TO_INCLUDE_MODE: 3557 return "TO_IN"; 3558 break; 3559 case IGMP_MODE_IS_EXCLUDE: 3560 return "MODE_EX"; 3561 break; 3562 case IGMP_MODE_IS_INCLUDE: 3563 return 
"MODE_IN"; 3564 break; 3565 case IGMP_ALLOW_NEW_SOURCES: 3566 return "ALLOW_NEW"; 3567 break; 3568 case IGMP_BLOCK_OLD_SOURCES: 3569 return "BLOCK_OLD"; 3570 break; 3571 default: 3572 break; 3573 } 3574 return "unknown"; 3575 } 3576 #endif 3577 3578 static void 3579 igmp_init(void *unused __unused) 3580 { 3581 3582 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3583 3584 IGMP_LOCK_INIT(); 3585 3586 m_raopt = igmp_ra_alloc(); 3587 3588 netisr_register(&igmp_nh); 3589 } 3590 SYSINIT(igmp_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_init, NULL); 3591 3592 static void 3593 igmp_uninit(void *unused __unused) 3594 { 3595 3596 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3597 3598 netisr_unregister(&igmp_nh); 3599 3600 m_free(m_raopt); 3601 m_raopt = NULL; 3602 3603 IGMP_LOCK_DESTROY(); 3604 } 3605 SYSUNINIT(igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_uninit, NULL); 3606 3607 static void 3608 vnet_igmp_init(const void *unused __unused) 3609 { 3610 3611 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3612 3613 LIST_INIT(&V_igi_head); 3614 } 3615 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_igmp_init, 3616 NULL); 3617 3618 static void 3619 vnet_igmp_uninit(const void *unused __unused) 3620 { 3621 3622 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3623 3624 KASSERT(LIST_EMPTY(&V_igi_head), 3625 ("%s: igi list not empty; ifnets not detached?", __func__)); 3626 } 3627 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, 3628 vnet_igmp_uninit, NULL); 3629 3630 static int 3631 igmp_modevent(module_t mod, int type, void *unused __unused) 3632 { 3633 3634 switch (type) { 3635 case MOD_LOAD: 3636 case MOD_UNLOAD: 3637 break; 3638 default: 3639 return (EOPNOTSUPP); 3640 } 3641 return (0); 3642 } 3643 3644 static moduledata_t igmp_mod = { 3645 "igmp", 3646 igmp_modevent, 3647 0 3648 }; 3649 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3650