1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2009 Bruce Simpson. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote 15 * products derived from this software without specific prior written 16 * permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $ 31 */ 32 33 /*- 34 * Copyright (c) 1988 Stephen Deering. 35 * Copyright (c) 1992, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * This code is derived from software contributed to Berkeley by 39 * Stephen Deering of Stanford University. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. Neither the name of the University nor the names of its contributors 50 * may be used to endorse or promote products derived from this software 51 * without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 64 */ 65 66 #include "opt_inet.h" 67 #include "opt_inet6.h" 68 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/mbuf.h> 72 #include <sys/socket.h> 73 #include <sys/sysctl.h> 74 #include <sys/kernel.h> 75 #include <sys/callout.h> 76 #include <sys/malloc.h> 77 #include <sys/module.h> 78 #include <sys/ktr.h> 79 80 #include <net/if.h> 81 #include <net/if_var.h> 82 #include <net/if_private.h> 83 #include <net/route.h> 84 #include <net/vnet.h> 85 86 #include <netinet/in.h> 87 #include <netinet/in_var.h> 88 #include <netinet6/in6_var.h> 89 #include <netinet/ip6.h> 90 #include <netinet6/ip6_var.h> 91 #include <netinet6/scope6_var.h> 92 #include <netinet/icmp6.h> 93 #include <netinet6/mld6.h> 94 #include <netinet6/mld6_var.h> 95 96 #include <security/mac/mac_framework.h> 97 98 #ifndef KTR_MLD 99 #define KTR_MLD KTR_INET6 100 #endif 101 102 static void mld_dispatch_packet(struct mbuf *); 103 static void mld_dispatch_queue(struct mbufq *, int); 104 static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *); 105 static void mld_fasttimo_vnet(struct in6_multi_head *inmh); 106 static int mld_handle_state_change(struct in6_multi *, 107 struct mld_ifsoftc *); 108 static int mld_initial_join(struct in6_multi *, struct mld_ifsoftc *, 109 const int); 110 #ifdef KTR 111 static char * mld_rec_type_to_str(const int); 112 #endif 113 static void mld_set_version(struct mld_ifsoftc *, const int); 114 static void mld_slowtimo_vnet(void); 115 static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, 116 /*const*/ struct mld_hdr *); 117 static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *, 118 /*const*/ struct mld_hdr *); 119 static void mld_v1_process_group_timer(struct in6_multi_head *, 120 struct in6_multi *); 121 static void mld_v1_process_querier_timers(struct mld_ifsoftc *); 122 static int mld_v1_transmit_report(struct in6_multi *, const int); 123 static void mld_v1_update_group(struct in6_multi *, const int); 124 static void mld_v2_cancel_link_timers(struct mld_ifsoftc *); 125 static void mld_v2_dispatch_general_query(struct mld_ifsoftc *); 126 static struct mbuf * 127 mld_v2_encap_report(struct ifnet *, struct mbuf *); 128 static int mld_v2_enqueue_filter_change(struct mbufq *, 129 struct in6_multi *); 130 static int mld_v2_enqueue_group_record(struct mbufq *, 131 struct in6_multi *, const int, const int, const int, 132 const int); 133 static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, 134 struct mbuf *, struct mldv2_query *, const int, const int); 135 static int mld_v2_merge_state_changes(struct in6_multi *, 136 struct mbufq *); 137 static void mld_v2_process_group_timers(struct in6_multi_head *, 138 struct mbufq *, struct mbufq *, 139 struct in6_multi *, const int); 140 static int mld_v2_process_group_query(struct in6_multi *, 141 struct mld_ifsoftc *mli, int, struct mbuf *, 142 struct mldv2_query *, const int); 143 static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS); 144 static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS); 145 146 /* 147 * Normative references: RFC 2710, RFC 3590, RFC 3810. 148 * 149 * Locking: 150 * * The MLD subsystem lock ends up being system-wide for the moment, 151 * but could be per-VIMAGE later on. 152 * * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK. 153 * Any may be taken independently; if any are held at the same 154 * time, the above lock order must be followed. 155 * * IN6_MULTI_LOCK covers in_multi. 156 * * MLD_LOCK covers per-link state and any global variables in this file. 157 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of 158 * per-link state iterators. 159 * 160 * XXX LOR PREVENTION 161 * A special case for IPv6 is the in6_setscope() routine. ip6_output() 162 * will not accept an ifp; it wants an embedded scope ID, unlike 163 * ip_output(), which happily takes the ifp given to it. The embedded 164 * scope ID is only used by MLD to select the outgoing interface. 165 * 166 * During interface attach and detach, MLD will take MLD_LOCK *after* 167 * the LLTABLE_LOCK. 168 * As in6_setscope() takes LLTABLE_LOCK then SCOPE_LOCK, we can't call 169 * it with MLD_LOCK held without triggering an LOR. A netisr with indirect 170 * dispatch could work around this, but we'd rather not do that, as it 171 * can introduce other races. 172 * 173 * As such, we exploit the fact that the scope ID is just the interface 174 * index, and embed it in the IPv6 destination address accordingly. 175 * This is potentially NOT VALID for MLDv1 reports, as they 176 * are always sent to the multicast group itself; as MLDv2 177 * reports are always sent to ff02::16, this is not an issue 178 * when MLDv2 is in use. 179 * 180 * This does not however eliminate the LOR when ip6_output() itself 181 * calls in6_setscope() internally whilst MLD_LOCK is held. This will 182 * trigger a LOR warning in WITNESS when the ifnet is detached. 183 * 184 * The right answer is probably to make LLTABLE_LOCK an rwlock, given 185 * how it's used across the network stack. Here we're simply exploiting 186 * the fact that MLD runs at a similar layer in the stack to scope6.c. 187 * 188 * VIMAGE: 189 * * Each in6_multi corresponds to an ifp, and each ifp corresponds 190 * to a vnet in ifp->if_vnet. 191 */ 192 static struct mtx mld_mtx; 193 static MALLOC_DEFINE(M_MLD, "mld", "mld state"); 194 195 #define MLD_EMBEDSCOPE(pin6, zoneid) \ 196 if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \ 197 IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \ 198 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \ 199 200 /* 201 * VIMAGE-wide globals. 202 */ 203 VNET_DEFINE_STATIC(struct timeval, mld_gsrdelay) = {10, 0}; 204 VNET_DEFINE_STATIC(LIST_HEAD(, mld_ifsoftc), mli_head); 205 VNET_DEFINE_STATIC(int, interface_timers_running6); 206 VNET_DEFINE_STATIC(int, state_change_timers_running6); 207 VNET_DEFINE_STATIC(int, current_state_timers_running6); 208 209 #define V_mld_gsrdelay VNET(mld_gsrdelay) 210 #define V_mli_head VNET(mli_head) 211 #define V_interface_timers_running6 VNET(interface_timers_running6) 212 #define V_state_change_timers_running6 VNET(state_change_timers_running6) 213 #define V_current_state_timers_running6 VNET(current_state_timers_running6) 214 215 SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ 216 217 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 218 "IPv6 Multicast Listener Discovery"); 219 220 /* 221 * Virtualized sysctls. 222 */ 223 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay, 224 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 225 &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I", 226 "Rate limit for MLDv2 Group-and-Source queries in seconds"); 227 228 /* 229 * Non-virtualized sysctls. 230 */ 231 static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, 232 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo, 233 "Per-interface MLDv2 state"); 234 235 VNET_DEFINE_STATIC(bool, mld_v1enable) = true; 236 #define V_mld_v1enable VNET(mld_v1enable) 237 SYSCTL_BOOL(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RWTUN, 238 &VNET_NAME(mld_v1enable), 0, "Enable fallback to MLDv1"); 239 240 VNET_DEFINE_STATIC(bool, mld_v2enable) = true; 241 #define V_mld_v2enable VNET(mld_v2enable) 242 SYSCTL_BOOL(_net_inet6_mld, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RWTUN, 243 &VNET_NAME(mld_v2enable), 0, "Enable MLDv2"); 244 245 VNET_DEFINE_STATIC(bool, mld_use_allow) = true; 246 #define V_mld_use_allow VNET(mld_use_allow) 247 SYSCTL_BOOL(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_VNET | CTLFLAG_RWTUN, 248 &VNET_NAME(mld_use_allow), 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves"); 249 250 /* 251 * Packed Router Alert option structure declaration. 252 */ 253 struct mld_raopt { 254 struct ip6_hbh hbh; 255 struct ip6_opt pad; 256 struct ip6_opt_router ra; 257 } __packed; 258 259 /* 260 * Router Alert hop-by-hop option header. 261 */ 262 static struct mld_raopt mld_ra = { 263 .hbh = { 0, 0 }, 264 .pad = { .ip6o_type = IP6OPT_PADN, 0 }, 265 .ra = { 266 .ip6or_type = IP6OPT_ROUTER_ALERT, 267 .ip6or_len = IP6OPT_RTALERT_LEN - 2, 268 .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF), 269 .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF) 270 } 271 }; 272 static struct ip6_pktopts mld_po; 273 274 static __inline void 275 mld_save_context(struct mbuf *m, struct ifnet *ifp) 276 { 277 278 #ifdef VIMAGE 279 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet; 280 #endif /* VIMAGE */ 281 m->m_pkthdr.rcvif = ifp; 282 m->m_pkthdr.flowid = ifp->if_index; 283 } 284 285 static __inline void 286 mld_scrub_context(struct mbuf *m) 287 { 288 289 m->m_pkthdr.PH_loc.ptr = NULL; 290 m->m_pkthdr.flowid = 0; 291 } 292 293 /* 294 * Restore context from a queued output chain. 295 * Return saved ifindex. 296 * 297 * VIMAGE: The assertion is there to make sure that we 298 * actually called CURVNET_SET() with what's in the mbuf chain. 299 */ 300 static __inline uint32_t 301 mld_restore_context(struct mbuf *m) 302 { 303 304 #if defined(VIMAGE) && defined(INVARIANTS) 305 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr, 306 ("%s: called when curvnet was not restored: cuvnet %p m ptr %p", 307 __func__, curvnet, m->m_pkthdr.PH_loc.ptr)); 308 #endif 309 return (m->m_pkthdr.flowid); 310 } 311 312 /* 313 * Retrieve or set threshold between group-source queries in seconds. 314 * 315 * VIMAGE: Assume curvnet set by caller. 316 * SMPng: NOTE: Serialized by MLD lock. 317 */ 318 static int 319 sysctl_mld_gsr(SYSCTL_HANDLER_ARGS) 320 { 321 int error; 322 int i; 323 324 error = sysctl_wire_old_buffer(req, sizeof(int)); 325 if (error) 326 return (error); 327 328 MLD_LOCK(); 329 330 i = V_mld_gsrdelay.tv_sec; 331 332 error = sysctl_handle_int(oidp, &i, 0, req); 333 if (error || !req->newptr) 334 goto out_locked; 335 336 if (i < -1 || i >= 60) { 337 error = EINVAL; 338 goto out_locked; 339 } 340 341 CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d", 342 V_mld_gsrdelay.tv_sec, i); 343 V_mld_gsrdelay.tv_sec = i; 344 345 out_locked: 346 MLD_UNLOCK(); 347 return (error); 348 } 349 350 /* 351 * Expose struct mld_ifsoftc to userland, keyed by ifindex. 352 * For use by ifmcstat(8). 353 * 354 * VIMAGE: Assume curvnet set by caller. The node handler itself 355 * is not directly virtualized. 356 */ 357 static int 358 sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS) 359 { 360 struct epoch_tracker et; 361 int *name; 362 int error; 363 u_int namelen; 364 struct ifnet *ifp; 365 struct mld_ifsoftc *mli; 366 367 name = (int *)arg1; 368 namelen = arg2; 369 370 if (req->newptr != NULL) 371 return (EPERM); 372 373 if (namelen != 1) 374 return (EINVAL); 375 376 error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo)); 377 if (error) 378 return (error); 379 380 IN6_MULTI_LOCK(); 381 IN6_MULTI_LIST_LOCK(); 382 MLD_LOCK(); 383 NET_EPOCH_ENTER(et); 384 385 error = ENOENT; 386 ifp = ifnet_byindex(name[0]); 387 if (ifp == NULL) 388 goto out_locked; 389 390 LIST_FOREACH(mli, &V_mli_head, mli_link) { 391 if (ifp == mli->mli_ifp) { 392 struct mld_ifinfo info; 393 394 info.mli_version = mli->mli_version; 395 info.mli_v1_timer = mli->mli_v1_timer; 396 info.mli_v2_timer = mli->mli_v2_timer; 397 info.mli_flags = mli->mli_flags; 398 info.mli_rv = mli->mli_rv; 399 info.mli_qi = mli->mli_qi; 400 info.mli_qri = mli->mli_qri; 401 info.mli_uri = mli->mli_uri; 402 error = SYSCTL_OUT(req, &info, sizeof(info)); 403 break; 404 } 405 } 406 407 out_locked: 408 NET_EPOCH_EXIT(et); 409 MLD_UNLOCK(); 410 IN6_MULTI_LIST_UNLOCK(); 411 IN6_MULTI_UNLOCK(); 412 return (error); 413 } 414 415 /* 416 * Dispatch an entire queue of pending packet chains. 417 * VIMAGE: Assumes the vnet pointer has been set. 418 */ 419 static void 420 mld_dispatch_queue(struct mbufq *mq, int limit) 421 { 422 struct mbuf *m; 423 424 while ((m = mbufq_dequeue(mq)) != NULL) { 425 CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, mq, m); 426 mld_dispatch_packet(m); 427 if (--limit == 0) 428 break; 429 } 430 } 431 432 /* 433 * Filter outgoing MLD report state by group. 434 * 435 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1) 436 * and node-local addresses. However, kernel and socket consumers 437 * always embed the KAME scope ID in the address provided, so strip it 438 * when performing comparison. 439 * Note: This is not the same as the *multicast* scope. 440 * 441 * Return zero if the given group is one for which MLD reports 442 * should be suppressed, or non-zero if reports should be issued. 443 */ 444 static __inline int 445 mld_is_addr_reported(const struct in6_addr *addr) 446 { 447 448 KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__)); 449 450 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) 451 return (0); 452 453 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) { 454 struct in6_addr tmp = *addr; 455 in6_clearscope(&tmp); 456 if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) 457 return (0); 458 } 459 460 return (1); 461 } 462 463 /* 464 * Attach MLD when PF_INET6 is attached to an interface. Assumes that the 465 * current VNET is set by the caller. 466 */ 467 void 468 mld_domifattach(struct ifnet *ifp) 469 { 470 struct mld_ifsoftc *mli = MLD_IFINFO(ifp); 471 472 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp, if_name(ifp)); 473 474 *mli = (struct mld_ifsoftc){ 475 .mli_ifp = ifp, 476 .mli_version = MLD_VERSION_2, 477 .mli_rv = MLD_RV_INIT, 478 .mli_qi = MLD_QI_INIT, 479 .mli_qri = MLD_QRI_INIT, 480 .mli_uri = MLD_URI_INIT, 481 }; 482 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS); 483 if ((ifp->if_flags & IFF_MULTICAST) == 0) 484 mli->mli_flags |= MLIF_SILENT; 485 if (V_mld_use_allow) 486 mli->mli_flags |= MLIF_USEALLOW; 487 488 MLD_LOCK(); 489 LIST_INSERT_HEAD(&V_mli_head, mli, mli_link); 490 MLD_UNLOCK(); 491 } 492 493 /* 494 * Hook for ifdetach. 495 * 496 * NOTE: Some finalization tasks need to run before the protocol domain 497 * is detached, but also before the link layer does its cleanup. 498 * Run before link-layer cleanup; cleanup groups, but do not free MLD state. 499 * 500 * SMPng: Caller must hold IN6_MULTI_LOCK(). 501 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator. 502 * XXX This routine is also bitten by unlocked ifma_protospec access. 503 */ 504 void 505 mld_ifdetach(struct ifnet *ifp, struct in6_multi_head *inmh) 506 { 507 struct epoch_tracker et; 508 struct mld_ifsoftc *mli; 509 struct ifmultiaddr *ifma; 510 struct in6_multi *inm; 511 512 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp, 513 if_name(ifp)); 514 515 IN6_MULTI_LIST_LOCK_ASSERT(); 516 MLD_LOCK(); 517 518 mli = MLD_IFINFO(ifp); 519 IF_ADDR_WLOCK(ifp); 520 /* 521 * Extract list of in6_multi associated with the detaching ifp 522 * which the PF_INET6 layer is about to release. 523 */ 524 NET_EPOCH_ENTER(et); 525 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 526 inm = in6m_ifmultiaddr_get_inm(ifma); 527 if (inm == NULL) 528 continue; 529 in6m_disconnect_locked(inmh, inm); 530 531 if (mli->mli_version == MLD_VERSION_2) { 532 in6m_clear_recorded(inm); 533 534 /* 535 * We need to release the final reference held 536 * for issuing the INCLUDE {}. 537 */ 538 if (inm->in6m_state == MLD_LEAVING_MEMBER) { 539 inm->in6m_state = MLD_NOT_MEMBER; 540 in6m_rele_locked(inmh, inm); 541 } 542 } 543 } 544 NET_EPOCH_EXIT(et); 545 IF_ADDR_WUNLOCK(ifp); 546 MLD_UNLOCK(); 547 } 548 549 /* 550 * Hook for domifdetach. 551 * Runs after link-layer cleanup; free MLD state. 552 */ 553 void 554 mld_domifdetach(struct ifnet *ifp) 555 { 556 struct mld_ifsoftc *mli = MLD_IFINFO(ifp); 557 558 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", 559 __func__, ifp, if_name(ifp)); 560 561 MLD_LOCK(); 562 LIST_REMOVE(mli, mli_link); 563 MLD_UNLOCK(); 564 mbufq_drain(&mli->mli_gq); 565 } 566 567 /* 568 * Process a received MLDv1 general or address-specific query. 569 * Assumes that the query header has been pulled up to sizeof(mld_hdr). 570 * 571 * NOTE: Can't be fully const correct as we temporarily embed scope ID in 572 * mld_addr. This is OK as we own the mbuf chain. 573 */ 574 static int 575 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, 576 /*const*/ struct mld_hdr *mld) 577 { 578 struct ifmultiaddr *ifma; 579 struct mld_ifsoftc *mli; 580 struct in6_multi *inm; 581 int is_general_query; 582 uint16_t timer; 583 #ifdef KTR 584 char ip6tbuf[INET6_ADDRSTRLEN]; 585 #endif 586 587 NET_EPOCH_ASSERT(); 588 589 is_general_query = 0; 590 591 if (!V_mld_v1enable) { 592 CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)", 593 ip6_sprintf(ip6tbuf, &mld->mld_addr), 594 ifp, if_name(ifp)); 595 return (0); 596 } 597 598 /* 599 * RFC3810 Section 6.2: MLD queries must originate from 600 * a router's link-local address. 601 */ 602 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { 603 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)", 604 ip6_sprintf(ip6tbuf, &ip6->ip6_src), 605 ifp, if_name(ifp)); 606 return (0); 607 } 608 609 /* 610 * Do address field validation upfront before we accept 611 * the query. 612 */ 613 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { 614 /* 615 * MLDv1 General Query. 616 * If this was not sent to the all-nodes group, ignore it. 617 */ 618 struct in6_addr dst; 619 620 dst = ip6->ip6_dst; 621 in6_clearscope(&dst); 622 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) 623 return (EINVAL); 624 is_general_query = 1; 625 } else { 626 /* 627 * Embed scope ID of receiving interface in MLD query for 628 * lookup whilst we don't hold other locks. 629 */ 630 in6_setscope(&mld->mld_addr, ifp, NULL); 631 } 632 633 IN6_MULTI_LIST_LOCK(); 634 MLD_LOCK(); 635 636 /* 637 * Switch to MLDv1 host compatibility mode. 638 */ 639 mli = MLD_IFINFO(ifp); 640 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp)); 641 mld_set_version(mli, MLD_VERSION_1); 642 643 timer = (ntohs(mld->mld_maxdelay) * MLD_FASTHZ) / MLD_TIMER_SCALE; 644 if (timer == 0) 645 timer = 1; 646 647 if (is_general_query) { 648 /* 649 * For each reporting group joined on this 650 * interface, kick the report timer. 651 */ 652 CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)", 653 ifp, if_name(ifp)); 654 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 655 inm = in6m_ifmultiaddr_get_inm(ifma); 656 if (inm == NULL) 657 continue; 658 mld_v1_update_group(inm, timer); 659 } 660 } else { 661 /* 662 * MLDv1 Group-Specific Query. 663 * If this is a group-specific MLDv1 query, we need only 664 * look up the single group to process it. 665 */ 666 inm = in6m_lookup_locked(ifp, &mld->mld_addr); 667 if (inm != NULL) { 668 CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)", 669 ip6_sprintf(ip6tbuf, &mld->mld_addr), 670 ifp, if_name(ifp)); 671 mld_v1_update_group(inm, timer); 672 } 673 /* XXX Clear embedded scope ID as userland won't expect it. */ 674 in6_clearscope(&mld->mld_addr); 675 } 676 677 MLD_UNLOCK(); 678 IN6_MULTI_LIST_UNLOCK(); 679 680 return (0); 681 } 682 683 /* 684 * Update the report timer on a group in response to an MLDv1 query. 685 * 686 * If we are becoming the reporting member for this group, start the timer. 687 * If we already are the reporting member for this group, and timer is 688 * below the threshold, reset it. 689 * 690 * We may be updating the group for the first time since we switched 691 * to MLDv2. If we are, then we must clear any recorded source lists, 692 * and transition to REPORTING state; the group timer is overloaded 693 * for group and group-source query responses. 694 * 695 * Unlike MLDv2, the delay per group should be jittered 696 * to avoid bursts of MLDv1 reports. 697 */ 698 static void 699 mld_v1_update_group(struct in6_multi *inm, const int timer) 700 { 701 #ifdef KTR 702 char ip6tbuf[INET6_ADDRSTRLEN]; 703 #endif 704 705 CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__, 706 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 707 if_name(inm->in6m_ifp), timer); 708 709 IN6_MULTI_LIST_LOCK_ASSERT(); 710 711 switch (inm->in6m_state) { 712 case MLD_NOT_MEMBER: 713 case MLD_SILENT_MEMBER: 714 break; 715 case MLD_REPORTING_MEMBER: 716 if (inm->in6m_timer != 0 && 717 inm->in6m_timer <= timer) { 718 CTR1(KTR_MLD, "%s: REPORTING and timer running, " 719 "skipping.", __func__); 720 break; 721 } 722 /* FALLTHROUGH */ 723 case MLD_SG_QUERY_PENDING_MEMBER: 724 case MLD_G_QUERY_PENDING_MEMBER: 725 case MLD_IDLE_MEMBER: 726 case MLD_LAZY_MEMBER: 727 case MLD_AWAKENING_MEMBER: 728 CTR1(KTR_MLD, "%s: ->REPORTING", __func__); 729 inm->in6m_state = MLD_REPORTING_MEMBER; 730 inm->in6m_timer = MLD_RANDOM_DELAY(timer); 731 V_current_state_timers_running6 = 1; 732 break; 733 case MLD_SLEEPING_MEMBER: 734 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__); 735 inm->in6m_state = MLD_AWAKENING_MEMBER; 736 break; 737 case MLD_LEAVING_MEMBER: 738 break; 739 } 740 } 741 742 /* 743 * Process a received MLDv2 general, group-specific or 744 * group-and-source-specific query. 745 * 746 * Assumes that mld points to a struct mldv2_query which is stored in 747 * contiguous memory. 748 * 749 * Return 0 if successful, otherwise an appropriate error code is returned. 750 */ 751 static int 752 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, 753 struct mbuf *m, struct mldv2_query *mld, const int off, const int icmp6len) 754 { 755 struct mld_ifsoftc *mli; 756 struct in6_multi *inm; 757 uint32_t maxdelay, nsrc, qqi; 758 int is_general_query; 759 uint16_t timer; 760 uint8_t qrv; 761 #ifdef KTR 762 char ip6tbuf[INET6_ADDRSTRLEN]; 763 #endif 764 765 NET_EPOCH_ASSERT(); 766 767 if (!V_mld_v2enable) { 768 CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)", 769 ip6_sprintf(ip6tbuf, &ip6->ip6_src), 770 ifp, if_name(ifp)); 771 return (0); 772 } 773 774 /* 775 * RFC3810 Section 6.2: MLD queries must originate from 776 * a router's link-local address. 777 */ 778 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { 779 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)", 780 ip6_sprintf(ip6tbuf, &ip6->ip6_src), 781 ifp, if_name(ifp)); 782 return (0); 783 } 784 785 is_general_query = 0; 786 787 CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp)); 788 789 maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ 790 if (maxdelay >= 32768) { 791 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) << 792 (MLD_MRC_EXP(maxdelay) + 3); 793 } 794 timer = (maxdelay * MLD_FASTHZ) / MLD_TIMER_SCALE; 795 if (timer == 0) 796 timer = 1; 797 798 qrv = MLD_QRV(mld->mld_misc); 799 if (qrv < 2) { 800 CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__, 801 qrv, MLD_RV_INIT); 802 qrv = MLD_RV_INIT; 803 } 804 805 qqi = mld->mld_qqi; 806 if (qqi >= 128) { 807 qqi = MLD_QQIC_MANT(mld->mld_qqi) << 808 (MLD_QQIC_EXP(mld->mld_qqi) + 3); 809 } 810 811 nsrc = ntohs(mld->mld_numsrc); 812 if (nsrc > MLD_MAX_GS_SOURCES) 813 return (EMSGSIZE); 814 if (icmp6len < sizeof(struct mldv2_query) + 815 (nsrc * sizeof(struct in6_addr))) 816 return (EMSGSIZE); 817 818 /* 819 * Do further input validation upfront to avoid resetting timers 820 * should we need to discard this query. 821 */ 822 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { 823 /* 824 * A general query with a source list has undefined 825 * behaviour; discard it. 826 */ 827 if (nsrc > 0) 828 return (EINVAL); 829 is_general_query = 1; 830 } else { 831 /* 832 * Embed scope ID of receiving interface in MLD query for 833 * lookup whilst we don't hold other locks (due to KAME 834 * locking lameness). We own this mbuf chain just now. 835 */ 836 in6_setscope(&mld->mld_addr, ifp, NULL); 837 } 838 839 IN6_MULTI_LIST_LOCK(); 840 MLD_LOCK(); 841 842 mli = MLD_IFINFO(ifp); 843 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp)); 844 845 /* 846 * Discard the v2 query if we're in Compatibility Mode. 847 * The RFC is pretty clear that hosts need to stay in MLDv1 mode 848 * until the Old Version Querier Present timer expires. 849 */ 850 if (mli->mli_version != MLD_VERSION_2) 851 goto out_locked; 852 853 mld_set_version(mli, MLD_VERSION_2); 854 mli->mli_rv = qrv; 855 mli->mli_qi = qqi; 856 mli->mli_qri = maxdelay; 857 858 CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi, 859 maxdelay); 860 861 if (is_general_query) { 862 /* 863 * MLDv2 General Query. 864 * 865 * Schedule a current-state report on this ifp for 866 * all groups, possibly containing source lists. 867 * 868 * If there is a pending General Query response 869 * scheduled earlier than the selected delay, do 870 * not schedule any other reports. 871 * Otherwise, reset the interface timer. 872 */ 873 CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)", 874 ifp, if_name(ifp)); 875 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) { 876 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer); 877 V_interface_timers_running6 = 1; 878 } 879 } else { 880 /* 881 * MLDv2 Group-specific or Group-and-source-specific Query. 882 * 883 * Group-source-specific queries are throttled on 884 * a per-group basis to defeat denial-of-service attempts. 885 * Queries for groups we are not a member of on this 886 * link are simply ignored. 887 */ 888 inm = in6m_lookup_locked(ifp, &mld->mld_addr); 889 if (inm == NULL) 890 goto out_locked; 891 if (nsrc > 0) { 892 if (!ratecheck(&inm->in6m_lastgsrtv, 893 &V_mld_gsrdelay)) { 894 CTR1(KTR_MLD, "%s: GS query throttled.", 895 __func__); 896 goto out_locked; 897 } 898 } 899 CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)", 900 ifp, if_name(ifp)); 901 /* 902 * If there is a pending General Query response 903 * scheduled sooner than the selected delay, no 904 * further report need be scheduled. 905 * Otherwise, prepare to respond to the 906 * group-specific or group-and-source query. 907 */ 908 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) 909 mld_v2_process_group_query(inm, mli, timer, m, mld, off); 910 911 /* XXX Clear embedded scope ID as userland won't expect it. */ 912 in6_clearscope(&mld->mld_addr); 913 } 914 915 out_locked: 916 MLD_UNLOCK(); 917 IN6_MULTI_LIST_UNLOCK(); 918 919 return (0); 920 } 921 922 /* 923 * Process a received MLDv2 group-specific or group-and-source-specific 924 * query. 925 * Return <0 if any error occurred. Currently this is ignored. 926 */ 927 static int 928 mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli, 929 int timer, struct mbuf *m0, struct mldv2_query *mld, const int off) 930 { 931 int retval; 932 uint16_t nsrc; 933 934 IN6_MULTI_LIST_LOCK_ASSERT(); 935 MLD_LOCK_ASSERT(); 936 937 retval = 0; 938 939 switch (inm->in6m_state) { 940 case MLD_NOT_MEMBER: 941 case MLD_SILENT_MEMBER: 942 case MLD_SLEEPING_MEMBER: 943 case MLD_LAZY_MEMBER: 944 case MLD_AWAKENING_MEMBER: 945 case MLD_IDLE_MEMBER: 946 case MLD_LEAVING_MEMBER: 947 return (retval); 948 break; 949 case MLD_REPORTING_MEMBER: 950 case MLD_G_QUERY_PENDING_MEMBER: 951 case MLD_SG_QUERY_PENDING_MEMBER: 952 break; 953 } 954 955 nsrc = ntohs(mld->mld_numsrc); 956 957 /* Length should be checked by calling function. */ 958 KASSERT((m0->m_flags & M_PKTHDR) == 0 || 959 m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) + 960 nsrc * sizeof(struct in6_addr), 961 ("mldv2 packet is too short: (%d bytes < %zd bytes, m=%p)", 962 m0->m_pkthdr.len, off + sizeof(struct mldv2_query) + 963 nsrc * sizeof(struct in6_addr), m0)); 964 965 /* 966 * Deal with group-specific queries upfront. 967 * If any group query is already pending, purge any recorded 968 * source-list state if it exists, and schedule a query response 969 * for this group-specific query. 970 */ 971 if (nsrc == 0) { 972 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || 973 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) { 974 in6m_clear_recorded(inm); 975 timer = min(inm->in6m_timer, timer); 976 } 977 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER; 978 inm->in6m_timer = MLD_RANDOM_DELAY(timer); 979 V_current_state_timers_running6 = 1; 980 return (retval); 981 } 982 983 /* 984 * Deal with the case where a group-and-source-specific query has 985 * been received but a group-specific query is already pending. 986 */ 987 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) { 988 timer = min(inm->in6m_timer, timer); 989 inm->in6m_timer = MLD_RANDOM_DELAY(timer); 990 V_current_state_timers_running6 = 1; 991 return (retval); 992 } 993 994 /* 995 * Finally, deal with the case where a group-and-source-specific 996 * query has been received, where a response to a previous g-s-r 997 * query exists, or none exists. 998 * In this case, we need to parse the source-list which the Querier 999 * has provided us with and check if we have any source list filter 1000 * entries at T1 for these sources. If we do not, there is no need 1001 * schedule a report and the query may be dropped. 1002 * If we do, we must record them and schedule a current-state 1003 * report for those sources. 1004 */ 1005 if (inm->in6m_nsrc > 0) { 1006 struct in6_addr srcaddr; 1007 int i, nrecorded; 1008 int soff; 1009 1010 soff = off + sizeof(struct mldv2_query); 1011 nrecorded = 0; 1012 for (i = 0; i < nsrc; i++) { 1013 m_copydata(m0, soff, sizeof(struct in6_addr), 1014 (caddr_t)&srcaddr); 1015 retval = in6m_record_source(inm, &srcaddr); 1016 if (retval < 0) 1017 break; 1018 nrecorded += retval; 1019 soff += sizeof(struct in6_addr); 1020 } 1021 if (nrecorded > 0) { 1022 CTR1(KTR_MLD, 1023 "%s: schedule response to SG query", __func__); 1024 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER; 1025 inm->in6m_timer = MLD_RANDOM_DELAY(timer); 1026 V_current_state_timers_running6 = 1; 1027 } 1028 } 1029 1030 return (retval); 1031 } 1032 1033 /* 1034 * Process a received MLDv1 host membership report. 1035 * Assumes mld points to mld_hdr in pulled up mbuf chain. 1036 * 1037 * NOTE: Can't be fully const correct as we temporarily embed scope ID in 1038 * mld_addr. This is OK as we own the mbuf chain. 1039 */ 1040 static int 1041 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6, 1042 /*const*/ struct mld_hdr *mld) 1043 { 1044 struct in6_addr src, dst; 1045 struct in6_ifaddr *ia; 1046 struct in6_multi *inm; 1047 #ifdef KTR 1048 char ip6tbuf[INET6_ADDRSTRLEN]; 1049 #endif 1050 1051 NET_EPOCH_ASSERT(); 1052 1053 if (!V_mld_v1enable) { 1054 CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)", 1055 ip6_sprintf(ip6tbuf, &mld->mld_addr), 1056 ifp, if_name(ifp)); 1057 return (0); 1058 } 1059 1060 if (ifp->if_flags & IFF_LOOPBACK) 1061 return (0); 1062 1063 /* 1064 * MLDv1 reports must originate from a host's link-local address, 1065 * or the unspecified address (when booting). 1066 */ 1067 src = ip6->ip6_src; 1068 in6_clearscope(&src); 1069 if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) { 1070 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)", 1071 ip6_sprintf(ip6tbuf, &ip6->ip6_src), 1072 ifp, if_name(ifp)); 1073 return (EINVAL); 1074 } 1075 1076 /* 1077 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast 1078 * group, and must be directed to the group itself. 1079 */ 1080 dst = ip6->ip6_dst; 1081 in6_clearscope(&dst); 1082 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) || 1083 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) { 1084 CTR3(KTR_MLD, "ignore v1 query dst %s on ifp %p(%s)", 1085 ip6_sprintf(ip6tbuf, &ip6->ip6_dst), 1086 ifp, if_name(ifp)); 1087 return (EINVAL); 1088 } 1089 1090 /* 1091 * Make sure we don't hear our own membership report, as fast 1092 * leave requires knowing that we are the only member of a 1093 * group. Assume we used the link-local address if available, 1094 * otherwise look for ::. 1095 * 1096 * XXX Note that scope ID comparison is needed for the address 1097 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be 1098 * performed for the on-wire address. 1099 */ 1100 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); 1101 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) || 1102 (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) { 1103 if (ia != NULL) 1104 ifa_free(&ia->ia_ifa); 1105 return (0); 1106 } 1107 if (ia != NULL) 1108 ifa_free(&ia->ia_ifa); 1109 1110 CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)", 1111 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp)); 1112 1113 /* 1114 * Embed scope ID of receiving interface in MLD query for lookup 1115 * whilst we don't hold other locks (due to KAME locking lameness). 1116 */ 1117 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) 1118 in6_setscope(&mld->mld_addr, ifp, NULL); 1119 1120 IN6_MULTI_LIST_LOCK(); 1121 MLD_LOCK(); 1122 1123 /* 1124 * MLDv1 report suppression. 1125 * If we are a member of this group, and our membership should be 1126 * reported, and our group timer is pending or about to be reset, 1127 * stop our group timer by transitioning to the 'lazy' state. 1128 */ 1129 inm = in6m_lookup_locked(ifp, &mld->mld_addr); 1130 if (inm != NULL) { 1131 struct mld_ifsoftc *mli; 1132 1133 mli = inm->in6m_mli; 1134 KASSERT(mli != NULL, 1135 ("%s: no mli for ifp %p", __func__, ifp)); 1136 1137 /* 1138 * If we are in MLDv2 host mode, do not allow the 1139 * other host's MLDv1 report to suppress our reports. 1140 */ 1141 if (mli->mli_version == MLD_VERSION_2) 1142 goto out_locked; 1143 1144 inm->in6m_timer = 0; 1145 1146 switch (inm->in6m_state) { 1147 case MLD_NOT_MEMBER: 1148 case MLD_SILENT_MEMBER: 1149 case MLD_SLEEPING_MEMBER: 1150 break; 1151 case MLD_REPORTING_MEMBER: 1152 case MLD_IDLE_MEMBER: 1153 case MLD_AWAKENING_MEMBER: 1154 CTR3(KTR_MLD, 1155 "report suppressed for %s on ifp %p(%s)", 1156 ip6_sprintf(ip6tbuf, &mld->mld_addr), 1157 ifp, if_name(ifp)); 1158 case MLD_LAZY_MEMBER: 1159 inm->in6m_state = MLD_LAZY_MEMBER; 1160 break; 1161 case MLD_G_QUERY_PENDING_MEMBER: 1162 case MLD_SG_QUERY_PENDING_MEMBER: 1163 case MLD_LEAVING_MEMBER: 1164 break; 1165 } 1166 } 1167 1168 out_locked: 1169 MLD_UNLOCK(); 1170 IN6_MULTI_LIST_UNLOCK(); 1171 1172 /* XXX Clear embedded scope ID as userland won't expect it. */ 1173 in6_clearscope(&mld->mld_addr); 1174 1175 return (0); 1176 } 1177 1178 /* 1179 * MLD input path. 1180 * 1181 * Assume query messages which fit in a single ICMPv6 message header 1182 * have been pulled up. 1183 * Assume that userland will want to see the message, even if it 1184 * otherwise fails kernel input validation; do not free it. 1185 * Pullup may however free the mbuf chain m if it fails. 1186 * 1187 * Return IPPROTO_DONE if we freed m. Otherwise, return 0. 1188 */ 1189 int 1190 mld_input(struct mbuf **mp, int off, int icmp6len) 1191 { 1192 struct ifnet *ifp; 1193 struct ip6_hdr *ip6; 1194 struct mbuf *m; 1195 struct mld_hdr *mld; 1196 int mldlen; 1197 1198 m = *mp; 1199 CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off); 1200 1201 ifp = m->m_pkthdr.rcvif; 1202 1203 /* Pullup to appropriate size. */ 1204 if (m->m_len < off + sizeof(*mld)) { 1205 m = m_pullup(m, off + sizeof(*mld)); 1206 if (m == NULL) { 1207 ICMP6STAT_INC(icp6s_badlen); 1208 return (IPPROTO_DONE); 1209 } 1210 } 1211 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off); 1212 if (mld->mld_type == MLD_LISTENER_QUERY && 1213 icmp6len >= sizeof(struct mldv2_query)) { 1214 mldlen = sizeof(struct mldv2_query); 1215 } else { 1216 mldlen = sizeof(struct mld_hdr); 1217 } 1218 if (m->m_len < off + mldlen) { 1219 m = m_pullup(m, off + mldlen); 1220 if (m == NULL) { 1221 ICMP6STAT_INC(icp6s_badlen); 1222 return (IPPROTO_DONE); 1223 } 1224 } 1225 *mp = m; 1226 ip6 = mtod(m, struct ip6_hdr *); 1227 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off); 1228 1229 /* 1230 * Userland needs to see all of this traffic for implementing 1231 * the endpoint discovery portion of multicast routing. 1232 */ 1233 switch (mld->mld_type) { 1234 case MLD_LISTENER_QUERY: 1235 icmp6_ifstat_inc(ifp, ifs6_in_mldquery); 1236 if (icmp6len == sizeof(struct mld_hdr)) { 1237 if (mld_v1_input_query(ifp, ip6, mld) != 0) 1238 return (0); 1239 } else if (icmp6len >= sizeof(struct mldv2_query)) { 1240 if (mld_v2_input_query(ifp, ip6, m, 1241 (struct mldv2_query *)mld, off, icmp6len) != 0) 1242 return (0); 1243 } 1244 break; 1245 case MLD_LISTENER_REPORT: 1246 icmp6_ifstat_inc(ifp, ifs6_in_mldreport); 1247 if (mld_v1_input_report(ifp, ip6, mld) != 0) 1248 return (0); 1249 break; 1250 case MLDV2_LISTENER_REPORT: 1251 icmp6_ifstat_inc(ifp, ifs6_in_mldreport); 1252 break; 1253 case MLD_LISTENER_DONE: 1254 icmp6_ifstat_inc(ifp, ifs6_in_mlddone); 1255 break; 1256 default: 1257 break; 1258 } 1259 1260 return (0); 1261 } 1262 1263 /* 1264 * Fast timeout handler (global). 1265 * VIMAGE: Timeout handlers are expected to service all vimages. 1266 */ 1267 static struct callout mldfast_callout; 1268 static void 1269 mld_fasttimo(void *arg __unused) 1270 { 1271 struct epoch_tracker et; 1272 struct in6_multi_head inmh; 1273 VNET_ITERATOR_DECL(vnet_iter); 1274 1275 SLIST_INIT(&inmh); 1276 1277 NET_EPOCH_ENTER(et); 1278 VNET_LIST_RLOCK_NOSLEEP(); 1279 VNET_FOREACH(vnet_iter) { 1280 CURVNET_SET(vnet_iter); 1281 mld_fasttimo_vnet(&inmh); 1282 CURVNET_RESTORE(); 1283 } 1284 VNET_LIST_RUNLOCK_NOSLEEP(); 1285 NET_EPOCH_EXIT(et); 1286 in6m_release_list_deferred(&inmh); 1287 1288 callout_reset(&mldfast_callout, hz / MLD_FASTHZ, mld_fasttimo, NULL); 1289 } 1290 1291 /* 1292 * Fast timeout handler (per-vnet). 1293 * 1294 * VIMAGE: Assume caller has set up our curvnet. 1295 */ 1296 static void 1297 mld_fasttimo_vnet(struct in6_multi_head *inmh) 1298 { 1299 struct mbufq scq; /* State-change packets */ 1300 struct mbufq qrq; /* Query response packets */ 1301 struct ifnet *ifp; 1302 struct mld_ifsoftc *mli; 1303 struct ifmultiaddr *ifma; 1304 struct in6_multi *inm; 1305 int uri_fasthz; 1306 1307 uri_fasthz = 0; 1308 1309 /* 1310 * Quick check to see if any work needs to be done, in order to 1311 * minimize the overhead of fasttimo processing. 1312 * SMPng: XXX Unlocked reads. 1313 */ 1314 if (!V_current_state_timers_running6 && 1315 !V_interface_timers_running6 && 1316 !V_state_change_timers_running6) 1317 return; 1318 1319 IN6_MULTI_LIST_LOCK(); 1320 MLD_LOCK(); 1321 1322 /* 1323 * MLDv2 General Query response timer processing. 1324 */ 1325 if (V_interface_timers_running6) { 1326 CTR1(KTR_MLD, "%s: interface timers running", __func__); 1327 1328 V_interface_timers_running6 = 0; 1329 LIST_FOREACH(mli, &V_mli_head, mli_link) { 1330 if (mli->mli_v2_timer == 0) { 1331 /* Do nothing. */ 1332 } else if (--mli->mli_v2_timer == 0) { 1333 mld_v2_dispatch_general_query(mli); 1334 } else { 1335 V_interface_timers_running6 = 1; 1336 } 1337 } 1338 } 1339 1340 if (!V_current_state_timers_running6 && 1341 !V_state_change_timers_running6) 1342 goto out_locked; 1343 1344 V_current_state_timers_running6 = 0; 1345 V_state_change_timers_running6 = 0; 1346 1347 CTR1(KTR_MLD, "%s: state change timers running", __func__); 1348 1349 /* 1350 * MLD host report and state-change timer processing. 1351 * Note: Processing a v2 group timer may remove a node. 1352 */ 1353 LIST_FOREACH(mli, &V_mli_head, mli_link) { 1354 ifp = mli->mli_ifp; 1355 1356 if (mli->mli_version == MLD_VERSION_2) { 1357 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * 1358 MLD_FASTHZ); 1359 mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS); 1360 mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS); 1361 } 1362 1363 IF_ADDR_WLOCK(ifp); 1364 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1365 inm = in6m_ifmultiaddr_get_inm(ifma); 1366 if (inm == NULL) 1367 continue; 1368 switch (mli->mli_version) { 1369 case MLD_VERSION_1: 1370 mld_v1_process_group_timer(inmh, inm); 1371 break; 1372 case MLD_VERSION_2: 1373 mld_v2_process_group_timers(inmh, &qrq, 1374 &scq, inm, uri_fasthz); 1375 break; 1376 } 1377 } 1378 IF_ADDR_WUNLOCK(ifp); 1379 1380 switch (mli->mli_version) { 1381 case MLD_VERSION_1: 1382 /* 1383 * Transmit reports for this lifecycle. This 1384 * is done while not holding IF_ADDR_LOCK 1385 * since this can call 1386 * in6ifa_ifpforlinklocal() which locks 1387 * IF_ADDR_LOCK internally as well as 1388 * ip6_output() to transmit a packet. 1389 */ 1390 while ((inm = SLIST_FIRST(inmh)) != NULL) { 1391 SLIST_REMOVE_HEAD(inmh, in6m_defer); 1392 (void)mld_v1_transmit_report(inm, 1393 MLD_LISTENER_REPORT); 1394 } 1395 break; 1396 case MLD_VERSION_2: 1397 mld_dispatch_queue(&qrq, 0); 1398 mld_dispatch_queue(&scq, 0); 1399 break; 1400 } 1401 } 1402 1403 out_locked: 1404 MLD_UNLOCK(); 1405 IN6_MULTI_LIST_UNLOCK(); 1406 } 1407 1408 /* 1409 * Update host report group timer. 1410 * Will update the global pending timer flags. 1411 */ 1412 static void 1413 mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm) 1414 { 1415 int report_timer_expired; 1416 1417 IN6_MULTI_LIST_LOCK_ASSERT(); 1418 MLD_LOCK_ASSERT(); 1419 1420 if (inm->in6m_timer == 0) { 1421 report_timer_expired = 0; 1422 } else if (--inm->in6m_timer == 0) { 1423 report_timer_expired = 1; 1424 } else { 1425 V_current_state_timers_running6 = 1; 1426 return; 1427 } 1428 1429 switch (inm->in6m_state) { 1430 case MLD_NOT_MEMBER: 1431 case MLD_SILENT_MEMBER: 1432 case MLD_IDLE_MEMBER: 1433 case MLD_LAZY_MEMBER: 1434 case MLD_SLEEPING_MEMBER: 1435 case MLD_AWAKENING_MEMBER: 1436 break; 1437 case MLD_REPORTING_MEMBER: 1438 if (report_timer_expired) { 1439 inm->in6m_state = MLD_IDLE_MEMBER; 1440 SLIST_INSERT_HEAD(inmh, inm, in6m_defer); 1441 } 1442 break; 1443 case MLD_G_QUERY_PENDING_MEMBER: 1444 case MLD_SG_QUERY_PENDING_MEMBER: 1445 case MLD_LEAVING_MEMBER: 1446 break; 1447 } 1448 } 1449 1450 /* 1451 * Update a group's timers for MLDv2. 1452 * Will update the global pending timer flags. 1453 * Note: Unlocked read from mli. 1454 */ 1455 static void 1456 mld_v2_process_group_timers(struct in6_multi_head *inmh, 1457 struct mbufq *qrq, struct mbufq *scq, 1458 struct in6_multi *inm, const int uri_fasthz) 1459 { 1460 int query_response_timer_expired; 1461 int state_change_retransmit_timer_expired; 1462 #ifdef KTR 1463 char ip6tbuf[INET6_ADDRSTRLEN]; 1464 #endif 1465 1466 IN6_MULTI_LIST_LOCK_ASSERT(); 1467 MLD_LOCK_ASSERT(); 1468 1469 query_response_timer_expired = 0; 1470 state_change_retransmit_timer_expired = 0; 1471 1472 /* 1473 * During a transition from compatibility mode back to MLDv2, 1474 * a group record in REPORTING state may still have its group 1475 * timer active. This is a no-op in this function; it is easier 1476 * to deal with it here than to complicate the slow-timeout path. 1477 */ 1478 if (inm->in6m_timer == 0) { 1479 query_response_timer_expired = 0; 1480 } else if (--inm->in6m_timer == 0) { 1481 query_response_timer_expired = 1; 1482 } else { 1483 V_current_state_timers_running6 = 1; 1484 } 1485 1486 if (inm->in6m_sctimer == 0) { 1487 state_change_retransmit_timer_expired = 0; 1488 } else if (--inm->in6m_sctimer == 0) { 1489 state_change_retransmit_timer_expired = 1; 1490 } else { 1491 V_state_change_timers_running6 = 1; 1492 } 1493 1494 /* We are in fasttimo, so be quick about it. */ 1495 if (!state_change_retransmit_timer_expired && 1496 !query_response_timer_expired) 1497 return; 1498 1499 switch (inm->in6m_state) { 1500 case MLD_NOT_MEMBER: 1501 case MLD_SILENT_MEMBER: 1502 case MLD_SLEEPING_MEMBER: 1503 case MLD_LAZY_MEMBER: 1504 case MLD_AWAKENING_MEMBER: 1505 case MLD_IDLE_MEMBER: 1506 break; 1507 case MLD_G_QUERY_PENDING_MEMBER: 1508 case MLD_SG_QUERY_PENDING_MEMBER: 1509 /* 1510 * Respond to a previously pending Group-Specific 1511 * or Group-and-Source-Specific query by enqueueing 1512 * the appropriate Current-State report for 1513 * immediate transmission. 1514 */ 1515 if (query_response_timer_expired) { 1516 int retval __unused; 1517 1518 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1, 1519 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER), 1520 0); 1521 CTR2(KTR_MLD, "%s: enqueue record = %d", 1522 __func__, retval); 1523 inm->in6m_state = MLD_REPORTING_MEMBER; 1524 in6m_clear_recorded(inm); 1525 } 1526 /* FALLTHROUGH */ 1527 case MLD_REPORTING_MEMBER: 1528 case MLD_LEAVING_MEMBER: 1529 if (state_change_retransmit_timer_expired) { 1530 /* 1531 * State-change retransmission timer fired. 1532 * If there are any further pending retransmissions, 1533 * set the global pending state-change flag, and 1534 * reset the timer. 1535 */ 1536 if (--inm->in6m_scrv > 0) { 1537 inm->in6m_sctimer = uri_fasthz; 1538 V_state_change_timers_running6 = 1; 1539 } 1540 /* 1541 * Retransmit the previously computed state-change 1542 * report. If there are no further pending 1543 * retransmissions, the mbuf queue will be consumed. 1544 * Update T0 state to T1 as we have now sent 1545 * a state-change. 1546 */ 1547 (void)mld_v2_merge_state_changes(inm, scq); 1548 1549 in6m_commit(inm); 1550 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, 1551 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 1552 if_name(inm->in6m_ifp)); 1553 1554 /* 1555 * If we are leaving the group for good, make sure 1556 * we release MLD's reference to it. 1557 * This release must be deferred using a SLIST, 1558 * as we are called from a loop which traverses 1559 * the in_ifmultiaddr TAILQ. 1560 */ 1561 if (inm->in6m_state == MLD_LEAVING_MEMBER && 1562 inm->in6m_scrv == 0) { 1563 inm->in6m_state = MLD_NOT_MEMBER; 1564 in6m_disconnect_locked(inmh, inm); 1565 in6m_rele_locked(inmh, inm); 1566 } 1567 } 1568 break; 1569 } 1570 } 1571 1572 /* 1573 * Switch to a different version on the given interface, 1574 * as per Section 9.12. 1575 */ 1576 static void 1577 mld_set_version(struct mld_ifsoftc *mli, const int version) 1578 { 1579 int old_version_timer; 1580 1581 MLD_LOCK_ASSERT(); 1582 1583 CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__, 1584 version, mli->mli_ifp, if_name(mli->mli_ifp)); 1585 1586 if (version == MLD_VERSION_1) { 1587 /* 1588 * Compute the "Older Version Querier Present" timer as per 1589 * Section 9.12. 1590 */ 1591 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri; 1592 old_version_timer *= MLD_SLOWHZ; 1593 mli->mli_v1_timer = old_version_timer; 1594 } 1595 1596 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) { 1597 mli->mli_version = MLD_VERSION_1; 1598 mld_v2_cancel_link_timers(mli); 1599 } 1600 } 1601 1602 /* 1603 * Cancel pending MLDv2 timers for the given link and all groups 1604 * joined on it; state-change, general-query, and group-query timers. 1605 */ 1606 static void 1607 mld_v2_cancel_link_timers(struct mld_ifsoftc *mli) 1608 { 1609 struct epoch_tracker et; 1610 struct in6_multi_head inmh; 1611 struct ifmultiaddr *ifma; 1612 struct ifnet *ifp; 1613 struct in6_multi *inm; 1614 1615 CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__, 1616 mli->mli_ifp, if_name(mli->mli_ifp)); 1617 1618 SLIST_INIT(&inmh); 1619 IN6_MULTI_LIST_LOCK_ASSERT(); 1620 MLD_LOCK_ASSERT(); 1621 1622 /* 1623 * Fast-track this potentially expensive operation 1624 * by checking all the global 'timer pending' flags. 1625 */ 1626 if (!V_interface_timers_running6 && 1627 !V_state_change_timers_running6 && 1628 !V_current_state_timers_running6) 1629 return; 1630 1631 mli->mli_v2_timer = 0; 1632 1633 ifp = mli->mli_ifp; 1634 1635 IF_ADDR_WLOCK(ifp); 1636 NET_EPOCH_ENTER(et); 1637 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1638 inm = in6m_ifmultiaddr_get_inm(ifma); 1639 if (inm == NULL) 1640 continue; 1641 switch (inm->in6m_state) { 1642 case MLD_NOT_MEMBER: 1643 case MLD_SILENT_MEMBER: 1644 case MLD_IDLE_MEMBER: 1645 case MLD_LAZY_MEMBER: 1646 case MLD_SLEEPING_MEMBER: 1647 case MLD_AWAKENING_MEMBER: 1648 break; 1649 case MLD_LEAVING_MEMBER: 1650 /* 1651 * If we are leaving the group and switching 1652 * version, we need to release the final 1653 * reference held for issuing the INCLUDE {}. 1654 */ 1655 if (inm->in6m_refcount == 1) 1656 in6m_disconnect_locked(&inmh, inm); 1657 in6m_rele_locked(&inmh, inm); 1658 /* FALLTHROUGH */ 1659 case MLD_G_QUERY_PENDING_MEMBER: 1660 case MLD_SG_QUERY_PENDING_MEMBER: 1661 in6m_clear_recorded(inm); 1662 /* FALLTHROUGH */ 1663 case MLD_REPORTING_MEMBER: 1664 inm->in6m_sctimer = 0; 1665 inm->in6m_timer = 0; 1666 inm->in6m_state = MLD_REPORTING_MEMBER; 1667 /* 1668 * Free any pending MLDv2 state-change records. 1669 */ 1670 mbufq_drain(&inm->in6m_scq); 1671 break; 1672 } 1673 } 1674 NET_EPOCH_EXIT(et); 1675 IF_ADDR_WUNLOCK(ifp); 1676 in6m_release_list_deferred(&inmh); 1677 } 1678 1679 /* 1680 * Global slowtimo handler. 1681 * VIMAGE: Timeout handlers are expected to service all vimages. 1682 */ 1683 static struct callout mldslow_callout; 1684 static void 1685 mld_slowtimo(void *arg __unused) 1686 { 1687 VNET_ITERATOR_DECL(vnet_iter); 1688 1689 VNET_LIST_RLOCK_NOSLEEP(); 1690 VNET_FOREACH(vnet_iter) { 1691 CURVNET_SET(vnet_iter); 1692 mld_slowtimo_vnet(); 1693 CURVNET_RESTORE(); 1694 } 1695 VNET_LIST_RUNLOCK_NOSLEEP(); 1696 1697 callout_reset(&mldslow_callout, hz / MLD_SLOWHZ, mld_slowtimo, NULL); 1698 } 1699 1700 /* 1701 * Per-vnet slowtimo handler. 1702 */ 1703 static void 1704 mld_slowtimo_vnet(void) 1705 { 1706 struct mld_ifsoftc *mli; 1707 1708 MLD_LOCK(); 1709 1710 LIST_FOREACH(mli, &V_mli_head, mli_link) { 1711 mld_v1_process_querier_timers(mli); 1712 } 1713 1714 MLD_UNLOCK(); 1715 } 1716 1717 /* 1718 * Update the Older Version Querier Present timers for a link. 1719 * See Section 9.12 of RFC 3810. 1720 */ 1721 static void 1722 mld_v1_process_querier_timers(struct mld_ifsoftc *mli) 1723 { 1724 1725 MLD_LOCK_ASSERT(); 1726 1727 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) { 1728 /* 1729 * MLDv1 Querier Present timer expired; revert to MLDv2. 1730 */ 1731 CTR5(KTR_MLD, 1732 "%s: transition from v%d -> v%d on %p(%s)", 1733 __func__, mli->mli_version, MLD_VERSION_2, 1734 mli->mli_ifp, if_name(mli->mli_ifp)); 1735 mli->mli_version = MLD_VERSION_2; 1736 } 1737 } 1738 1739 /* 1740 * Transmit an MLDv1 report immediately. 1741 */ 1742 static int 1743 mld_v1_transmit_report(struct in6_multi *in6m, const int type) 1744 { 1745 struct ifnet *ifp; 1746 struct in6_ifaddr *ia; 1747 struct ip6_hdr *ip6; 1748 struct mbuf *mh, *md; 1749 struct mld_hdr *mld; 1750 1751 NET_EPOCH_ASSERT(); 1752 IN6_MULTI_LIST_LOCK_ASSERT(); 1753 MLD_LOCK_ASSERT(); 1754 1755 ifp = in6m->in6m_ifp; 1756 /* in process of being freed */ 1757 if (ifp == NULL) 1758 return (0); 1759 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); 1760 /* ia may be NULL if link-local address is tentative. */ 1761 1762 mh = m_gethdr(M_NOWAIT, MT_DATA); 1763 if (mh == NULL) { 1764 if (ia != NULL) 1765 ifa_free(&ia->ia_ifa); 1766 return (ENOMEM); 1767 } 1768 md = m_get(M_NOWAIT, MT_DATA); 1769 if (md == NULL) { 1770 m_free(mh); 1771 if (ia != NULL) 1772 ifa_free(&ia->ia_ifa); 1773 return (ENOMEM); 1774 } 1775 mh->m_next = md; 1776 1777 /* 1778 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so 1779 * that ether_output() does not need to allocate another mbuf 1780 * for the header in the most common case. 1781 */ 1782 M_ALIGN(mh, sizeof(struct ip6_hdr)); 1783 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr); 1784 mh->m_len = sizeof(struct ip6_hdr); 1785 1786 ip6 = mtod(mh, struct ip6_hdr *); 1787 ip6->ip6_flow = 0; 1788 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; 1789 ip6->ip6_vfc |= IPV6_VERSION; 1790 ip6->ip6_nxt = IPPROTO_ICMPV6; 1791 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; 1792 ip6->ip6_dst = in6m->in6m_addr; 1793 1794 md->m_len = sizeof(struct mld_hdr); 1795 mld = mtod(md, struct mld_hdr *); 1796 mld->mld_type = type; 1797 mld->mld_code = 0; 1798 mld->mld_cksum = 0; 1799 mld->mld_maxdelay = 0; 1800 mld->mld_reserved = 0; 1801 mld->mld_addr = in6m->in6m_addr; 1802 in6_clearscope(&mld->mld_addr); 1803 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, 1804 sizeof(struct ip6_hdr), sizeof(struct mld_hdr)); 1805 1806 mld_save_context(mh, ifp); 1807 mh->m_flags |= M_MLDV1; 1808 1809 mld_dispatch_packet(mh); 1810 1811 if (ia != NULL) 1812 ifa_free(&ia->ia_ifa); 1813 return (0); 1814 } 1815 1816 /* 1817 * Process a state change from the upper layer for the given IPv6 group. 1818 * 1819 * Each socket holds a reference on the in_multi in its own ip_moptions. 1820 * The socket layer will have made the necessary updates to.the group 1821 * state, it is now up to MLD to issue a state change report if there 1822 * has been any change between T0 (when the last state-change was issued) 1823 * and T1 (now). 1824 * 1825 * We use the MLDv2 state machine at group level. The MLd module 1826 * however makes the decision as to which MLD protocol version to speak. 1827 * A state change *from* INCLUDE {} always means an initial join. 1828 * A state change *to* INCLUDE {} always means a final leave. 1829 * 1830 * If delay is non-zero, and the state change is an initial multicast 1831 * join, the state change report will be delayed by 'delay' ticks 1832 * in units of MLD_FASTHZ if MLDv1 is active on the link; otherwise 1833 * the initial MLDv2 state change report will be delayed by whichever 1834 * is sooner, a pending state-change timer or delay itself. 1835 * 1836 * VIMAGE: curvnet should have been set by caller, as this routine 1837 * is called from the socket option handlers. 1838 */ 1839 int 1840 mld_change_state(struct in6_multi *inm, const int delay) 1841 { 1842 struct mld_ifsoftc *mli; 1843 struct ifnet *ifp; 1844 int error; 1845 1846 IN6_MULTI_LIST_LOCK_ASSERT(); 1847 1848 error = 0; 1849 1850 /* 1851 * Check if the in6_multi has already been disconnected. 1852 */ 1853 if (inm->in6m_ifp == NULL) { 1854 CTR1(KTR_MLD, "%s: inm is disconnected", __func__); 1855 return (0); 1856 } 1857 1858 /* 1859 * Try to detect if the upper layer just asked us to change state 1860 * for an interface which has now gone away. 1861 */ 1862 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); 1863 ifp = inm->in6m_ifma->ifma_ifp; 1864 if (ifp == NULL) 1865 return (0); 1866 /* 1867 * Sanity check that netinet6's notion of ifp is the 1868 * same as net's. 1869 */ 1870 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); 1871 1872 MLD_LOCK(); 1873 mli = MLD_IFINFO(ifp); 1874 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp)); 1875 1876 /* 1877 * If we detect a state transition to or from MCAST_UNDEFINED 1878 * for this group, then we are starting or finishing an MLD 1879 * life cycle for this group. 1880 */ 1881 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) { 1882 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__, 1883 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode); 1884 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) { 1885 CTR1(KTR_MLD, "%s: initial join", __func__); 1886 error = mld_initial_join(inm, mli, delay); 1887 goto out_locked; 1888 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) { 1889 CTR1(KTR_MLD, "%s: final leave", __func__); 1890 mld_final_leave(inm, mli); 1891 goto out_locked; 1892 } 1893 } else { 1894 CTR1(KTR_MLD, "%s: filter set change", __func__); 1895 } 1896 1897 error = mld_handle_state_change(inm, mli); 1898 1899 out_locked: 1900 MLD_UNLOCK(); 1901 return (error); 1902 } 1903 1904 /* 1905 * Perform the initial join for an MLD group. 1906 * 1907 * When joining a group: 1908 * If the group should have its MLD traffic suppressed, do nothing. 1909 * MLDv1 starts sending MLDv1 host membership reports. 1910 * MLDv2 will schedule an MLDv2 state-change report containing the 1911 * initial state of the membership. 1912 * 1913 * If the delay argument is non-zero, then we must delay sending the 1914 * initial state change for delay ticks (in units of MLD_FASTHZ). 1915 */ 1916 static int 1917 mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli, 1918 const int delay) 1919 { 1920 struct epoch_tracker et; 1921 struct ifnet *ifp; 1922 struct mbufq *mq; 1923 int error, retval, syncstates; 1924 int odelay; 1925 #ifdef KTR 1926 char ip6tbuf[INET6_ADDRSTRLEN]; 1927 #endif 1928 1929 CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)", 1930 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 1931 inm->in6m_ifp, if_name(inm->in6m_ifp)); 1932 1933 error = 0; 1934 syncstates = 1; 1935 1936 ifp = inm->in6m_ifp; 1937 1938 IN6_MULTI_LIST_LOCK_ASSERT(); 1939 MLD_LOCK_ASSERT(); 1940 1941 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__)); 1942 1943 /* 1944 * Groups joined on loopback or marked as 'not reported', 1945 * enter the MLD_SILENT_MEMBER state and 1946 * are never reported in any protocol exchanges. 1947 * All other groups enter the appropriate state machine 1948 * for the version in use on this link. 1949 * A link marked as MLIF_SILENT causes MLD to be completely 1950 * disabled for the link. 1951 */ 1952 if ((ifp->if_flags & IFF_LOOPBACK) || 1953 (mli->mli_flags & MLIF_SILENT) || 1954 !mld_is_addr_reported(&inm->in6m_addr)) { 1955 CTR1(KTR_MLD, 1956 "%s: not kicking state machine for silent group", __func__); 1957 inm->in6m_state = MLD_SILENT_MEMBER; 1958 inm->in6m_timer = 0; 1959 } else { 1960 /* 1961 * Deal with overlapping in_multi lifecycle. 1962 * If this group was LEAVING, then make sure 1963 * we drop the reference we picked up to keep the 1964 * group around for the final INCLUDE {} enqueue. 1965 */ 1966 if (mli->mli_version == MLD_VERSION_2 && 1967 inm->in6m_state == MLD_LEAVING_MEMBER) { 1968 inm->in6m_refcount--; 1969 MPASS(inm->in6m_refcount > 0); 1970 } 1971 inm->in6m_state = MLD_REPORTING_MEMBER; 1972 1973 switch (mli->mli_version) { 1974 case MLD_VERSION_1: 1975 /* 1976 * If a delay was provided, only use it if 1977 * it is greater than the delay normally 1978 * used for an MLDv1 state change report, 1979 * and delay sending the initial MLDv1 report 1980 * by not transitioning to the IDLE state. 1981 */ 1982 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * MLD_FASTHZ); 1983 if (delay) { 1984 inm->in6m_timer = max(delay, odelay); 1985 V_current_state_timers_running6 = 1; 1986 } else { 1987 inm->in6m_state = MLD_IDLE_MEMBER; 1988 NET_EPOCH_ENTER(et); 1989 error = mld_v1_transmit_report(inm, 1990 MLD_LISTENER_REPORT); 1991 NET_EPOCH_EXIT(et); 1992 if (error == 0) { 1993 inm->in6m_timer = odelay; 1994 V_current_state_timers_running6 = 1; 1995 } 1996 } 1997 break; 1998 1999 case MLD_VERSION_2: 2000 /* 2001 * Defer update of T0 to T1, until the first copy 2002 * of the state change has been transmitted. 2003 */ 2004 syncstates = 0; 2005 2006 /* 2007 * Immediately enqueue a State-Change Report for 2008 * this interface, freeing any previous reports. 2009 * Don't kick the timers if there is nothing to do, 2010 * or if an error occurred. 2011 */ 2012 mq = &inm->in6m_scq; 2013 mbufq_drain(mq); 2014 retval = mld_v2_enqueue_group_record(mq, inm, 1, 2015 0, 0, (mli->mli_flags & MLIF_USEALLOW)); 2016 CTR2(KTR_MLD, "%s: enqueue record = %d", 2017 __func__, retval); 2018 if (retval <= 0) { 2019 error = retval * -1; 2020 break; 2021 } 2022 2023 /* 2024 * Schedule transmission of pending state-change 2025 * report up to RV times for this link. The timer 2026 * will fire at the next mld_fasttimo (~200ms), 2027 * giving us an opportunity to merge the reports. 2028 * 2029 * If a delay was provided to this function, only 2030 * use this delay if sooner than the existing one. 2031 */ 2032 KASSERT(mli->mli_rv > 1, 2033 ("%s: invalid robustness %d", __func__, 2034 mli->mli_rv)); 2035 inm->in6m_scrv = mli->mli_rv; 2036 if (delay) { 2037 if (inm->in6m_sctimer > 1) { 2038 inm->in6m_sctimer = 2039 min(inm->in6m_sctimer, delay); 2040 } else 2041 inm->in6m_sctimer = delay; 2042 } else 2043 inm->in6m_sctimer = 1; 2044 V_state_change_timers_running6 = 1; 2045 2046 error = 0; 2047 break; 2048 } 2049 } 2050 2051 /* 2052 * Only update the T0 state if state change is atomic, 2053 * i.e. we don't need to wait for a timer to fire before we 2054 * can consider the state change to have been communicated. 2055 */ 2056 if (syncstates) { 2057 in6m_commit(inm); 2058 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, 2059 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2060 if_name(inm->in6m_ifp)); 2061 } 2062 2063 return (error); 2064 } 2065 2066 /* 2067 * Issue an intermediate state change during the life-cycle. 2068 */ 2069 static int 2070 mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli) 2071 { 2072 struct ifnet *ifp; 2073 int retval; 2074 #ifdef KTR 2075 char ip6tbuf[INET6_ADDRSTRLEN]; 2076 #endif 2077 2078 CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)", 2079 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2080 inm->in6m_ifp, if_name(inm->in6m_ifp)); 2081 2082 ifp = inm->in6m_ifp; 2083 2084 IN6_MULTI_LIST_LOCK_ASSERT(); 2085 MLD_LOCK_ASSERT(); 2086 2087 KASSERT(mli && mli->mli_ifp == ifp, 2088 ("%s: inconsistent ifp", __func__)); 2089 2090 if ((ifp->if_flags & IFF_LOOPBACK) || 2091 (mli->mli_flags & MLIF_SILENT) || 2092 !mld_is_addr_reported(&inm->in6m_addr) || 2093 (mli->mli_version != MLD_VERSION_2)) { 2094 if (!mld_is_addr_reported(&inm->in6m_addr)) { 2095 CTR1(KTR_MLD, 2096 "%s: not kicking state machine for silent group", __func__); 2097 } 2098 CTR1(KTR_MLD, "%s: nothing to do", __func__); 2099 in6m_commit(inm); 2100 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, 2101 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2102 if_name(inm->in6m_ifp)); 2103 return (0); 2104 } 2105 2106 mbufq_drain(&inm->in6m_scq); 2107 2108 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0, 2109 (mli->mli_flags & MLIF_USEALLOW)); 2110 CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval); 2111 if (retval <= 0) 2112 return (-retval); 2113 2114 /* 2115 * If record(s) were enqueued, start the state-change 2116 * report timer for this group. 2117 */ 2118 inm->in6m_scrv = mli->mli_rv; 2119 inm->in6m_sctimer = 1; 2120 V_state_change_timers_running6 = 1; 2121 2122 return (0); 2123 } 2124 2125 /* 2126 * Perform the final leave for a multicast address. 2127 * 2128 * When leaving a group: 2129 * MLDv1 sends a DONE message, if and only if we are the reporter. 2130 * MLDv2 enqueues a state-change report containing a transition 2131 * to INCLUDE {} for immediate transmission. 2132 */ 2133 static void 2134 mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli) 2135 { 2136 struct epoch_tracker et; 2137 #ifdef KTR 2138 char ip6tbuf[INET6_ADDRSTRLEN]; 2139 #endif 2140 2141 CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)", 2142 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2143 inm->in6m_ifp, if_name(inm->in6m_ifp)); 2144 2145 IN6_MULTI_LIST_LOCK_ASSERT(); 2146 MLD_LOCK_ASSERT(); 2147 2148 switch (inm->in6m_state) { 2149 case MLD_NOT_MEMBER: 2150 case MLD_SILENT_MEMBER: 2151 case MLD_LEAVING_MEMBER: 2152 /* Already leaving or left; do nothing. */ 2153 CTR1(KTR_MLD, 2154 "%s: not kicking state machine for silent group", __func__); 2155 break; 2156 case MLD_REPORTING_MEMBER: 2157 case MLD_IDLE_MEMBER: 2158 case MLD_G_QUERY_PENDING_MEMBER: 2159 case MLD_SG_QUERY_PENDING_MEMBER: 2160 if (mli->mli_version == MLD_VERSION_1) { 2161 #ifdef INVARIANTS 2162 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || 2163 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) 2164 panic("%s: MLDv2 state reached, not MLDv2 mode", 2165 __func__); 2166 #endif 2167 NET_EPOCH_ENTER(et); 2168 mld_v1_transmit_report(inm, MLD_LISTENER_DONE); 2169 NET_EPOCH_EXIT(et); 2170 inm->in6m_state = MLD_NOT_MEMBER; 2171 V_current_state_timers_running6 = 1; 2172 } else if (mli->mli_version == MLD_VERSION_2) { 2173 /* 2174 * Stop group timer and all pending reports. 2175 * Immediately enqueue a state-change report 2176 * TO_IN {} to be sent on the next fast timeout, 2177 * giving us an opportunity to merge reports. 2178 */ 2179 mbufq_drain(&inm->in6m_scq); 2180 inm->in6m_timer = 0; 2181 inm->in6m_scrv = mli->mli_rv; 2182 CTR4(KTR_MLD, "%s: Leaving %s/%s with %d " 2183 "pending retransmissions.", __func__, 2184 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2185 if_name(inm->in6m_ifp), inm->in6m_scrv); 2186 if (inm->in6m_scrv == 0) { 2187 inm->in6m_state = MLD_NOT_MEMBER; 2188 inm->in6m_sctimer = 0; 2189 } else { 2190 int retval __diagused; 2191 2192 in6m_acquire_locked(inm); 2193 2194 retval = mld_v2_enqueue_group_record( 2195 &inm->in6m_scq, inm, 1, 0, 0, 2196 (mli->mli_flags & MLIF_USEALLOW)); 2197 KASSERT(retval != 0, 2198 ("%s: enqueue record = %d", __func__, 2199 retval)); 2200 2201 inm->in6m_state = MLD_LEAVING_MEMBER; 2202 inm->in6m_sctimer = 1; 2203 V_state_change_timers_running6 = 1; 2204 } 2205 break; 2206 } 2207 break; 2208 case MLD_LAZY_MEMBER: 2209 case MLD_SLEEPING_MEMBER: 2210 case MLD_AWAKENING_MEMBER: 2211 /* Our reports are suppressed; do nothing. */ 2212 break; 2213 } 2214 2215 in6m_commit(inm); 2216 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, 2217 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2218 if_name(inm->in6m_ifp)); 2219 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 2220 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s", 2221 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp)); 2222 } 2223 2224 /* 2225 * Enqueue an MLDv2 group record to the given output queue. 2226 * 2227 * If is_state_change is zero, a current-state record is appended. 2228 * If is_state_change is non-zero, a state-change report is appended. 2229 * 2230 * If is_group_query is non-zero, an mbuf packet chain is allocated. 2231 * If is_group_query is zero, and if there is a packet with free space 2232 * at the tail of the queue, it will be appended to providing there 2233 * is enough free space. 2234 * Otherwise a new mbuf packet chain is allocated. 2235 * 2236 * If is_source_query is non-zero, each source is checked to see if 2237 * it was recorded for a Group-Source query, and will be omitted if 2238 * it is not both in-mode and recorded. 2239 * 2240 * If use_block_allow is non-zero, state change reports for initial join 2241 * and final leave, on an inclusive mode group with a source list, will be 2242 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively. 2243 * 2244 * The function will attempt to allocate leading space in the packet 2245 * for the IPv6+ICMP headers to be prepended without fragmenting the chain. 2246 * 2247 * If successful the size of all data appended to the queue is returned, 2248 * otherwise an error code less than zero is returned, or zero if 2249 * no record(s) were appended. 2250 */ 2251 static int 2252 mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm, 2253 const int is_state_change, const int is_group_query, 2254 const int is_source_query, const int use_block_allow) 2255 { 2256 struct mldv2_record mr; 2257 struct mldv2_record *pmr; 2258 struct ifnet *ifp; 2259 struct ip6_msource *ims, *nims; 2260 struct mbuf *m0, *m, *md; 2261 int is_filter_list_change; 2262 int minrec0len, m0srcs, msrcs, nbytes, off; 2263 int record_has_sources; 2264 int now; 2265 int type; 2266 uint8_t mode; 2267 #ifdef KTR 2268 char ip6tbuf[INET6_ADDRSTRLEN]; 2269 #endif 2270 2271 IN6_MULTI_LIST_LOCK_ASSERT(); 2272 2273 ifp = inm->in6m_ifp; 2274 is_filter_list_change = 0; 2275 m = NULL; 2276 m0 = NULL; 2277 m0srcs = 0; 2278 msrcs = 0; 2279 nbytes = 0; 2280 nims = NULL; 2281 record_has_sources = 1; 2282 pmr = NULL; 2283 type = MLD_DO_NOTHING; 2284 mode = inm->in6m_st[1].iss_fmode; 2285 2286 /* 2287 * If we did not transition out of ASM mode during t0->t1, 2288 * and there are no source nodes to process, we can skip 2289 * the generation of source records. 2290 */ 2291 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && 2292 inm->in6m_nsrc == 0) 2293 record_has_sources = 0; 2294 2295 if (is_state_change) { 2296 /* 2297 * Queue a state change record. 2298 * If the mode did not change, and there are non-ASM 2299 * listeners or source filters present, 2300 * we potentially need to issue two records for the group. 2301 * If there are ASM listeners, and there was no filter 2302 * mode transition of any kind, do nothing. 2303 * 2304 * If we are transitioning to MCAST_UNDEFINED, we need 2305 * not send any sources. A transition to/from this state is 2306 * considered inclusive with some special treatment. 2307 * 2308 * If we are rewriting initial joins/leaves to use 2309 * ALLOW/BLOCK, and the group's membership is inclusive, 2310 * we need to send sources in all cases. 2311 */ 2312 if (mode != inm->in6m_st[0].iss_fmode) { 2313 if (mode == MCAST_EXCLUDE) { 2314 CTR1(KTR_MLD, "%s: change to EXCLUDE", 2315 __func__); 2316 type = MLD_CHANGE_TO_EXCLUDE_MODE; 2317 } else { 2318 CTR1(KTR_MLD, "%s: change to INCLUDE", 2319 __func__); 2320 if (use_block_allow) { 2321 /* 2322 * XXX 2323 * Here we're interested in state 2324 * edges either direction between 2325 * MCAST_UNDEFINED and MCAST_INCLUDE. 2326 * Perhaps we should just check 2327 * the group state, rather than 2328 * the filter mode. 2329 */ 2330 if (mode == MCAST_UNDEFINED) { 2331 type = MLD_BLOCK_OLD_SOURCES; 2332 } else { 2333 type = MLD_ALLOW_NEW_SOURCES; 2334 } 2335 } else { 2336 type = MLD_CHANGE_TO_INCLUDE_MODE; 2337 if (mode == MCAST_UNDEFINED) 2338 record_has_sources = 0; 2339 } 2340 } 2341 } else { 2342 if (record_has_sources) { 2343 is_filter_list_change = 1; 2344 } else { 2345 type = MLD_DO_NOTHING; 2346 } 2347 } 2348 } else { 2349 /* 2350 * Queue a current state record. 2351 */ 2352 if (mode == MCAST_EXCLUDE) { 2353 type = MLD_MODE_IS_EXCLUDE; 2354 } else if (mode == MCAST_INCLUDE) { 2355 type = MLD_MODE_IS_INCLUDE; 2356 KASSERT(inm->in6m_st[1].iss_asm == 0, 2357 ("%s: inm %p is INCLUDE but ASM count is %d", 2358 __func__, inm, inm->in6m_st[1].iss_asm)); 2359 } 2360 } 2361 2362 /* 2363 * Generate the filter list changes using a separate function. 2364 */ 2365 if (is_filter_list_change) 2366 return (mld_v2_enqueue_filter_change(mq, inm)); 2367 2368 if (type == MLD_DO_NOTHING) { 2369 CTR3(KTR_MLD, "%s: nothing to do for %s/%s", 2370 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2371 if_name(inm->in6m_ifp)); 2372 return (0); 2373 } 2374 2375 /* 2376 * If any sources are present, we must be able to fit at least 2377 * one in the trailing space of the tail packet's mbuf, 2378 * ideally more. 2379 */ 2380 minrec0len = sizeof(struct mldv2_record); 2381 if (record_has_sources) 2382 minrec0len += sizeof(struct in6_addr); 2383 2384 CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__, 2385 mld_rec_type_to_str(type), 2386 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2387 if_name(inm->in6m_ifp)); 2388 2389 /* 2390 * Check if we have a packet in the tail of the queue for this 2391 * group into which the first group record for this group will fit. 2392 * Otherwise allocate a new packet. 2393 * Always allocate leading space for IP6+RA+ICMPV6+REPORT. 2394 * Note: Group records for G/GSR query responses MUST be sent 2395 * in their own packet. 2396 */ 2397 m0 = mbufq_last(mq); 2398 if (!is_group_query && 2399 m0 != NULL && 2400 (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && 2401 (m0->m_pkthdr.len + minrec0len) < 2402 (ifp->if_mtu - MLD_MTUSPACE)) { 2403 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2404 sizeof(struct mldv2_record)) / 2405 sizeof(struct in6_addr); 2406 m = m0; 2407 CTR1(KTR_MLD, "%s: use existing packet", __func__); 2408 } else { 2409 if (mbufq_full(mq)) { 2410 CTR1(KTR_MLD, "%s: outbound queue full", __func__); 2411 return (-ENOMEM); 2412 } 2413 m = NULL; 2414 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - 2415 sizeof(struct mldv2_record)) / sizeof(struct in6_addr); 2416 if (!is_state_change && !is_group_query) 2417 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2418 if (m == NULL) 2419 m = m_gethdr(M_NOWAIT, MT_DATA); 2420 if (m == NULL) 2421 return (-ENOMEM); 2422 2423 mld_save_context(m, ifp); 2424 2425 CTR1(KTR_MLD, "%s: allocated first packet", __func__); 2426 } 2427 2428 /* 2429 * Append group record. 2430 * If we have sources, we don't know how many yet. 2431 */ 2432 mr.mr_type = type; 2433 mr.mr_datalen = 0; 2434 mr.mr_numsrc = 0; 2435 mr.mr_addr = inm->in6m_addr; 2436 in6_clearscope(&mr.mr_addr); 2437 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { 2438 if (m != m0) 2439 m_freem(m); 2440 CTR1(KTR_MLD, "%s: m_append() failed.", __func__); 2441 return (-ENOMEM); 2442 } 2443 nbytes += sizeof(struct mldv2_record); 2444 2445 /* 2446 * Append as many sources as will fit in the first packet. 2447 * If we are appending to a new packet, the chain allocation 2448 * may potentially use clusters; use m_getptr() in this case. 2449 * If we are appending to an existing packet, we need to obtain 2450 * a pointer to the group record after m_append(), in case a new 2451 * mbuf was allocated. 2452 * 2453 * Only append sources which are in-mode at t1. If we are 2454 * transitioning to MCAST_UNDEFINED state on the group, and 2455 * use_block_allow is zero, do not include source entries. 2456 * Otherwise, we need to include this source in the report. 2457 * 2458 * Only report recorded sources in our filter set when responding 2459 * to a group-source query. 2460 */ 2461 if (record_has_sources) { 2462 if (m == m0) { 2463 md = m_last(m); 2464 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + 2465 md->m_len - nbytes); 2466 } else { 2467 md = m_getptr(m, 0, &off); 2468 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + 2469 off); 2470 } 2471 msrcs = 0; 2472 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, 2473 nims) { 2474 CTR2(KTR_MLD, "%s: visit node %s", __func__, 2475 ip6_sprintf(ip6tbuf, &ims->im6s_addr)); 2476 now = im6s_get_mode(inm, ims, 1); 2477 CTR2(KTR_MLD, "%s: node is %d", __func__, now); 2478 if ((now != mode) || 2479 (now == mode && 2480 (!use_block_allow && mode == MCAST_UNDEFINED))) { 2481 CTR1(KTR_MLD, "%s: skip node", __func__); 2482 continue; 2483 } 2484 if (is_source_query && ims->im6s_stp == 0) { 2485 CTR1(KTR_MLD, "%s: skip unrecorded node", 2486 __func__); 2487 continue; 2488 } 2489 CTR1(KTR_MLD, "%s: append node", __func__); 2490 if (!m_append(m, sizeof(struct in6_addr), 2491 (void *)&ims->im6s_addr)) { 2492 if (m != m0) 2493 m_freem(m); 2494 CTR1(KTR_MLD, "%s: m_append() failed.", 2495 __func__); 2496 return (-ENOMEM); 2497 } 2498 nbytes += sizeof(struct in6_addr); 2499 ++msrcs; 2500 if (msrcs == m0srcs) 2501 break; 2502 } 2503 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__, 2504 msrcs); 2505 pmr->mr_numsrc = htons(msrcs); 2506 nbytes += (msrcs * sizeof(struct in6_addr)); 2507 } 2508 2509 if (is_source_query && msrcs == 0) { 2510 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__); 2511 if (m != m0) 2512 m_freem(m); 2513 return (0); 2514 } 2515 2516 /* 2517 * We are good to go with first packet. 2518 */ 2519 if (m != m0) { 2520 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__); 2521 m->m_pkthdr.vt_nrecs = 1; 2522 mbufq_enqueue(mq, m); 2523 } else 2524 m->m_pkthdr.vt_nrecs++; 2525 2526 /* 2527 * No further work needed if no source list in packet(s). 2528 */ 2529 if (!record_has_sources) 2530 return (nbytes); 2531 2532 /* 2533 * Whilst sources remain to be announced, we need to allocate 2534 * a new packet and fill out as many sources as will fit. 2535 * Always try for a cluster first. 2536 */ 2537 while (nims != NULL) { 2538 if (mbufq_full(mq)) { 2539 CTR1(KTR_MLD, "%s: outbound queue full", __func__); 2540 return (-ENOMEM); 2541 } 2542 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2543 if (m == NULL) 2544 m = m_gethdr(M_NOWAIT, MT_DATA); 2545 if (m == NULL) 2546 return (-ENOMEM); 2547 mld_save_context(m, ifp); 2548 md = m_getptr(m, 0, &off); 2549 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off); 2550 CTR1(KTR_MLD, "%s: allocated next packet", __func__); 2551 2552 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { 2553 if (m != m0) 2554 m_freem(m); 2555 CTR1(KTR_MLD, "%s: m_append() failed.", __func__); 2556 return (-ENOMEM); 2557 } 2558 m->m_pkthdr.vt_nrecs = 1; 2559 nbytes += sizeof(struct mldv2_record); 2560 2561 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - 2562 sizeof(struct mldv2_record)) / sizeof(struct in6_addr); 2563 2564 msrcs = 0; 2565 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) { 2566 CTR2(KTR_MLD, "%s: visit node %s", 2567 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr)); 2568 now = im6s_get_mode(inm, ims, 1); 2569 if ((now != mode) || 2570 (now == mode && 2571 (!use_block_allow && mode == MCAST_UNDEFINED))) { 2572 CTR1(KTR_MLD, "%s: skip node", __func__); 2573 continue; 2574 } 2575 if (is_source_query && ims->im6s_stp == 0) { 2576 CTR1(KTR_MLD, "%s: skip unrecorded node", 2577 __func__); 2578 continue; 2579 } 2580 CTR1(KTR_MLD, "%s: append node", __func__); 2581 if (!m_append(m, sizeof(struct in6_addr), 2582 (void *)&ims->im6s_addr)) { 2583 if (m != m0) 2584 m_freem(m); 2585 CTR1(KTR_MLD, "%s: m_append() failed.", 2586 __func__); 2587 return (-ENOMEM); 2588 } 2589 ++msrcs; 2590 if (msrcs == m0srcs) 2591 break; 2592 } 2593 pmr->mr_numsrc = htons(msrcs); 2594 nbytes += (msrcs * sizeof(struct in6_addr)); 2595 2596 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__); 2597 mbufq_enqueue(mq, m); 2598 } 2599 2600 return (nbytes); 2601 } 2602 2603 /* 2604 * Type used to mark record pass completion. 2605 * We exploit the fact we can cast to this easily from the 2606 * current filter modes on each ip_msource node. 2607 */ 2608 typedef enum { 2609 REC_NONE = 0x00, /* MCAST_UNDEFINED */ 2610 REC_ALLOW = 0x01, /* MCAST_INCLUDE */ 2611 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ 2612 REC_FULL = REC_ALLOW | REC_BLOCK 2613 } rectype_t; 2614 2615 /* 2616 * Enqueue an MLDv2 filter list change to the given output queue. 2617 * 2618 * Source list filter state is held in an RB-tree. When the filter list 2619 * for a group is changed without changing its mode, we need to compute 2620 * the deltas between T0 and T1 for each source in the filter set, 2621 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. 2622 * 2623 * As we may potentially queue two record types, and the entire R-B tree 2624 * needs to be walked at once, we break this out into its own function 2625 * so we can generate a tightly packed queue of packets. 2626 * 2627 * XXX This could be written to only use one tree walk, although that makes 2628 * serializing into the mbuf chains a bit harder. For now we do two walks 2629 * which makes things easier on us, and it may or may not be harder on 2630 * the L2 cache. 2631 * 2632 * If successful the size of all data appended to the queue is returned, 2633 * otherwise an error code less than zero is returned, or zero if 2634 * no record(s) were appended. 2635 */ 2636 static int 2637 mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm) 2638 { 2639 static const int MINRECLEN = 2640 sizeof(struct mldv2_record) + sizeof(struct in6_addr); 2641 struct ifnet *ifp; 2642 struct mldv2_record mr; 2643 struct mldv2_record *pmr; 2644 struct ip6_msource *ims, *nims; 2645 struct mbuf *m, *m0, *md; 2646 int m0srcs, nbytes, npbytes, off, rsrcs, schanged; 2647 uint8_t mode, now, then; 2648 rectype_t crt, drt, nrt; 2649 #ifdef KTR 2650 int nallow, nblock; 2651 char ip6tbuf[INET6_ADDRSTRLEN]; 2652 #endif 2653 2654 IN6_MULTI_LIST_LOCK_ASSERT(); 2655 2656 if (inm->in6m_nsrc == 0 || 2657 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) 2658 return (0); 2659 2660 ifp = inm->in6m_ifp; /* interface */ 2661 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ 2662 crt = REC_NONE; /* current group record type */ 2663 drt = REC_NONE; /* mask of completed group record types */ 2664 nrt = REC_NONE; /* record type for current node */ 2665 m0srcs = 0; /* # source which will fit in current mbuf chain */ 2666 npbytes = 0; /* # of bytes appended this packet */ 2667 nbytes = 0; /* # of bytes appended to group's state-change queue */ 2668 rsrcs = 0; /* # sources encoded in current record */ 2669 schanged = 0; /* # nodes encoded in overall filter change */ 2670 #ifdef KTR 2671 nallow = 0; /* # of source entries in ALLOW_NEW */ 2672 nblock = 0; /* # of source entries in BLOCK_OLD */ 2673 #endif 2674 nims = NULL; /* next tree node pointer */ 2675 2676 /* 2677 * For each possible filter record mode. 2678 * The first kind of source we encounter tells us which 2679 * is the first kind of record we start appending. 2680 * If a node transitioned to UNDEFINED at t1, its mode is treated 2681 * as the inverse of the group's filter mode. 2682 */ 2683 while (drt != REC_FULL) { 2684 do { 2685 m0 = mbufq_last(mq); 2686 if (m0 != NULL && 2687 (m0->m_pkthdr.vt_nrecs + 1 <= 2688 MLD_V2_REPORT_MAXRECS) && 2689 (m0->m_pkthdr.len + MINRECLEN) < 2690 (ifp->if_mtu - MLD_MTUSPACE)) { 2691 m = m0; 2692 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2693 sizeof(struct mldv2_record)) / 2694 sizeof(struct in6_addr); 2695 CTR1(KTR_MLD, 2696 "%s: use previous packet", __func__); 2697 } else { 2698 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2699 if (m == NULL) 2700 m = m_gethdr(M_NOWAIT, MT_DATA); 2701 if (m == NULL) { 2702 CTR1(KTR_MLD, 2703 "%s: m_get*() failed", __func__); 2704 return (-ENOMEM); 2705 } 2706 m->m_pkthdr.vt_nrecs = 0; 2707 mld_save_context(m, ifp); 2708 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - 2709 sizeof(struct mldv2_record)) / 2710 sizeof(struct in6_addr); 2711 npbytes = 0; 2712 CTR1(KTR_MLD, 2713 "%s: allocated new packet", __func__); 2714 } 2715 /* 2716 * Append the MLD group record header to the 2717 * current packet's data area. 2718 * Recalculate pointer to free space for next 2719 * group record, in case m_append() allocated 2720 * a new mbuf or cluster. 2721 */ 2722 memset(&mr, 0, sizeof(mr)); 2723 mr.mr_addr = inm->in6m_addr; 2724 in6_clearscope(&mr.mr_addr); 2725 if (!m_append(m, sizeof(mr), (void *)&mr)) { 2726 if (m != m0) 2727 m_freem(m); 2728 CTR1(KTR_MLD, 2729 "%s: m_append() failed", __func__); 2730 return (-ENOMEM); 2731 } 2732 npbytes += sizeof(struct mldv2_record); 2733 if (m != m0) { 2734 /* new packet; offset in chain */ 2735 md = m_getptr(m, npbytes - 2736 sizeof(struct mldv2_record), &off); 2737 pmr = (struct mldv2_record *)(mtod(md, 2738 uint8_t *) + off); 2739 } else { 2740 /* current packet; offset from last append */ 2741 md = m_last(m); 2742 pmr = (struct mldv2_record *)(mtod(md, 2743 uint8_t *) + md->m_len - 2744 sizeof(struct mldv2_record)); 2745 } 2746 /* 2747 * Begin walking the tree for this record type 2748 * pass, or continue from where we left off 2749 * previously if we had to allocate a new packet. 2750 * Only report deltas in-mode at t1. 2751 * We need not report included sources as allowed 2752 * if we are in inclusive mode on the group, 2753 * however the converse is not true. 2754 */ 2755 rsrcs = 0; 2756 if (nims == NULL) { 2757 nims = RB_MIN(ip6_msource_tree, 2758 &inm->in6m_srcs); 2759 } 2760 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) { 2761 CTR2(KTR_MLD, "%s: visit node %s", __func__, 2762 ip6_sprintf(ip6tbuf, &ims->im6s_addr)); 2763 now = im6s_get_mode(inm, ims, 1); 2764 then = im6s_get_mode(inm, ims, 0); 2765 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d", 2766 __func__, then, now); 2767 if (now == then) { 2768 CTR1(KTR_MLD, 2769 "%s: skip unchanged", __func__); 2770 continue; 2771 } 2772 if (mode == MCAST_EXCLUDE && 2773 now == MCAST_INCLUDE) { 2774 CTR1(KTR_MLD, 2775 "%s: skip IN src on EX group", 2776 __func__); 2777 continue; 2778 } 2779 nrt = (rectype_t)now; 2780 if (nrt == REC_NONE) 2781 nrt = (rectype_t)(~mode & REC_FULL); 2782 if (schanged++ == 0) { 2783 crt = nrt; 2784 } else if (crt != nrt) 2785 continue; 2786 if (!m_append(m, sizeof(struct in6_addr), 2787 (void *)&ims->im6s_addr)) { 2788 if (m != m0) 2789 m_freem(m); 2790 CTR1(KTR_MLD, 2791 "%s: m_append() failed", __func__); 2792 return (-ENOMEM); 2793 } 2794 #ifdef KTR 2795 nallow += !!(crt == REC_ALLOW); 2796 nblock += !!(crt == REC_BLOCK); 2797 #endif 2798 if (++rsrcs == m0srcs) 2799 break; 2800 } 2801 /* 2802 * If we did not append any tree nodes on this 2803 * pass, back out of allocations. 2804 */ 2805 if (rsrcs == 0) { 2806 npbytes -= sizeof(struct mldv2_record); 2807 if (m != m0) { 2808 CTR1(KTR_MLD, 2809 "%s: m_free(m)", __func__); 2810 m_freem(m); 2811 } else { 2812 CTR1(KTR_MLD, 2813 "%s: m_adj(m, -mr)", __func__); 2814 m_adj(m, -((int)sizeof( 2815 struct mldv2_record))); 2816 } 2817 continue; 2818 } 2819 npbytes += (rsrcs * sizeof(struct in6_addr)); 2820 if (crt == REC_ALLOW) 2821 pmr->mr_type = MLD_ALLOW_NEW_SOURCES; 2822 else if (crt == REC_BLOCK) 2823 pmr->mr_type = MLD_BLOCK_OLD_SOURCES; 2824 pmr->mr_numsrc = htons(rsrcs); 2825 /* 2826 * Count the new group record, and enqueue this 2827 * packet if it wasn't already queued. 2828 */ 2829 m->m_pkthdr.vt_nrecs++; 2830 if (m != m0) 2831 mbufq_enqueue(mq, m); 2832 nbytes += npbytes; 2833 } while (nims != NULL); 2834 drt |= crt; 2835 crt = (~crt & REC_FULL); 2836 } 2837 2838 CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__, 2839 nallow, nblock); 2840 2841 return (nbytes); 2842 } 2843 2844 static int 2845 mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq) 2846 { 2847 struct mbufq *gq; 2848 struct mbuf *m; /* pending state-change */ 2849 struct mbuf *m0; /* copy of pending state-change */ 2850 struct mbuf *mt; /* last state-change in packet */ 2851 int docopy, domerge; 2852 u_int recslen; 2853 2854 docopy = 0; 2855 domerge = 0; 2856 recslen = 0; 2857 2858 IN6_MULTI_LIST_LOCK_ASSERT(); 2859 MLD_LOCK_ASSERT(); 2860 2861 /* 2862 * If there are further pending retransmissions, make a writable 2863 * copy of each queued state-change message before merging. 2864 */ 2865 if (inm->in6m_scrv > 0) 2866 docopy = 1; 2867 2868 gq = &inm->in6m_scq; 2869 #ifdef KTR 2870 if (mbufq_first(gq) == NULL) { 2871 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty", 2872 __func__, inm); 2873 } 2874 #endif 2875 2876 m = mbufq_first(gq); 2877 while (m != NULL) { 2878 /* 2879 * Only merge the report into the current packet if 2880 * there is sufficient space to do so; an MLDv2 report 2881 * packet may only contain 65,535 group records. 2882 * Always use a simple mbuf chain concatentation to do this, 2883 * as large state changes for single groups may have 2884 * allocated clusters. 2885 */ 2886 domerge = 0; 2887 mt = mbufq_last(scq); 2888 if (mt != NULL) { 2889 recslen = m_length(m, NULL); 2890 2891 if ((mt->m_pkthdr.vt_nrecs + 2892 m->m_pkthdr.vt_nrecs <= 2893 MLD_V2_REPORT_MAXRECS) && 2894 (mt->m_pkthdr.len + recslen <= 2895 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) 2896 domerge = 1; 2897 } 2898 2899 if (!domerge && mbufq_full(gq)) { 2900 CTR2(KTR_MLD, 2901 "%s: outbound queue full, skipping whole packet %p", 2902 __func__, m); 2903 mt = m->m_nextpkt; 2904 if (!docopy) 2905 m_freem(m); 2906 m = mt; 2907 continue; 2908 } 2909 2910 if (!docopy) { 2911 CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m); 2912 m0 = mbufq_dequeue(gq); 2913 m = m0->m_nextpkt; 2914 } else { 2915 CTR2(KTR_MLD, "%s: copying %p", __func__, m); 2916 m0 = m_dup(m, M_NOWAIT); 2917 if (m0 == NULL) 2918 return (ENOMEM); 2919 m0->m_nextpkt = NULL; 2920 m = m->m_nextpkt; 2921 } 2922 2923 if (!domerge) { 2924 CTR3(KTR_MLD, "%s: queueing %p to scq %p)", 2925 __func__, m0, scq); 2926 mbufq_enqueue(scq, m0); 2927 } else { 2928 struct mbuf *mtl; /* last mbuf of packet mt */ 2929 2930 CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p)", 2931 __func__, m0, mt); 2932 2933 mtl = m_last(mt); 2934 m0->m_flags &= ~M_PKTHDR; 2935 mt->m_pkthdr.len += recslen; 2936 mt->m_pkthdr.vt_nrecs += 2937 m0->m_pkthdr.vt_nrecs; 2938 2939 mtl->m_next = m0; 2940 } 2941 } 2942 2943 return (0); 2944 } 2945 2946 /* 2947 * Respond to a pending MLDv2 General Query. 2948 */ 2949 static void 2950 mld_v2_dispatch_general_query(struct mld_ifsoftc *mli) 2951 { 2952 struct ifmultiaddr *ifma; 2953 struct ifnet *ifp; 2954 struct in6_multi *inm; 2955 int retval __unused; 2956 2957 NET_EPOCH_ASSERT(); 2958 IN6_MULTI_LIST_LOCK_ASSERT(); 2959 MLD_LOCK_ASSERT(); 2960 2961 KASSERT(mli->mli_version == MLD_VERSION_2, 2962 ("%s: called when version %d", __func__, mli->mli_version)); 2963 2964 /* 2965 * Check that there are some packets queued. If so, send them first. 2966 * For large number of groups the reply to general query can take 2967 * many packets, we should finish sending them before starting of 2968 * queuing the new reply. 2969 */ 2970 if (!mbufq_empty(&mli->mli_gq)) 2971 goto send; 2972 2973 ifp = mli->mli_ifp; 2974 2975 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2976 inm = in6m_ifmultiaddr_get_inm(ifma); 2977 if (inm == NULL) 2978 continue; 2979 KASSERT(ifp == inm->in6m_ifp, 2980 ("%s: inconsistent ifp", __func__)); 2981 2982 switch (inm->in6m_state) { 2983 case MLD_NOT_MEMBER: 2984 case MLD_SILENT_MEMBER: 2985 break; 2986 case MLD_REPORTING_MEMBER: 2987 case MLD_IDLE_MEMBER: 2988 case MLD_LAZY_MEMBER: 2989 case MLD_SLEEPING_MEMBER: 2990 case MLD_AWAKENING_MEMBER: 2991 inm->in6m_state = MLD_REPORTING_MEMBER; 2992 retval = mld_v2_enqueue_group_record(&mli->mli_gq, 2993 inm, 0, 0, 0, 0); 2994 CTR2(KTR_MLD, "%s: enqueue record = %d", 2995 __func__, retval); 2996 break; 2997 case MLD_G_QUERY_PENDING_MEMBER: 2998 case MLD_SG_QUERY_PENDING_MEMBER: 2999 case MLD_LEAVING_MEMBER: 3000 break; 3001 } 3002 } 3003 3004 send: 3005 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST); 3006 3007 /* 3008 * Slew transmission of bursts over 500ms intervals. 3009 */ 3010 if (mbufq_first(&mli->mli_gq) != NULL) { 3011 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY( 3012 MLD_RESPONSE_BURST_INTERVAL); 3013 V_interface_timers_running6 = 1; 3014 } 3015 } 3016 3017 /* 3018 * Transmit the next pending message in the output queue. 3019 * 3020 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis. 3021 * MRT: Nothing needs to be done, as MLD traffic is always local to 3022 * a link and uses a link-scope multicast address. 3023 */ 3024 static void 3025 mld_dispatch_packet(struct mbuf *m) 3026 { 3027 struct ip6_moptions im6o; 3028 struct ifnet *ifp; 3029 struct ifnet *oifp; 3030 struct mbuf *m0; 3031 struct mbuf *md; 3032 struct ip6_hdr *ip6; 3033 struct mld_hdr *mld; 3034 int error; 3035 int off; 3036 int type; 3037 uint32_t ifindex; 3038 3039 CTR2(KTR_MLD, "%s: transmit %p", __func__, m); 3040 NET_EPOCH_ASSERT(); 3041 3042 /* 3043 * Set VNET image pointer from enqueued mbuf chain 3044 * before doing anything else. Whilst we use interface 3045 * indexes to guard against interface detach, they are 3046 * unique to each VIMAGE and must be retrieved. 3047 */ 3048 ifindex = mld_restore_context(m); 3049 3050 /* 3051 * Check if the ifnet still exists. This limits the scope of 3052 * any race in the absence of a global ifp lock for low cost 3053 * (an array lookup). 3054 */ 3055 ifp = ifnet_byindex(ifindex); 3056 if (ifp == NULL) { 3057 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.", 3058 __func__, m, ifindex); 3059 m_freem(m); 3060 IP6STAT_INC(ip6s_noroute); 3061 goto out; 3062 } 3063 3064 im6o.im6o_multicast_hlim = 1; 3065 im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL); 3066 im6o.im6o_multicast_ifp = ifp; 3067 3068 if (m->m_flags & M_MLDV1) { 3069 m0 = m; 3070 } else { 3071 m0 = mld_v2_encap_report(ifp, m); 3072 if (m0 == NULL) { 3073 CTR2(KTR_MLD, "%s: dropped %p", __func__, m); 3074 IP6STAT_INC(ip6s_odropped); 3075 goto out; 3076 } 3077 } 3078 3079 mld_scrub_context(m0); 3080 m_clrprotoflags(m); 3081 m0->m_pkthdr.rcvif = V_loif; 3082 3083 ip6 = mtod(m0, struct ip6_hdr *); 3084 #if 0 3085 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */ 3086 #else 3087 /* 3088 * XXX XXX Break some KPI rules to prevent an LOR which would 3089 * occur if we called in6_setscope() at transmission. 3090 * See comments at top of file. 3091 */ 3092 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index); 3093 #endif 3094 3095 /* 3096 * Retrieve the ICMPv6 type before handoff to ip6_output(), 3097 * so we can bump the stats. 3098 */ 3099 md = m_getptr(m0, sizeof(struct ip6_hdr), &off); 3100 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off); 3101 type = mld->mld_type; 3102 3103 oifp = NULL; 3104 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o, 3105 &oifp, NULL); 3106 if (error) { 3107 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error); 3108 goto out; 3109 } 3110 ICMP6STAT_INC2(icp6s_outhist, type); 3111 if (oifp != NULL) { 3112 icmp6_ifstat_inc(oifp, ifs6_out_msg); 3113 switch (type) { 3114 case MLD_LISTENER_REPORT: 3115 case MLDV2_LISTENER_REPORT: 3116 icmp6_ifstat_inc(oifp, ifs6_out_mldreport); 3117 break; 3118 case MLD_LISTENER_DONE: 3119 icmp6_ifstat_inc(oifp, ifs6_out_mlddone); 3120 break; 3121 } 3122 } 3123 out: 3124 return; 3125 } 3126 3127 /* 3128 * Encapsulate an MLDv2 report. 3129 * 3130 * KAME IPv6 requires that hop-by-hop options be passed separately, 3131 * and that the IPv6 header be prepended in a separate mbuf. 3132 * 3133 * Returns a pointer to the new mbuf chain head, or NULL if the 3134 * allocation failed. 3135 */ 3136 static struct mbuf * 3137 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m) 3138 { 3139 struct mbuf *mh; 3140 struct mldv2_report *mld; 3141 struct ip6_hdr *ip6; 3142 struct in6_ifaddr *ia; 3143 int mldreclen; 3144 3145 KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 3146 KASSERT((m->m_flags & M_PKTHDR), 3147 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m)); 3148 3149 /* 3150 * RFC3590: OK to send as :: or tentative during DAD. 3151 */ 3152 NET_EPOCH_ASSERT(); 3153 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); 3154 if (ia == NULL) 3155 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__); 3156 3157 mh = m_gethdr(M_NOWAIT, MT_DATA); 3158 if (mh == NULL) { 3159 if (ia != NULL) 3160 ifa_free(&ia->ia_ifa); 3161 m_freem(m); 3162 return (NULL); 3163 } 3164 M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report)); 3165 3166 mldreclen = m_length(m, NULL); 3167 CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen); 3168 3169 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report); 3170 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + 3171 sizeof(struct mldv2_report) + mldreclen; 3172 3173 ip6 = mtod(mh, struct ip6_hdr *); 3174 ip6->ip6_flow = 0; 3175 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; 3176 ip6->ip6_vfc |= IPV6_VERSION; 3177 ip6->ip6_nxt = IPPROTO_ICMPV6; 3178 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; 3179 if (ia != NULL) 3180 ifa_free(&ia->ia_ifa); 3181 ip6->ip6_dst = in6addr_linklocal_allv2routers; 3182 /* scope ID will be set in netisr */ 3183 3184 mld = (struct mldv2_report *)(ip6 + 1); 3185 mld->mld_type = MLDV2_LISTENER_REPORT; 3186 mld->mld_code = 0; 3187 mld->mld_cksum = 0; 3188 mld->mld_v2_reserved = 0; 3189 mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs); 3190 m->m_pkthdr.vt_nrecs = 0; 3191 3192 mh->m_next = m; 3193 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, 3194 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen); 3195 return (mh); 3196 } 3197 3198 #ifdef KTR 3199 static char * 3200 mld_rec_type_to_str(const int type) 3201 { 3202 3203 switch (type) { 3204 case MLD_CHANGE_TO_EXCLUDE_MODE: 3205 return "TO_EX"; 3206 break; 3207 case MLD_CHANGE_TO_INCLUDE_MODE: 3208 return "TO_IN"; 3209 break; 3210 case MLD_MODE_IS_EXCLUDE: 3211 return "MODE_EX"; 3212 break; 3213 case MLD_MODE_IS_INCLUDE: 3214 return "MODE_IN"; 3215 break; 3216 case MLD_ALLOW_NEW_SOURCES: 3217 return "ALLOW_NEW"; 3218 break; 3219 case MLD_BLOCK_OLD_SOURCES: 3220 return "BLOCK_OLD"; 3221 break; 3222 default: 3223 break; 3224 } 3225 return "unknown"; 3226 } 3227 #endif 3228 3229 static void 3230 mld_init(void *unused __unused) 3231 { 3232 3233 CTR1(KTR_MLD, "%s: initializing", __func__); 3234 MLD_LOCK_INIT(); 3235 3236 ip6_initpktopts(&mld_po); 3237 mld_po.ip6po_hlim = 1; 3238 mld_po.ip6po_hbh = &mld_ra.hbh; 3239 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER; 3240 mld_po.ip6po_flags = IP6PO_DONTFRAG; 3241 mld_po.ip6po_valid = IP6PO_VALID_HLIM | IP6PO_VALID_HBH; 3242 3243 callout_init(&mldslow_callout, 1); 3244 callout_reset(&mldslow_callout, hz / MLD_SLOWHZ, mld_slowtimo, NULL); 3245 callout_init(&mldfast_callout, 1); 3246 callout_reset(&mldfast_callout, hz / MLD_FASTHZ, mld_fasttimo, NULL); 3247 } 3248 SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL); 3249 3250 static void 3251 mld_uninit(void *unused __unused) 3252 { 3253 3254 CTR1(KTR_MLD, "%s: tearing down", __func__); 3255 callout_drain(&mldslow_callout); 3256 callout_drain(&mldfast_callout); 3257 MLD_LOCK_DESTROY(); 3258 } 3259 SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL); 3260 3261 static void 3262 vnet_mld_init(const void *unused __unused) 3263 { 3264 3265 CTR1(KTR_MLD, "%s: initializing", __func__); 3266 3267 LIST_INIT(&V_mli_head); 3268 } 3269 VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init, 3270 NULL); 3271 3272 static void 3273 vnet_mld_uninit(const void *unused __unused) 3274 { 3275 3276 /* This can happen if we shutdown the network stack. */ 3277 CTR1(KTR_MLD, "%s: tearing down", __func__); 3278 } 3279 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit, 3280 NULL); 3281 3282 static int 3283 mld_modevent(module_t mod, int type, void *unused __unused) 3284 { 3285 3286 switch (type) { 3287 case MOD_LOAD: 3288 case MOD_UNLOAD: 3289 break; 3290 default: 3291 return (EOPNOTSUPP); 3292 } 3293 return (0); 3294 } 3295 3296 static moduledata_t mld_mod = { 3297 "mld", 3298 mld_modevent, 3299 0 3300 }; 3301 DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY); 3302