/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009 Bruce Simpson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IPv6 multicast socket, group, and socket option processing module.
 * Normative references: RFC 2292, RFC 3492, RFC 3542, RFC 3678, RFC 3810.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/ktr.h>
#include <sys/tree.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/udp.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet6/nd6.h>
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>

#ifndef KTR_MLD
#define KTR_MLD KTR_INET6
#endif

#ifndef __SOCKUNION_DECLARED
union sockunion {
	struct sockaddr_storage	ss;
	struct sockaddr		sa;
	struct sockaddr_dl	sdl;
	struct sockaddr_in6	sin6;
};
typedef union sockunion sockunion_t;
#define __SOCKUNION_DECLARED
#endif /* __SOCKUNION_DECLARED */

static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
    "IPv6 multicast PCB-layer source filter");
MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
    "IPv6 multicast MLD-layer source filter");

RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link,
    ip6_msource_cmp);

/*
 * Locking:
 * - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK,
 *   IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK.
 * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however
 *   it can be taken by code in net/if.c also.
 * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK.
 *
 * struct in6_multi is covered by IN6_MULTI_LOCK.  There isn't strictly
 * any need for in6_multi itself to be virtualized -- it is bound to an ifp
 * anyway no matter what happens.
 */
struct mtx in6_multi_list_mtx;
MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF);

struct mtx in6_multi_free_mtx;
MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF);

struct sx in6_multi_sx;
SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx");

static void	im6f_commit(struct in6_mfilter *);
static int	im6f_get_source(struct in6_mfilter *imf,
		    const struct sockaddr_in6 *psin,
		    struct in6_msource **);
static struct in6_msource *
		im6f_graft(struct in6_mfilter *, const uint8_t,
		    const struct sockaddr_in6 *);
static void	im6f_leave(struct in6_mfilter *);
static int	im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
static void	im6f_purge(struct in6_mfilter *);
static void	im6f_rollback(struct in6_mfilter *);
static void	im6f_reap(struct in6_mfilter *);
static struct in6_mfilter *
		im6o_match_group(const struct ip6_moptions *,
		    const struct ifnet *, const struct sockaddr *);
static struct in6_msource *
		im6o_match_source(struct in6_mfilter *, const struct sockaddr *);
static void	im6s_merge(struct ip6_msource *ims,
		    const struct in6_msource *lims, const int rollback);
static int	in6_getmulti(struct ifnet *, const struct in6_addr *,
		    struct in6_multi **);
static int	in6_joingroup_locked(struct ifnet *, const struct in6_addr *,
		    struct in6_mfilter *, struct in6_multi **, int);
static int	in6m_get_source(struct in6_multi *inm,
		    const struct in6_addr *addr, const int noalloc,
		    struct ip6_msource **pims);
#ifdef KTR
static int	in6m_is_ifp_detached(const struct in6_multi *);
#endif
static int	in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *);
static void	in6m_purge(struct in6_multi *);
static void	in6m_reap(struct in6_multi *);
static struct ip6_moptions *
		in6p_findmoptions(struct inpcb *);
static int	in6p_get_source_filters(struct inpcb *, struct sockopt *);
static int	in6p_join_group(struct inpcb *, struct sockopt *);
static int	in6p_leave_group(struct inpcb *, struct sockopt *);
static struct ifnet *
		in6p_lookup_mcast_ifp(const struct inpcb *,
		    const struct sockaddr_in6 *);
static int	in6p_block_unblock_source(struct inpcb *, struct sockopt *);
static int	in6p_set_multicast_if(struct inpcb *, struct sockopt *);
static int	in6p_set_source_filters(struct inpcb *, struct sockopt *);
static int	sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_net_inet6_ip6);	/* XXX Not in any common header. */

static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW, 0,
    "IPv6 multicast");

static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
    CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0,
    "Max source filters per group");

static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
    CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0,
    "Max source filters per socket");

/* TODO Virtualize this switch. */
int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP;
SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN,
    &in6_mcast_loop, 0, "Loopback multicast datagrams by default");

static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters,
    "Per-interface stack-wide source filters");

#ifdef KTR
/*
 * Inline function which wraps assertions for a valid ifp.
 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
 * is detached.
 */
static int __inline
in6m_is_ifp_detached(const struct in6_multi *inm)
{
	struct ifnet *ifp;

	KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
	ifp = inm->in6m_ifma->ifma_ifp;
	if (ifp != NULL) {
		/*
		 * Sanity check that network-layer notion of ifp is the
		 * same as that of link-layer.
		 */
		KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
	}

	return (ifp == NULL);
}
#endif

/*
 * Initialize an in6_mfilter structure to a known state at t0, t1
 * with an empty source filter list.
 */
static __inline void
im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
{
	memset(imf, 0, sizeof(struct in6_mfilter));
	RB_INIT(&imf->im6f_sources);
	imf->im6f_st[0] = st0;
	imf->im6f_st[1] = st1;
}

struct in6_mfilter *
ip6_mfilter_alloc(const int mflags, const int st0, const int st1)
{
	struct in6_mfilter *imf;

	imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags);

	if (imf != NULL)
		im6f_init(imf, st0, st1);

	return (imf);
}

void
ip6_mfilter_free(struct in6_mfilter *imf)
{

	im6f_purge(imf);
	free(imf, M_IN6MFILTER);
}

/*
 * Find an IPv6 multicast group entry for this ip6_moptions instance
 * which matches the specified group, and optionally an interface.
 * Return a pointer to the matching filter, or NULL if not found.
 */
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr *group)
{
	const struct sockaddr_in6 *gsin6;
	struct in6_mfilter *imf;
	struct in6_multi *inm;

	gsin6 = (const struct sockaddr_in6 *)group;

	IP6_MFILTER_FOREACH(imf, &imo->im6o_head) {
		inm = imf->im6f_in6m;
		if (inm == NULL)
			continue;
		if ((ifp == NULL || (inm->in6m_ifp == ifp)) &&
		    IN6_ARE_ADDR_EQUAL(&inm->in6m_addr,
		    &gsin6->sin6_addr)) {
			break;
		}
	}
	return (imf);
}

/*
 * Find an IPv6 multicast source entry for this imo which matches
 * the given group index for this socket, and source address.
 *
 * XXX TODO: The scope ID, if present in src, is stripped before
 * any comparison.  We SHOULD enforce scope/zone checks where the source
 * filter entry has a link scope.
 *
 * NOTE: This does not check if the entry is in-mode, merely if
 * it exists, which may not be the desired behaviour.
 */
static struct in6_msource *
im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src)
{
	struct ip6_msource find;
	struct ip6_msource *ims;
	const sockunion_t *psa;

	KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__));

	psa = (const sockunion_t *)src;
	find.im6s_addr = psa->sin6.sin6_addr;
	in6_clearscope(&find.im6s_addr);		/* XXX */
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);

	return ((struct in6_msource *)ims);
}

/*
 * Perform filtering for multicast datagrams on a socket by group and source.
 *
 * Returns 0 if a datagram should be allowed through, or various error codes
 * if the socket was not a member of the group, or the source was muted, etc.
 */
int
im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr *group, const struct sockaddr *src)
{
	struct in6_mfilter *imf;
	struct in6_msource *ims;
	int mode;

	KASSERT(ifp != NULL, ("%s: null ifp", __func__));

	imf = im6o_match_group(imo, ifp, group);
	if (imf == NULL)
		return (MCAST_NOTGMEMBER);

	/*
	 * Check if the source was included in an (S,G) join.
	 * Allow reception on exclusive memberships by default,
	 * reject reception on inclusive memberships by default.
	 * Exclude source only if an in-mode exclude filter exists.
	 * Include source only if an in-mode include filter exists.
	 * NOTE: We are comparing group state here at MLD t1 (now)
	 * with socket-layer t0 (since last downcall).
	 */
	mode = imf->im6f_st[1];
	ims = im6o_match_source(imf, src);

	if ((ims == NULL && mode == MCAST_INCLUDE) ||
	    (ims != NULL && ims->im6sl_st[0] != mode))
		return (MCAST_NOTSMEMBER);

	return (MCAST_PASS);
}
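
/*
 * Illustrative use of im6o_mc_filter() (a sketch only; actual callers are
 * outside this file, typically in the transport-protocol input paths):
 * given a socket's imo, the receiving ifp, and the datagram's group and
 * source addresses, delivery to that socket would be skipped unless
 * MCAST_PASS is returned, e.g.:
 *
 *	if (im6o_mc_filter(imo, ifp, (struct sockaddr *)&group,
 *	    (struct sockaddr *)&src) != MCAST_PASS)
 *		return;		(do not deliver to this socket)
 */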

/*
 * Find and return a reference to an in6_multi record for (ifp, group),
 * and bump its reference count.
 * If one does not exist, try to allocate it, and update link-layer multicast
 * filters on ifp to listen for group.
 * Assumes the IN6_MULTI lock is held across the call.
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
in6_getmulti(struct ifnet *ifp, const struct in6_addr *group,
    struct in6_multi **pinm)
{
	struct epoch_tracker et;
	struct sockaddr_in6 gsin6;
	struct ifmultiaddr *ifma;
	struct in6_multi *inm;
	int error;

	error = 0;

	/*
	 * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK;
	 * if_addmulti() takes this mutex itself, so we must drop and
	 * re-acquire around the call.
	 */
	IN6_MULTI_LOCK_ASSERT();
	IN6_MULTI_LIST_LOCK();
	IF_ADDR_WLOCK(ifp);
	NET_EPOCH_ENTER(et);
	inm = in6m_lookup_locked(ifp, group);
	NET_EPOCH_EXIT(et);

	if (inm != NULL) {
		/*
		 * If we already joined this group, just bump the
		 * refcount and return it.
		 */
		KASSERT(inm->in6m_refcount >= 1,
		    ("%s: bad refcount %d", __func__, inm->in6m_refcount));
		in6m_acquire_locked(inm);
		*pinm = inm;
		goto out_locked;
	}

	memset(&gsin6, 0, sizeof(gsin6));
	gsin6.sin6_family = AF_INET6;
	gsin6.sin6_len = sizeof(struct sockaddr_in6);
	gsin6.sin6_addr = *group;

	/*
	 * Check if a link-layer group is already associated
	 * with this network-layer group on the given ifnet.
	 */
	IN6_MULTI_LIST_UNLOCK();
	IF_ADDR_WUNLOCK(ifp);
	error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
	if (error != 0)
		return (error);
	IN6_MULTI_LIST_LOCK();
	IF_ADDR_WLOCK(ifp);

	/*
	 * If something other than netinet6 is occupying the link-layer
	 * group, print a meaningful error message and back out of
	 * the allocation.
	 * Otherwise, bump the refcount on the existing network-layer
	 * group association and return it.
	 */
	if (ifma->ifma_protospec != NULL) {
		inm = (struct in6_multi *)ifma->ifma_protospec;
#ifdef INVARIANTS
		KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr",
		    __func__));
		KASSERT(ifma->ifma_addr->sa_family == AF_INET6,
		    ("%s: ifma not AF_INET6", __func__));
		KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__));
		if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp ||
		    !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group))
			panic("%s: ifma %p is inconsistent with %p (%p)",
			    __func__, ifma, inm, group);
#endif
		in6m_acquire_locked(inm);
		*pinm = inm;
		goto out_locked;
	}

	IF_ADDR_WLOCK_ASSERT(ifp);

	/*
	 * A new in6_multi record is needed; allocate and initialize it.
	 * We DO NOT perform an MLD join as the in6_ layer may need to
	 * push an initial source list down to MLD to support SSM.
	 *
	 * The initial source filter state is INCLUDE, {} as per the RFC.
	 * Pending state-changes per group are subject to a bounds check.
	 */
	inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO);
	if (inm == NULL) {
		IN6_MULTI_LIST_UNLOCK();
		IF_ADDR_WUNLOCK(ifp);
		if_delmulti_ifma(ifma);
		return (ENOMEM);
	}
	inm->in6m_addr = *group;
	inm->in6m_ifp = ifp;
	inm->in6m_mli = MLD_IFINFO(ifp);
	inm->in6m_ifma = ifma;
	inm->in6m_refcount = 1;
	inm->in6m_state = MLD_NOT_MEMBER;
	mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES);

	inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED;
	inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
	RB_INIT(&inm->in6m_srcs);

	ifma->ifma_protospec = inm;
	*pinm = inm;

out_locked:
	IN6_MULTI_LIST_UNLOCK();
	IF_ADDR_WUNLOCK(ifp);
	return (error);
}

/*
 * Drop a reference to an in6_multi record.
 *
 * If the refcount drops to 0, free the in6_multi record and
 * delete the underlying link-layer membership.
 */
static void
in6m_release(struct in6_multi *inm)
{
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount);

	MPASS(inm->in6m_refcount == 0);
	CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm);

	ifma = inm->in6m_ifma;
	ifp = inm->in6m_ifp;
	MPASS(ifma->ifma_llifma == NULL);

	/* XXX this access is not covered by IF_ADDR_LOCK */
	CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma);
	KASSERT(ifma->ifma_protospec == NULL,
	    ("%s: ifma_protospec != NULL", __func__));
	if (ifp == NULL)
		ifp = ifma->ifma_ifp;

	if (ifp != NULL) {
		CURVNET_SET(ifp->if_vnet);
		in6m_purge(inm);
		free(inm, M_IP6MADDR);
		if_delmulti_ifma_flags(ifma, 1);
		CURVNET_RESTORE();
		if_rele(ifp);
	} else {
		in6m_purge(inm);
		free(inm, M_IP6MADDR);
		if_delmulti_ifma_flags(ifma, 1);
	}
}

static struct grouptask free_gtask;
static struct in6_multi_head in6m_free_list;
static void in6m_release_task(void *arg __unused);
static void in6m_init(void)
{
	SLIST_INIT(&in6m_free_list);
	taskqgroup_config_gtask_init(NULL, &free_gtask, in6m_release_task, "in6m release task");
}

#ifdef EARLY_AP_STARTUP
SYSINIT(in6m_init, SI_SUB_SMP + 1, SI_ORDER_FIRST,
    in6m_init, NULL);
#else
SYSINIT(in6m_init, SI_SUB_ROOT_CONF - 1, SI_ORDER_SECOND,
    in6m_init, NULL);
#endif

void
in6m_release_list_deferred(struct in6_multi_head *inmh)
{
	if (SLIST_EMPTY(inmh))
		return;
	mtx_lock(&in6_multi_free_mtx);
	SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele);
	mtx_unlock(&in6_multi_free_mtx);
	GROUPTASK_ENQUEUE(&free_gtask);
}

void
in6m_release_wait(void)
{

	/* Wait for all jobs to complete.
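	 * Deferred releases queued by in6m_release_list_deferred() run from
	 * the free_gtask grouptask, so draining that taskqueue ensures any
	 * pending in6m_release() calls have finished before we return.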
	 */
	gtaskqueue_drain_all(free_gtask.gt_taskqueue);
}

void
in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct in6_ifaddr *ifa6;
	struct in6_multi_mship *imm, *imm_tmp;
	struct ifmultiaddr *ifma, *ll_ifma;

	IN6_MULTI_LIST_LOCK_ASSERT();

	ifp = inm->in6m_ifp;
	if (ifp == NULL)
		return;		/* already called */

	inm->in6m_ifp = NULL;
	IF_ADDR_WLOCK_ASSERT(ifp);
	ifma = inm->in6m_ifma;
	if (ifma == NULL)
		return;

	if_ref(ifp);
	if (ifma->ifma_flags & IFMA_F_ENQUEUED) {
		CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link);
		ifma->ifma_flags &= ~IFMA_F_ENQUEUED;
	}
	MCDPRINTF("removed ifma: %p from %s\n", ifma, ifp->if_xname);
	if ((ll_ifma = ifma->ifma_llifma) != NULL) {
		MPASS(ifma != ll_ifma);
		ifma->ifma_llifma = NULL;
		MPASS(ll_ifma->ifma_llifma == NULL);
		MPASS(ll_ifma->ifma_ifp == ifp);
		if (--ll_ifma->ifma_refcount == 0) {
			if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) {
				CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link);
				ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED;
			}
			MCDPRINTF("removed ll_ifma: %p from %s\n", ll_ifma, ifp->if_xname);
			if_freemulti(ll_ifma);
		}
	}
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_INET6)
			continue;
		ifa6 = (void *)ifa;
		LIST_FOREACH_SAFE(imm, &ifa6->ia6_memberships,
		    i6mm_chain, imm_tmp) {
			if (inm == imm->i6mm_maddr) {
				LIST_REMOVE(imm, i6mm_chain);
				free(imm, M_IP6MADDR);
				in6m_rele_locked(inmh, inm);
			}
		}
	}
}

static void
in6m_release_task(void *arg __unused)
{
	struct in6_multi_head in6m_free_tmp;
	struct in6_multi *inm, *tinm;

	SLIST_INIT(&in6m_free_tmp);
	mtx_lock(&in6_multi_free_mtx);
	SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele);
	mtx_unlock(&in6_multi_free_mtx);
	IN6_MULTI_LOCK();
	SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) {
		SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele);
		in6m_release(inm);
	}
	IN6_MULTI_UNLOCK();
}

/*
 * Clear recorded source entries for a group.
 * Used by the MLD code. Caller must hold the IN6_MULTI lock.
 * FIXME: Should reap.
 */
void
in6m_clear_recorded(struct in6_multi *inm)
{
	struct ip6_msource *ims;

	IN6_MULTI_LIST_LOCK_ASSERT();

	RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
		if (ims->im6s_stp) {
			ims->im6s_stp = 0;
			--inm->in6m_st[1].iss_rec;
		}
	}
	KASSERT(inm->in6m_st[1].iss_rec == 0,
	    ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec));
}

/*
 * Record a source as pending for a Source-Group MLDv2 query.
 * This lives here as it modifies the shared tree.
 *
 * inm is the group descriptor.
 * naddr is the address of the source to record in network-byte order.
 *
 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will
 * lazy-allocate a source node in response to an SG query.
 * Otherwise, no allocation is performed. This saves some memory
 * with the trade-off that the source will not be reported to the
 * router if joined in the window between the query response and
 * the group actually being joined on the local host.
 *
 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed.
 * This turns off the allocation of a recorded source entry if
 * the group has not been joined.
 *
 * Return 0 if the source didn't exist or was already marked as recorded.
 * Return 1 if the source was marked as recorded by this function.
 * Return <0 if any error occurred (negated errno code).
 */
int
in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
{
	struct ip6_msource find;
	struct ip6_msource *ims, *nims;

	IN6_MULTI_LIST_LOCK_ASSERT();

	find.im6s_addr = *addr;
	ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
	if (ims && ims->im6s_stp)
		return (0);
	if (ims == NULL) {
		if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
			return (-ENOSPC);
		nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE,
		    M_NOWAIT | M_ZERO);
		if (nims == NULL)
			return (-ENOMEM);
		nims->im6s_addr = find.im6s_addr;
		RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
		++inm->in6m_nsrc;
		ims = nims;
	}

	/*
	 * Mark the source as recorded and update the recorded
	 * source count.
	 */
	++ims->im6s_stp;
	++inm->in6m_st[1].iss_rec;

	return (1);
}

/*
 * Return a pointer to an in6_msource owned by an in6_mfilter,
 * given its source address.
 * Lazy-allocate if needed. If this is a new entry its filter state is
 * undefined at t0.
 *
 * imf is the filter set being modified.
 * addr is the source address.
 *
 * SMPng: May be called with locks held; malloc must not block.
 */
static int
im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin,
    struct in6_msource **plims)
{
	struct ip6_msource find;
	struct ip6_msource *ims, *nims;
	struct in6_msource *lims;
	int error;

	error = 0;
	ims = NULL;
	lims = NULL;

	find.im6s_addr = psin->sin6_addr;
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
	lims = (struct in6_msource *)ims;
	if (lims == NULL) {
		if (imf->im6f_nsrc == in6_mcast_maxsocksrc)
			return (ENOSPC);
		nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER,
		    M_NOWAIT | M_ZERO);
		if (nims == NULL)
			return (ENOMEM);
		lims = (struct in6_msource *)nims;
		lims->im6s_addr = find.im6s_addr;
		lims->im6sl_st[0] = MCAST_UNDEFINED;
		RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims);
		++imf->im6f_nsrc;
	}

	*plims = lims;

	return (error);
}

/*
 * Graft a source entry into an existing socket-layer filter set,
 * maintaining any required invariants and checking allocations.
 *
 * The source is marked as being in the new filter mode at t1.
 *
 * Return the pointer to the new node, otherwise return NULL.
 */
static struct in6_msource *
im6f_graft(struct in6_mfilter *imf, const uint8_t st1,
    const struct sockaddr_in6 *psin)
{
	struct ip6_msource *nims;
	struct in6_msource *lims;

	nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER,
	    M_NOWAIT | M_ZERO);
	if (nims == NULL)
		return (NULL);
	lims = (struct in6_msource *)nims;
	lims->im6s_addr = psin->sin6_addr;
	lims->im6sl_st[0] = MCAST_UNDEFINED;
	lims->im6sl_st[1] = st1;
	RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims);
	++imf->im6f_nsrc;

	return (lims);
}

/*
 * Prune a source entry from an existing socket-layer filter set,
 * maintaining any required invariants and checking allocations.
 *
 * The source is marked as being left at t1; it is not freed.
 *
 * Return 0 if no error occurred, otherwise return an errno value.
 */
static int
im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin)
{
	struct ip6_msource find;
	struct ip6_msource *ims;
	struct in6_msource *lims;

	find.im6s_addr = psin->sin6_addr;
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
	if (ims == NULL)
		return (ENOENT);
	lims = (struct in6_msource *)ims;
	lims->im6sl_st[1] = MCAST_UNDEFINED;
	return (0);
}

/*
 * Revert socket-layer filter set deltas at t1 to t0 state.
 */
static void
im6f_rollback(struct in6_mfilter *imf)
{
	struct ip6_msource *ims, *tims;
	struct in6_msource *lims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
		lims = (struct in6_msource *)ims;
		if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
			/* no change at t1 */
			continue;
		} else if (lims->im6sl_st[0] != MCAST_UNDEFINED) {
			/* revert change to existing source at t1 */
			lims->im6sl_st[1] = lims->im6sl_st[0];
		} else {
			/* revert source added t1 */
			CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
			RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
			free(ims, M_IN6MFILTER);
			imf->im6f_nsrc--;
		}
	}
	imf->im6f_st[1] = imf->im6f_st[0];
}

/*
 * Mark socket-layer filter set as INCLUDE {} at t1.
 */
static void
im6f_leave(struct in6_mfilter *imf)
{
	struct ip6_msource *ims;
	struct in6_msource *lims;

	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		lims->im6sl_st[1] = MCAST_UNDEFINED;
	}
	imf->im6f_st[1] = MCAST_INCLUDE;
}

/*
 * Mark socket-layer filter set deltas as committed.
 */
static void
im6f_commit(struct in6_mfilter *imf)
{
	struct ip6_msource *ims;
	struct in6_msource *lims;

	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		lims->im6sl_st[0] = lims->im6sl_st[1];
	}
	imf->im6f_st[0] = imf->im6f_st[1];
}

/*
 * Reap unreferenced sources from socket-layer filter set.
 */
static void
im6f_reap(struct in6_mfilter *imf)
{
	struct ip6_msource *ims, *tims;
	struct in6_msource *lims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
		lims = (struct in6_msource *)ims;
		if ((lims->im6sl_st[0] == MCAST_UNDEFINED) &&
		    (lims->im6sl_st[1] == MCAST_UNDEFINED)) {
			CTR2(KTR_MLD, "%s: free lims %p", __func__, ims);
			RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
			free(ims, M_IN6MFILTER);
			imf->im6f_nsrc--;
		}
	}
}

/*
 * Purge socket-layer filter set.
 */
static void
im6f_purge(struct in6_mfilter *imf)
{
	struct ip6_msource *ims, *tims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
		CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
		RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
		free(ims, M_IN6MFILTER);
		imf->im6f_nsrc--;
	}
	imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED;
	KASSERT(RB_EMPTY(&imf->im6f_sources),
	    ("%s: im6f_sources not empty", __func__));
}

/*
 * Look up a source filter entry for a multicast group.
 *
 * inm is the group descriptor to work with.
 * addr is the IPv6 address to look up.
 * noalloc may be non-zero to suppress allocation of sources.
 * *pims will be set to the address of the retrieved or allocated source.
 *
 * SMPng: NOTE: may be called with locks held.
 * Return 0 if successful, otherwise return a non-zero error code.
 */
static int
in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr,
    const int noalloc, struct ip6_msource **pims)
{
	struct ip6_msource find;
	struct ip6_msource *ims, *nims;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	find.im6s_addr = *addr;
	ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
	if (ims == NULL && !noalloc) {
		if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
			return (ENOSPC);
		nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE,
		    M_NOWAIT | M_ZERO);
		if (nims == NULL)
			return (ENOMEM);
		nims->im6s_addr = *addr;
		RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
		++inm->in6m_nsrc;
		ims = nims;
		CTR3(KTR_MLD, "%s: allocated %s as %p", __func__,
		    ip6_sprintf(ip6tbuf, addr), ims);
	}

	*pims = ims;
	return (0);
}

/*
 * Merge socket-layer source into MLD-layer source.
 * If rollback is non-zero, perform the inverse of the merge.
 */
static void
im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims,
    const int rollback)
{
	int n = rollback ? -1 : 1;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];

	ip6_sprintf(ip6tbuf, &lims->im6s_addr);
#endif

	if (lims->im6sl_st[0] == MCAST_EXCLUDE) {
		CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf);
		ims->im6s_st[1].ex -= n;
	} else if (lims->im6sl_st[0] == MCAST_INCLUDE) {
		CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf);
		ims->im6s_st[1].in -= n;
	}

	if (lims->im6sl_st[1] == MCAST_EXCLUDE) {
		CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf);
		ims->im6s_st[1].ex += n;
	} else if (lims->im6sl_st[1] == MCAST_INCLUDE) {
		CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf);
		ims->im6s_st[1].in += n;
	}
}

/*
 * Atomically update the global in6_multi state, when a membership's
 * filter list is being updated in any way.
 *
 * imf is the per-inpcb-membership group filter pointer.
 * A fake imf may be passed for in-kernel consumers.
 *
 * XXX This is a candidate for a set-symmetric-difference style loop
 * which would eliminate the repeated lookup from root of ims nodes,
 * as they share the same key space.
 *
 * If any error occurred this function will back out of refcounts
 * and return a non-zero value.
 */
static int
in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	struct ip6_msource *ims, *nims;
	struct in6_msource *lims;
	int schanged, error;
	int nsrc0, nsrc1;

	schanged = 0;
	error = 0;
	nsrc1 = nsrc0 = 0;
	IN6_MULTI_LIST_LOCK_ASSERT();

	/*
	 * Update the source filters first, as this may fail.
	 * Maintain count of in-mode filters at t0, t1.  These are
	 * used to work out if we transition into ASM mode or not.
	 * Maintain a count of source filters whose state was
	 * actually modified by this operation.
	 */
	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++;
		if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++;
		if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue;
		error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims);
		++schanged;
		if (error)
			break;
		im6s_merge(nims, lims, 0);
	}
	if (error) {
		struct ip6_msource *bims;

		RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) {
			lims = (struct in6_msource *)ims;
			if (lims->im6sl_st[0] == lims->im6sl_st[1])
				continue;
			(void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims);
			if (bims == NULL)
				continue;
			im6s_merge(bims, lims, 1);
		}
		goto out_reap;
	}

	CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1",
	    __func__, nsrc0, nsrc1);

	/* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
	if (imf->im6f_st[0] == imf->im6f_st[1] &&
	    imf->im6f_st[1] == MCAST_INCLUDE) {
		if (nsrc1 == 0) {
			CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
			--inm->in6m_st[1].iss_in;
		}
	}

	/* Handle filter mode transition on socket. */
	if (imf->im6f_st[0] != imf->im6f_st[1]) {
		CTR3(KTR_MLD, "%s: imf transition %d to %d",
		    __func__, imf->im6f_st[0], imf->im6f_st[1]);

		if (imf->im6f_st[0] == MCAST_EXCLUDE) {
			CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__);
			--inm->in6m_st[1].iss_ex;
		} else if (imf->im6f_st[0] == MCAST_INCLUDE) {
			CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
			--inm->in6m_st[1].iss_in;
		}

		if (imf->im6f_st[1] == MCAST_EXCLUDE) {
			CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__);
			inm->in6m_st[1].iss_ex++;
		} else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
			CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__);
			inm->in6m_st[1].iss_in++;
		}
	}

	/*
	 * Track inm filter state in terms of listener counts.
	 * If there are any exclusive listeners, stack-wide
	 * membership is exclusive.
	 * Otherwise, if only inclusive listeners, stack-wide is inclusive.
	 * If no listeners remain, state is undefined at t1,
	 * and the MLD lifecycle for this group should finish.
	 */
	if (inm->in6m_st[1].iss_ex > 0) {
		CTR1(KTR_MLD, "%s: transition to EX", __func__);
		inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE;
	} else if (inm->in6m_st[1].iss_in > 0) {
		CTR1(KTR_MLD, "%s: transition to IN", __func__);
		inm->in6m_st[1].iss_fmode = MCAST_INCLUDE;
	} else {
		CTR1(KTR_MLD, "%s: transition to UNDEF", __func__);
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
	}

	/* Decrement ASM listener count on transition out of ASM mode. */
	if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
		if ((imf->im6f_st[1] != MCAST_EXCLUDE) ||
		    (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) {
			CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__);
			--inm->in6m_st[1].iss_asm;
		}
	}

	/* Increment ASM listener count on transition to ASM mode. */
	if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
		CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__);
		inm->in6m_st[1].iss_asm++;
	}

	CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm);
	in6m_print(inm);

out_reap:
	if (schanged > 0) {
		CTR1(KTR_MLD, "%s: sources changed; reaping", __func__);
		in6m_reap(inm);
	}
	return (error);
}

/*
 * Mark an in6_multi's filter set deltas as committed.
 * Called by MLD after a state change has been enqueued.
 */
void
in6m_commit(struct in6_multi *inm)
{
	struct ip6_msource *ims;

	CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm);
	CTR1(KTR_MLD, "%s: pre commit:", __func__);
	in6m_print(inm);

	RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
		ims->im6s_st[0] = ims->im6s_st[1];
	}
	inm->in6m_st[0] = inm->in6m_st[1];
}

/*
 * Reap unreferenced nodes from an in6_multi's filter set.
 */
static void
in6m_reap(struct in6_multi *inm)
{
	struct ip6_msource *ims, *tims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
		if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 ||
		    ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 ||
		    ims->im6s_stp != 0)
			continue;
		CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
		RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
		free(ims, M_IP6MSOURCE);
		inm->in6m_nsrc--;
	}
}

/*
 * Purge all source nodes from an in6_multi's filter set.
 */
static void
in6m_purge(struct in6_multi *inm)
{
	struct ip6_msource *ims, *tims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
		CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
		RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
		free(ims, M_IP6MSOURCE);
		inm->in6m_nsrc--;
	}
	/* Free state-change requests that might be queued. */
	mbufq_drain(&inm->in6m_scq);
}

/*
 * Join a multicast address w/o sources.
 * KAME compatibility entry point.
 *
 * SMPng: Assume no mc locks held by caller.
 */
int
in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr,
    /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
    const int delay)
{
	int error;

	IN6_MULTI_LOCK();
	error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay);
	IN6_MULTI_UNLOCK();
	return (error);
}

/*
 * Join a multicast group; real entry point.
 *
 * Only preserves atomicity at inm level.
 * NOTE: imf argument cannot be const due to sys/tree.h limitations.
 *
 * If the MLD downcall fails, the group is not joined, and an error
 * code is returned.
 */
static int
in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
    /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
    const int delay)
{
	struct in6_multi_head inmh;
	struct in6_mfilter timf;
	struct in6_multi *inm;
	struct ifmultiaddr *ifma;
	int error;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	/*
	 * Sanity: Check scope zone ID was set for ifp, if and
	 * only if group is scoped to an interface.
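	 * (For link- and interface-local groups the zone ID is embedded in
	 * s6_addr16[1] by in6_setscope(); the KASSERT below relies on that.)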
	 */
	KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr),
	    ("%s: not a multicast address", __func__));
	if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) ||
	    IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) {
		KASSERT(mcaddr->s6_addr16[1] != 0,
		    ("%s: scope zone ID not set", __func__));
	}

	IN6_MULTI_LOCK_ASSERT();
	IN6_MULTI_LIST_UNLOCK_ASSERT();

	CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__,
	    ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp));

	error = 0;
	inm = NULL;

	/*
	 * If no imf was specified (i.e. kernel consumer),
	 * fake one up and assume it is an ASM join.
	 */
	if (imf == NULL) {
		im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
		imf = &timf;
	}
	error = in6_getmulti(ifp, mcaddr, &inm);
	if (error) {
		CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__);
		return (error);
	}

	IN6_MULTI_LIST_LOCK();
	CTR1(KTR_MLD, "%s: merge inm state", __func__);
	error = in6m_merge(inm, imf);
	if (error) {
		CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
		goto out_in6m_release;
	}

	CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
	error = mld_change_state(inm, delay);
	if (error) {
		CTR1(KTR_MLD, "%s: failed to update source", __func__);
		goto out_in6m_release;
	}

out_in6m_release:
	SLIST_INIT(&inmh);
	if (error) {
		struct epoch_tracker et;

		CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
		IF_ADDR_WLOCK(ifp);
		NET_EPOCH_ENTER(et);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_protospec == inm) {
				ifma->ifma_protospec = NULL;
				break;
			}
		}
		in6m_disconnect_locked(&inmh, inm);
		in6m_rele_locked(&inmh, inm);
		NET_EPOCH_EXIT(et);
		IF_ADDR_WUNLOCK(ifp);
	} else {
		*pinm = inm;
	}
	IN6_MULTI_LIST_UNLOCK();
	in6m_release_list_deferred(&inmh);
	return (error);
}

/*
 * Leave a multicast group; unlocked entry point.
 */
int
in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	int error;

	IN6_MULTI_LOCK();
	error = in6_leavegroup_locked(inm, imf);
	IN6_MULTI_UNLOCK();
	return (error);
}

/*
 * Leave a multicast group; real entry point.
 * All source filters will be expunged.
 *
 * Only preserves atomicity at inm level.
 *
 * Holding the write lock for the INP which contains imf
 * is highly advisable.  We can't assert for it as imf does not
 * contain a back-pointer to the owning inp.
 *
 * Note: This is not the same as in6m_release(*) as this function also
 * makes a state change downcall into MLD.
 */
int
in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	struct in6_multi_head inmh;
	struct in6_mfilter timf;
	struct ifnet *ifp;
	int error;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	error = 0;

	IN6_MULTI_LOCK_ASSERT();

	CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__,
	    inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    (in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)),
	    imf);

	/*
	 * If no imf was specified (i.e. kernel consumer),
	 * fake one up and assume it is an ASM join.
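	 * (Here the fake filter goes from EXCLUDE at t0 to UNDEFINED at t1,
	 * i.e. the membership being torn down is an ASM one.)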
	 */
	if (imf == NULL) {
		im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
		imf = &timf;
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 *
	 * As this particular invocation should not cause any memory
	 * to be allocated, and there is no opportunity to roll back
	 * the transaction, it MUST NOT fail.
	 */

	ifp = inm->in6m_ifp;
	IN6_MULTI_LIST_LOCK();
	CTR1(KTR_MLD, "%s: merge inm state", __func__);
	error = in6m_merge(inm, imf);
	KASSERT(error == 0, ("%s: failed to merge inm state", __func__));

	CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
	error = 0;
	if (ifp)
		error = mld_change_state(inm, 0);
	if (error)
		CTR1(KTR_MLD, "%s: failed mld downcall", __func__);

	CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
	if (ifp)
		IF_ADDR_WLOCK(ifp);

	SLIST_INIT(&inmh);
	if (inm->in6m_refcount == 1)
		in6m_disconnect_locked(&inmh, inm);
	in6m_rele_locked(&inmh, inm);
	if (ifp)
		IF_ADDR_WUNLOCK(ifp);
	IN6_MULTI_LIST_UNLOCK();
	in6m_release_list_deferred(&inmh);
	return (error);
}

/*
 * Block or unblock an ASM multicast source on an inpcb.
 * This implements the delta-based API described in RFC 3678.
 *
 * The delta-based API applies only to exclusive-mode memberships.
 * An MLD downcall will be performed.
 *
 * SMPng: NOTE: Must take Giant as a join may create a new ifma.
 *
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
{
	struct group_source_req gsr;
	sockunion_t *gsa, *ssa;
	struct ifnet *ifp;
	struct in6_mfilter *imf;
	struct ip6_moptions *imo;
	struct in6_msource *ims;
	struct in6_multi *inm;
	uint16_t fmode;
	int error, doblock;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	ifp = NULL;
	error = 0;
	doblock = 0;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (sockunion_t *)&gsr.gsr_group;
	ssa = (sockunion_t *)&gsr.gsr_source;

	switch (sopt->sopt_name) {
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
		error = sooptcopyin(sopt, &gsr,
		    sizeof(struct group_source_req),
		    sizeof(struct group_source_req));
		if (error)
			return (error);

		if (gsa->sin6.sin6_family != AF_INET6 ||
		    gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);

		if (ssa->sin6.sin6_family != AF_INET6 ||
		    ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);

		if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
			return (EADDRNOTAVAIL);

		ifp = ifnet_byindex(gsr.gsr_interface);

		if (sopt->sopt_name == MCAST_BLOCK_SOURCE)
			doblock = 1;
		break;

	default:
		CTR2(KTR_MLD, "%s: unknown sopt_name %d",
		    __func__, sopt->sopt_name);
		return (EOPNOTSUPP);
		break;
	}

	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
		return (EINVAL);

	(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);

	/*
	 * Check if we are actually a member of this group.
	 */
	imo = in6p_findmoptions(inp);
	imf = im6o_match_group(imo, ifp, &gsa->sa);
	if (imf == NULL) {
		error = EADDRNOTAVAIL;
		goto out_in6p_locked;
	}
	inm = imf->im6f_in6m;

	/*
	 * Attempting to use the delta-based API on a
	 * non-exclusive-mode membership is an error.
	 */
	fmode = imf->im6f_st[0];
	if (fmode != MCAST_EXCLUDE) {
		error = EINVAL;
		goto out_in6p_locked;
	}

	/*
	 * Deal with error cases up-front:
	 *  Asked to block, but already blocked; or
	 *  Asked to unblock, but nothing to unblock.
	 * If adding a new block entry, allocate it.
	 */
	ims = im6o_match_source(imf, &ssa->sa);
	if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
		CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
		    ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
		    doblock ? "" : "not ");
		error = EADDRNOTAVAIL;
		goto out_in6p_locked;
	}

	INP_WLOCK_ASSERT(inp);

	/*
	 * Begin state merge transaction at socket layer.
	 */
	if (doblock) {
		CTR2(KTR_MLD, "%s: %s source", __func__, "block");
		ims = im6f_graft(imf, fmode, &ssa->sin6);
		if (ims == NULL)
			error = ENOMEM;
	} else {
		CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
		error = im6f_prune(imf, &ssa->sin6);
	}

	if (error) {
		CTR1(KTR_MLD, "%s: merge imf state failed", __func__);
		goto out_im6f_rollback;
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 */
	IN6_MULTI_LIST_LOCK();
	CTR1(KTR_MLD, "%s: merge inm state", __func__);
	error = in6m_merge(inm, imf);
	if (error)
		CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
	else {
		CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
		error = mld_change_state(inm, 0);
		if (error)
			CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
	}

	IN6_MULTI_LIST_UNLOCK();

out_im6f_rollback:
	if (error)
		im6f_rollback(imf);
	else
		im6f_commit(imf);

	im6f_reap(imf);

out_in6p_locked:
	INP_WUNLOCK(inp);
	return (error);
}

/*
 * Given an inpcb, return its multicast options structure pointer.  Accepts
 * an unlocked inpcb pointer, but will return it locked.  May sleep.
 *
 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
 * SMPng: NOTE: Returns with the INP write lock held.
 */
static struct ip6_moptions *
in6p_findmoptions(struct inpcb *inp)
{
	struct ip6_moptions *imo;

	INP_WLOCK(inp);
	if (inp->in6p_moptions != NULL)
		return (inp->in6p_moptions);

	INP_WUNLOCK(inp);

	imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK);

	imo->im6o_multicast_ifp = NULL;
	imo->im6o_multicast_hlim = V_ip6_defmcasthlim;
	imo->im6o_multicast_loop = in6_mcast_loop;
	STAILQ_INIT(&imo->im6o_head);

	INP_WLOCK(inp);
	if (inp->in6p_moptions != NULL) {
		free(imo, M_IP6MOPTS);
		return (inp->in6p_moptions);
	}
	inp->in6p_moptions = imo;
	return (imo);
}

/*
 * Discard the IPv6 multicast options (and source filters).
 *
 * SMPng: NOTE: assumes INP write lock is held.
 *
 * XXX can all be safely deferred to epoch_call
 *
 */
static void
inp_gcmoptions(struct ip6_moptions *imo)
{
	struct in6_mfilter *imf;
	struct in6_multi *inm;
	struct ifnet *ifp;

	while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) {
		ip6_mfilter_remove(&imo->im6o_head, imf);

		im6f_leave(imf);
		if ((inm = imf->im6f_in6m) != NULL) {
			if ((ifp = inm->in6m_ifp) != NULL) {
				CURVNET_SET(ifp->if_vnet);
				(void)in6_leavegroup(inm, imf);
				CURVNET_RESTORE();
			} else {
				(void)in6_leavegroup(inm, imf);
			}
		}
		ip6_mfilter_free(imf);
	}
	free(imo, M_IP6MOPTS);
}

void
ip6_freemoptions(struct ip6_moptions *imo)
{
	if (imo == NULL)
		return;
	inp_gcmoptions(imo);
}

/*
 * Atomically get source filters on a socket for an IPv6 multicast group.
 * Called with INP lock held; returns with lock released.
 */
static int
in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
	struct __msfilterreq msfr;
	sockunion_t *gsa;
	struct ifnet *ifp;
	struct ip6_moptions *imo;
	struct in6_mfilter *imf;
	struct ip6_msource *ims;
	struct in6_msource *lims;
	struct sockaddr_in6 *psin;
	struct sockaddr_storage *ptss;
	struct sockaddr_storage *tss;
	int error;
	size_t nsrcs, ncsrcs;

	INP_WLOCK_ASSERT(inp);

	imo = inp->in6p_moptions;
	KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__));

	INP_WUNLOCK(inp);

	error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
	    sizeof(struct __msfilterreq));
	if (error)
		return (error);

	if (msfr.msfr_group.ss_family != AF_INET6 ||
	    msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
		return (EINVAL);

	gsa = (sockunion_t *)&msfr.msfr_group;
	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
		return (EINVAL);

	if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
		return (EADDRNOTAVAIL);
	ifp = ifnet_byindex(msfr.msfr_ifindex);
	if (ifp == NULL)
		return (EADDRNOTAVAIL);
	(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);

	INP_WLOCK(inp);

	/*
	 * Lookup group on the socket.
	 */
	imf = im6o_match_group(imo, ifp, &gsa->sa);
	if (imf == NULL) {
		INP_WUNLOCK(inp);
		return (EADDRNOTAVAIL);
	}

	/*
	 * Ignore memberships which are in limbo.
	 */
	if (imf->im6f_st[1] == MCAST_UNDEFINED) {
		INP_WUNLOCK(inp);
		return (EAGAIN);
	}
	msfr.msfr_fmode = imf->im6f_st[1];

	/*
	 * If the user specified a buffer, copy out the source filter
	 * entries to userland gracefully.
	 * We only copy out the number of entries which userland
	 * has asked for, but we always tell userland how big the
	 * buffer really needs to be.
	 */
	if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc)
		msfr.msfr_nsrcs = in6_mcast_maxsocksrc;
	tss = NULL;
	if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) {
		tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (tss == NULL) {
			INP_WUNLOCK(inp);
			return (ENOBUFS);
		}
	}

	/*
	 * Count number of sources in-mode at t0.
	 * If buffer space exists and remains, copy out source entries.
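	 * The total in-mode count is always reported back in msfr_nsrcs,
	 * even when only some of the entries fit in the user buffer.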
	 */
	nsrcs = msfr.msfr_nsrcs;
	ncsrcs = 0;
	ptss = tss;
	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		if (lims->im6sl_st[0] == MCAST_UNDEFINED ||
		    lims->im6sl_st[0] != imf->im6f_st[0])
			continue;
		++ncsrcs;
		if (tss != NULL && nsrcs > 0) {
			psin = (struct sockaddr_in6 *)ptss;
			psin->sin6_family = AF_INET6;
			psin->sin6_len = sizeof(struct sockaddr_in6);
			psin->sin6_addr = lims->im6s_addr;
			psin->sin6_port = 0;
			--nsrcs;
			++ptss;
		}
	}

	INP_WUNLOCK(inp);

	if (tss != NULL) {
		error = copyout(tss, msfr.msfr_srcs,
		    sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
		free(tss, M_TEMP);
		if (error)
			return (error);
	}

	msfr.msfr_nsrcs = ncsrcs;
	error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq));

	return (error);
}

/*
 * Return the IP multicast options in response to user getsockopt().
 */
int
ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt)
{
	struct ip6_moptions *im6o;
	int error;
	u_int optval;

	INP_WLOCK(inp);
	im6o = inp->in6p_moptions;
	/*
	 * If socket is neither of type SOCK_RAW nor SOCK_DGRAM,
	 * or is a divert socket, reject it.
	 */
	if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
	    (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
	    inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = 0;
	switch (sopt->sopt_name) {
	case IPV6_MULTICAST_IF:
		if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) {
			optval = 0;
		} else {
			optval = im6o->im6o_multicast_ifp->if_index;
		}
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &optval, sizeof(u_int));
		break;

	case IPV6_MULTICAST_HOPS:
		if (im6o == NULL)
			optval = V_ip6_defmcasthlim;
		else
			optval = im6o->im6o_multicast_hlim;
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &optval, sizeof(u_int));
		break;

	case IPV6_MULTICAST_LOOP:
		if (im6o == NULL)
			optval = in6_mcast_loop;	/* XXX VIMAGE */
		else
			optval = im6o->im6o_multicast_loop;
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &optval, sizeof(u_int));
		break;

	case IPV6_MSFILTER:
		if (im6o == NULL) {
			error = EADDRNOTAVAIL;
			INP_WUNLOCK(inp);
		} else {
			error = in6p_get_source_filters(inp, sopt);
		}
		break;

	default:
		INP_WUNLOCK(inp);
		error = ENOPROTOOPT;
		break;
	}

	INP_UNLOCK_ASSERT(inp);

	return (error);
}

/*
 * Look up the ifnet to use for a multicast group membership,
 * given the address of an IPv6 group.
 *
 * This routine exists to support legacy IPv6 multicast applications.
 *
 * If inp is non-NULL, use this socket's current FIB number for any
 * required FIB lookup.  Look up the group address in the unicast FIB,
 * and use its ifp; usually, this points to the default next-hop.
 * If the FIB lookup fails, return NULL.
 *
 * FUTURE: Support multiple forwarding tables for IPv6.
 *
 * Returns NULL if no ifp could be found.
 */
static struct ifnet *
in6p_lookup_mcast_ifp(const struct inpcb *inp,
    const struct sockaddr_in6 *gsin6)
{
	struct nhop6_basic nh6;
	struct in6_addr dst;
	uint32_t scopeid;
	uint32_t fibnum;

	KASSERT(inp->inp_vflag & INP_IPV6,
	    ("%s: not INP_IPV6 inpcb", __func__));
	KASSERT(gsin6->sin6_family == AF_INET6,
	    ("%s: not AF_INET6 group", __func__));

	in6_splitscope(&gsin6->sin6_addr, &dst, &scopeid);
	fibnum = inp ? inp->inp_inc.inc_fibnum : RT_DEFAULT_FIB;
	if (fib6_lookup_nh_basic(fibnum, &dst, scopeid, 0, 0, &nh6) != 0)
		return (NULL);

	return (nh6.nh_ifp);
}

/*
 * Join an IPv6 multicast group, possibly with a source.
 *
 * FIXME: The KAME use of the unspecified address (::)
 * to join *all* multicast groups is currently unsupported.
 */
static int
in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
{
	struct in6_multi_head inmh;
	struct group_source_req gsr;
	sockunion_t *gsa, *ssa;
	struct ifnet *ifp;
	struct in6_mfilter *imf;
	struct ip6_moptions *imo;
	struct in6_multi *inm;
	struct in6_msource *lims;
	int error, is_new;

	SLIST_INIT(&inmh);
	ifp = NULL;
	lims = NULL;
	error = 0;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (sockunion_t *)&gsr.gsr_group;
	gsa->ss.ss_family = AF_UNSPEC;
	ssa = (sockunion_t *)&gsr.gsr_source;
	ssa->ss.ss_family = AF_UNSPEC;

	/*
	 * Chew everything into struct group_source_req.
	 * Overwrite the port field if present, as the sockaddr
	 * being copied in may be matched with a binary comparison.
	 * Ignore passed-in scope ID.
	 */
	switch (sopt->sopt_name) {
	case IPV6_JOIN_GROUP: {
		struct ipv6_mreq mreq;

		error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
		    sizeof(struct ipv6_mreq));
		if (error)
			return (error);

		gsa->sin6.sin6_family = AF_INET6;
		gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
		gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;

		if (mreq.ipv6mr_interface == 0) {
			ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
		} else {
			if (V_if_index < mreq.ipv6mr_interface)
				return (EADDRNOTAVAIL);
			ifp = ifnet_byindex(mreq.ipv6mr_interface);
		}
		CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p",
		    __func__, mreq.ipv6mr_interface, ifp);
	} break;

	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
		if (sopt->sopt_name == MCAST_JOIN_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_req),
			    sizeof(struct group_req));
		} else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_source_req),
			    sizeof(struct group_source_req));
		}
		if (error)
			return (error);

		if (gsa->sin6.sin6_family != AF_INET6 ||
		    gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);

		if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
			if (ssa->sin6.sin6_family != AF_INET6 ||
			    ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
				return (EINVAL);
			if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr))
				return (EINVAL);
			/*
			 * TODO: Validate embedded scope ID in source
			 * list entry against passed-in ifp, if and only
			 * if source list filter entry is iface or node local.
1942 */ 1943 in6_clearscope(&ssa->sin6.sin6_addr); 1944 ssa->sin6.sin6_port = 0; 1945 ssa->sin6.sin6_scope_id = 0; 1946 } 1947 1948 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1949 return (EADDRNOTAVAIL); 1950 ifp = ifnet_byindex(gsr.gsr_interface); 1951 break; 1952 1953 default: 1954 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1955 __func__, sopt->sopt_name); 1956 return (EOPNOTSUPP); 1957 break; 1958 } 1959 1960 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1961 return (EINVAL); 1962 1963 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 1964 return (EADDRNOTAVAIL); 1965 1966 gsa->sin6.sin6_port = 0; 1967 gsa->sin6.sin6_scope_id = 0; 1968 1969 /* 1970 * Always set the scope zone ID on memberships created from userland. 1971 * Use the passed-in ifp to do this. 1972 * XXX The in6_setscope() return value is meaningless. 1973 * XXX SCOPE6_LOCK() is taken by in6_setscope(). 1974 */ 1975 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1976 1977 IN6_MULTI_LOCK(); 1978 1979 /* 1980 * Find the membership in the membership list. 1981 */ 1982 imo = in6p_findmoptions(inp); 1983 imf = im6o_match_group(imo, ifp, &gsa->sa); 1984 if (imf == NULL) { 1985 is_new = 1; 1986 inm = NULL; 1987 1988 if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) { 1989 error = ENOMEM; 1990 goto out_in6p_locked; 1991 } 1992 } else { 1993 is_new = 0; 1994 inm = imf->im6f_in6m; 1995 1996 if (ssa->ss.ss_family != AF_UNSPEC) { 1997 /* 1998 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 1999 * is an error. On an existing inclusive membership, 2000 * it just adds the source to the filter list. 2001 */ 2002 if (imf->im6f_st[1] != MCAST_INCLUDE) { 2003 error = EINVAL; 2004 goto out_in6p_locked; 2005 } 2006 /* 2007 * Throw out duplicates. 2008 * 2009 * XXX FIXME: This makes a naive assumption that 2010 * even if entries exist for *ssa in this imf, 2011 * they will be rejected as dupes, even if they 2012 * are not valid in the current mode (in-mode). 2013 * 2014 * in6_msource is transactioned just as for anything 2015 * else in SSM -- but note naive use of in6m_graft() 2016 * below for allocating new filter entries. 2017 * 2018 * This is only an issue if someone mixes the 2019 * full-state SSM API with the delta-based API, 2020 * which is discouraged in the relevant RFCs. 2021 */ 2022 lims = im6o_match_source(imf, &ssa->sa); 2023 if (lims != NULL /*&& 2024 lims->im6sl_st[1] == MCAST_INCLUDE*/) { 2025 error = EADDRNOTAVAIL; 2026 goto out_in6p_locked; 2027 } 2028 } else { 2029 /* 2030 * MCAST_JOIN_GROUP alone, on any existing membership, 2031 * is rejected, to stop the same inpcb tying up 2032 * multiple refs to the in_multi. 2033 * On an existing inclusive membership, this is also 2034 * an error; if you want to change filter mode, 2035 * you must use the userland API setsourcefilter(). 2036 * XXX We don't reject this for imf in UNDEFINED 2037 * state at t1, because allocation of a filter 2038 * is atomic with allocation of a membership. 2039 */ 2040 error = EINVAL; 2041 goto out_in6p_locked; 2042 } 2043 } 2044 2045 /* 2046 * Begin state merge transaction at socket layer. 2047 */ 2048 INP_WLOCK_ASSERT(inp); 2049 2050 /* 2051 * Graft new source into filter list for this inpcb's 2052 * membership of the group. The in6_multi may not have 2053 * been allocated yet if this is a new membership, however, 2054 * the in_mfilter slot will be allocated and must be initialized. 2055 * 2056 * Note: Grafting of exclusive mode filters doesn't happen 2057 * in this path. 
2058 * XXX: Should check for non-NULL lims (node exists but may 2059 * not be in-mode) for interop with full-state API. 2060 */ 2061 if (ssa->ss.ss_family != AF_UNSPEC) { 2062 /* Membership starts in IN mode */ 2063 if (is_new) { 2064 CTR1(KTR_MLD, "%s: new join w/source", __func__); 2065 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE); 2066 if (imf == NULL) { 2067 error = ENOMEM; 2068 goto out_in6p_locked; 2069 } 2070 } else { 2071 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 2072 } 2073 lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); 2074 if (lims == NULL) { 2075 CTR1(KTR_MLD, "%s: merge imf state failed", 2076 __func__); 2077 error = ENOMEM; 2078 goto out_in6p_locked; 2079 } 2080 } else { 2081 /* No address specified; Membership starts in EX mode */ 2082 if (is_new) { 2083 CTR1(KTR_MLD, "%s: new join w/o source", __func__); 2084 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE); 2085 if (imf == NULL) { 2086 error = ENOMEM; 2087 goto out_in6p_locked; 2088 } 2089 } 2090 } 2091 2092 /* 2093 * Begin state merge transaction at MLD layer. 2094 */ 2095 if (is_new) { 2096 in_pcbref(inp); 2097 INP_WUNLOCK(inp); 2098 2099 error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf, 2100 &imf->im6f_in6m, 0); 2101 2102 INP_WLOCK(inp); 2103 if (in_pcbrele_wlocked(inp)) { 2104 error = ENXIO; 2105 goto out_in6p_unlocked; 2106 } 2107 if (error) { 2108 goto out_in6p_locked; 2109 } 2110 /* 2111 * NOTE: Refcount from in6_joingroup_locked() 2112 * is protecting membership. 2113 */ 2114 ip6_mfilter_insert(&imo->im6o_head, imf); 2115 } else { 2116 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2117 IN6_MULTI_LIST_LOCK(); 2118 error = in6m_merge(inm, imf); 2119 if (error) { 2120 CTR1(KTR_MLD, "%s: failed to merge inm state", 2121 __func__); 2122 IN6_MULTI_LIST_UNLOCK(); 2123 im6f_rollback(imf); 2124 im6f_reap(imf); 2125 goto out_in6p_locked; 2126 } 2127 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2128 error = mld_change_state(inm, 0); 2129 IN6_MULTI_LIST_UNLOCK(); 2130 2131 if (error) { 2132 CTR1(KTR_MLD, "%s: failed mld downcall", 2133 __func__); 2134 im6f_rollback(imf); 2135 im6f_reap(imf); 2136 goto out_in6p_locked; 2137 } 2138 } 2139 2140 im6f_commit(imf); 2141 imf = NULL; 2142 2143 out_in6p_locked: 2144 INP_WUNLOCK(inp); 2145 out_in6p_unlocked: 2146 IN6_MULTI_UNLOCK(); 2147 2148 if (is_new && imf) { 2149 if (imf->im6f_in6m != NULL) { 2150 struct in6_multi_head inmh; 2151 2152 SLIST_INIT(&inmh); 2153 SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer); 2154 in6m_release_list_deferred(&inmh); 2155 } 2156 ip6_mfilter_free(imf); 2157 } 2158 return (error); 2159 } 2160 2161 /* 2162 * Leave an IPv6 multicast group on an inpcb, possibly with a source. 
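 *
 * Hedged userland sketch of the source-specific leave handled below;
 * the socket "s", the interface name and the addresses are
 * illustrative only:
 *
 *	struct group_source_req gsr;
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	memset(&gsr, 0, sizeof(gsr));
 *	gsr.gsr_interface = if_nametoindex("em0");
 *	grp->sin6_family = src->sin6_family = AF_INET6;
 *	grp->sin6_len = src->sin6_len = sizeof(struct sockaddr_in6);
 *	(void)inet_pton(AF_INET6, "ff3e::8000:1", &grp->sin6_addr);
 *	(void)inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *	(void)setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_SOURCE_GROUP,
 *	    &gsr, sizeof(gsr));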
2163 */ 2164 static int 2165 in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) 2166 { 2167 struct ipv6_mreq mreq; 2168 struct group_source_req gsr; 2169 sockunion_t *gsa, *ssa; 2170 struct ifnet *ifp; 2171 struct in6_mfilter *imf; 2172 struct ip6_moptions *imo; 2173 struct in6_msource *ims; 2174 struct in6_multi *inm; 2175 uint32_t ifindex; 2176 int error; 2177 bool is_final; 2178 #ifdef KTR 2179 char ip6tbuf[INET6_ADDRSTRLEN]; 2180 #endif 2181 2182 ifp = NULL; 2183 ifindex = 0; 2184 error = 0; 2185 is_final = true; 2186 2187 memset(&gsr, 0, sizeof(struct group_source_req)); 2188 gsa = (sockunion_t *)&gsr.gsr_group; 2189 gsa->ss.ss_family = AF_UNSPEC; 2190 ssa = (sockunion_t *)&gsr.gsr_source; 2191 ssa->ss.ss_family = AF_UNSPEC; 2192 2193 /* 2194 * Chew everything passed in up into a struct group_source_req 2195 * as that is easier to process. 2196 * Note: Any embedded scope ID in the multicast group passed 2197 * in by userland is ignored, the interface index is the recommended 2198 * mechanism to specify an interface; see below. 2199 */ 2200 switch (sopt->sopt_name) { 2201 case IPV6_LEAVE_GROUP: 2202 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 2203 sizeof(struct ipv6_mreq)); 2204 if (error) 2205 return (error); 2206 gsa->sin6.sin6_family = AF_INET6; 2207 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 2208 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 2209 gsa->sin6.sin6_port = 0; 2210 gsa->sin6.sin6_scope_id = 0; 2211 ifindex = mreq.ipv6mr_interface; 2212 break; 2213 2214 case MCAST_LEAVE_GROUP: 2215 case MCAST_LEAVE_SOURCE_GROUP: 2216 if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2217 error = sooptcopyin(sopt, &gsr, 2218 sizeof(struct group_req), 2219 sizeof(struct group_req)); 2220 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2221 error = sooptcopyin(sopt, &gsr, 2222 sizeof(struct group_source_req), 2223 sizeof(struct group_source_req)); 2224 } 2225 if (error) 2226 return (error); 2227 2228 if (gsa->sin6.sin6_family != AF_INET6 || 2229 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2230 return (EINVAL); 2231 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2232 if (ssa->sin6.sin6_family != AF_INET6 || 2233 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2234 return (EINVAL); 2235 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 2236 return (EINVAL); 2237 /* 2238 * TODO: Validate embedded scope ID in source 2239 * list entry against passed-in ifp, if and only 2240 * if source list filter entry is iface or node local. 2241 */ 2242 in6_clearscope(&ssa->sin6.sin6_addr); 2243 } 2244 gsa->sin6.sin6_port = 0; 2245 gsa->sin6.sin6_scope_id = 0; 2246 ifindex = gsr.gsr_interface; 2247 break; 2248 2249 default: 2250 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 2251 __func__, sopt->sopt_name); 2252 return (EOPNOTSUPP); 2253 break; 2254 } 2255 2256 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2257 return (EINVAL); 2258 2259 /* 2260 * Validate interface index if provided. If no interface index 2261 * was provided separately, attempt to look the membership up 2262 * from the default scope as a last resort to disambiguate 2263 * the membership we are being asked to leave. 2264 * XXX SCOPE6 lock potentially taken here. 
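 *
 * Illustration (assumed values): after sa6_embedscope(), a link-scoped
 * group such as ff02::1 with zone ID 3 is carried as ff02:3::1, i.e.
 * s6_addr16[1] == htons(3); the fallback below recovers that word as
 * the interface index when userland supplied neither an ifindex nor a
 * scope ID.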
2265 */ 2266 if (ifindex != 0) { 2267 if (V_if_index < ifindex) 2268 return (EADDRNOTAVAIL); 2269 ifp = ifnet_byindex(ifindex); 2270 if (ifp == NULL) 2271 return (EADDRNOTAVAIL); 2272 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2273 } else { 2274 error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); 2275 if (error) 2276 return (EADDRNOTAVAIL); 2277 /* 2278 * Some badly behaved applications don't pass an ifindex 2279 * or a scope ID, which is an API violation. In this case, 2280 * perform a lookup as per a v6 join. 2281 * 2282 * XXX For now, stomp on zone ID for the corner case. 2283 * This is not the 'KAME way', but we need to see the ifp 2284 * directly until such time as this implementation is 2285 * refactored, assuming the scope IDs are the way to go. 2286 */ 2287 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]); 2288 if (ifindex == 0) { 2289 CTR2(KTR_MLD, "%s: warning: no ifindex, looking up " 2290 "ifp for group %s.", __func__, 2291 ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr)); 2292 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 2293 } else { 2294 ifp = ifnet_byindex(ifindex); 2295 } 2296 if (ifp == NULL) 2297 return (EADDRNOTAVAIL); 2298 } 2299 2300 CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp); 2301 KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__)); 2302 2303 IN6_MULTI_LOCK(); 2304 2305 /* 2306 * Find the membership in the membership list. 2307 */ 2308 imo = in6p_findmoptions(inp); 2309 imf = im6o_match_group(imo, ifp, &gsa->sa); 2310 if (imf == NULL) { 2311 error = EADDRNOTAVAIL; 2312 goto out_in6p_locked; 2313 } 2314 inm = imf->im6f_in6m; 2315 2316 if (ssa->ss.ss_family != AF_UNSPEC) 2317 is_final = false; 2318 2319 /* 2320 * Begin state merge transaction at socket layer. 2321 */ 2322 INP_WLOCK_ASSERT(inp); 2323 2324 /* 2325 * If we were instructed only to leave a given source, do so. 2326 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2327 */ 2328 if (is_final) { 2329 ip6_mfilter_remove(&imo->im6o_head, imf); 2330 im6f_leave(imf); 2331 2332 /* 2333 * Give up the multicast address record to which 2334 * the membership points. 2335 */ 2336 (void)in6_leavegroup_locked(inm, imf); 2337 } else { 2338 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 2339 error = EADDRNOTAVAIL; 2340 goto out_in6p_locked; 2341 } 2342 ims = im6o_match_source(imf, &ssa->sa); 2343 if (ims == NULL) { 2344 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 2345 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 2346 "not "); 2347 error = EADDRNOTAVAIL; 2348 goto out_in6p_locked; 2349 } 2350 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 2351 error = im6f_prune(imf, &ssa->sin6); 2352 if (error) { 2353 CTR1(KTR_MLD, "%s: merge imf state failed", 2354 __func__); 2355 goto out_in6p_locked; 2356 } 2357 } 2358 2359 /* 2360 * Begin state merge transaction at MLD layer.
2361 */ 2362 if (!is_final) { 2363 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2364 IN6_MULTI_LIST_LOCK(); 2365 error = in6m_merge(inm, imf); 2366 if (error) { 2367 CTR1(KTR_MLD, "%s: failed to merge inm state", 2368 __func__); 2369 IN6_MULTI_LIST_UNLOCK(); 2370 im6f_rollback(imf); 2371 im6f_reap(imf); 2372 goto out_in6p_locked; 2373 } 2374 2375 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2376 error = mld_change_state(inm, 0); 2377 IN6_MULTI_LIST_UNLOCK(); 2378 if (error) { 2379 CTR1(KTR_MLD, "%s: failed mld downcall", 2380 __func__); 2381 im6f_rollback(imf); 2382 im6f_reap(imf); 2383 goto out_in6p_locked; 2384 } 2385 } 2386 2387 im6f_commit(imf); 2388 im6f_reap(imf); 2389 2390 out_in6p_locked: 2391 INP_WUNLOCK(inp); 2392 2393 if (is_final && imf) 2394 ip6_mfilter_free(imf); 2395 2396 IN6_MULTI_UNLOCK(); 2397 return (error); 2398 } 2399 2400 /* 2401 * Select the interface for transmitting IPv6 multicast datagrams. 2402 * 2403 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn 2404 * may be passed to this socket option. An address of in6addr_any or an 2405 * interface index of 0 is used to remove a previous selection. 2406 * When no interface is selected, one is chosen for every send. 2407 */ 2408 static int 2409 in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) 2410 { 2411 struct ifnet *ifp; 2412 struct ip6_moptions *imo; 2413 u_int ifindex; 2414 int error; 2415 2416 if (sopt->sopt_valsize != sizeof(u_int)) 2417 return (EINVAL); 2418 2419 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); 2420 if (error) 2421 return (error); 2422 if (V_if_index < ifindex) 2423 return (EINVAL); 2424 if (ifindex == 0) 2425 ifp = NULL; 2426 else { 2427 ifp = ifnet_byindex(ifindex); 2428 if (ifp == NULL) 2429 return (EINVAL); 2430 if ((ifp->if_flags & IFF_MULTICAST) == 0) 2431 return (EADDRNOTAVAIL); 2432 } 2433 imo = in6p_findmoptions(inp); 2434 imo->im6o_multicast_ifp = ifp; 2435 INP_WUNLOCK(inp); 2436 2437 return (0); 2438 } 2439 2440 /* 2441 * Atomically set source filters on a socket for an IPv6 multicast group. 2442 * 2443 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 2444 */ 2445 static int 2446 in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) 2447 { 2448 struct __msfilterreq msfr; 2449 sockunion_t *gsa; 2450 struct ifnet *ifp; 2451 struct in6_mfilter *imf; 2452 struct ip6_moptions *imo; 2453 struct in6_multi *inm; 2454 int error; 2455 2456 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 2457 sizeof(struct __msfilterreq)); 2458 if (error) 2459 return (error); 2460 2461 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 2462 return (ENOBUFS); 2463 2464 if (msfr.msfr_fmode != MCAST_EXCLUDE && 2465 msfr.msfr_fmode != MCAST_INCLUDE) 2466 return (EINVAL); 2467 2468 if (msfr.msfr_group.ss_family != AF_INET6 || 2469 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 2470 return (EINVAL); 2471 2472 gsa = (sockunion_t *)&msfr.msfr_group; 2473 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2474 return (EINVAL); 2475 2476 gsa->sin6.sin6_port = 0; /* ignore port */ 2477 2478 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 2479 return (EADDRNOTAVAIL); 2480 ifp = ifnet_byindex(msfr.msfr_ifindex); 2481 if (ifp == NULL) 2482 return (EADDRNOTAVAIL); 2483 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2484 2485 /* 2486 * Take the INP write lock. 2487 * Check if this socket is a member of this group. 
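 * The membership checked for is the one userland named in its
 * IPV6_MSFILTER request; a hedged sketch of that caller side, using
 * the RFC 3678 setsourcefilter(3) wrapper (socket "sock", interface
 * name and addresses are illustrative only):
 *
 *	struct sockaddr_in6 grp;
 *	struct sockaddr_storage src;
 *	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&src;
 *
 *	memset(&grp, 0, sizeof(grp));
 *	memset(&src, 0, sizeof(src));
 *	grp.sin6_family = s6->sin6_family = AF_INET6;
 *	grp.sin6_len = s6->sin6_len = sizeof(struct sockaddr_in6);
 *	(void)inet_pton(AF_INET6, "ff3e::4321", &grp.sin6_addr);
 *	(void)inet_pton(AF_INET6, "2001:db8::53", &s6->sin6_addr);
 *	(void)setsourcefilter(sock, if_nametoindex("em0"),
 *	    (struct sockaddr *)&grp, sizeof(grp),
 *	    MCAST_INCLUDE, 1, &src);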
2488 */ 2489 imo = in6p_findmoptions(inp); 2490 imf = im6o_match_group(imo, ifp, &gsa->sa); 2491 if (imf == NULL) { 2492 error = EADDRNOTAVAIL; 2493 goto out_in6p_locked; 2494 } 2495 inm = imf->im6f_in6m; 2496 2497 /* 2498 * Begin state merge transaction at socket layer. 2499 */ 2500 INP_WLOCK_ASSERT(inp); 2501 2502 imf->im6f_st[1] = msfr.msfr_fmode; 2503 2504 /* 2505 * Apply any new source filters, if present. 2506 * Make a copy of the user-space source vector so 2507 * that we may copy them with a single copyin. This 2508 * allows us to deal with page faults up-front. 2509 */ 2510 if (msfr.msfr_nsrcs > 0) { 2511 struct in6_msource *lims; 2512 struct sockaddr_in6 *psin; 2513 struct sockaddr_storage *kss, *pkss; 2514 int i; 2515 2516 INP_WUNLOCK(inp); 2517 2518 CTR2(KTR_MLD, "%s: loading %lu source list entries", 2519 __func__, (unsigned long)msfr.msfr_nsrcs); 2520 kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 2521 M_TEMP, M_WAITOK); 2522 error = copyin(msfr.msfr_srcs, kss, 2523 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 2524 if (error) { 2525 free(kss, M_TEMP); 2526 return (error); 2527 } 2528 2529 INP_WLOCK(inp); 2530 2531 /* 2532 * Mark all source filters as UNDEFINED at t1. 2533 * Restore new group filter mode, as im6f_leave() 2534 * will set it to INCLUDE. 2535 */ 2536 im6f_leave(imf); 2537 imf->im6f_st[1] = msfr.msfr_fmode; 2538 2539 /* 2540 * Update socket layer filters at t1, lazy-allocating 2541 * new entries. This saves a bunch of memory at the 2542 * cost of one RB_FIND() per source entry; duplicate 2543 * entries in the msfr_nsrcs vector are ignored. 2544 * If we encounter an error, rollback transaction. 2545 * 2546 * XXX This too could be replaced with a set-symmetric 2547 * difference like loop to avoid walking from root 2548 * every time, as the key space is common. 2549 */ 2550 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2551 psin = (struct sockaddr_in6 *)pkss; 2552 if (psin->sin6_family != AF_INET6) { 2553 error = EAFNOSUPPORT; 2554 break; 2555 } 2556 if (psin->sin6_len != sizeof(struct sockaddr_in6)) { 2557 error = EINVAL; 2558 break; 2559 } 2560 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { 2561 error = EINVAL; 2562 break; 2563 } 2564 /* 2565 * TODO: Validate embedded scope ID in source 2566 * list entry against passed-in ifp, if and only 2567 * if source list filter entry is iface or node local. 2568 */ 2569 in6_clearscope(&psin->sin6_addr); 2570 error = im6f_get_source(imf, psin, &lims); 2571 if (error) 2572 break; 2573 lims->im6sl_st[1] = imf->im6f_st[1]; 2574 } 2575 free(kss, M_TEMP); 2576 } 2577 2578 if (error) 2579 goto out_im6f_rollback; 2580 2581 INP_WLOCK_ASSERT(inp); 2582 IN6_MULTI_LIST_LOCK(); 2583 2584 /* 2585 * Begin state merge transaction at MLD layer. 2586 */ 2587 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2588 error = in6m_merge(inm, imf); 2589 if (error) 2590 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 2591 else { 2592 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2593 error = mld_change_state(inm, 0); 2594 if (error) 2595 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 2596 } 2597 2598 IN6_MULTI_LIST_UNLOCK(); 2599 2600 out_im6f_rollback: 2601 if (error) 2602 im6f_rollback(imf); 2603 else 2604 im6f_commit(imf); 2605 2606 im6f_reap(imf); 2607 2608 out_in6p_locked: 2609 INP_WUNLOCK(inp); 2610 return (error); 2611 } 2612 2613 /* 2614 * Set the IP multicast options in response to user setsockopt(). 
2615 * 2616 * Many of the socket options handled in this function duplicate the 2617 * functionality of socket options in the regular unicast API. However, 2618 * it is not possible to merge the duplicate code, because the idempotence 2619 * of the IPv6 multicast part of the BSD Sockets API must be preserved; 2620 * the effects of these options must be treated as separate and distinct. 2621 * 2622 * SMPng: XXX: Unlocked read of inp_socket believed OK. 2623 */ 2624 int 2625 ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2626 { 2627 struct ip6_moptions *im6o; 2628 int error; 2629 2630 error = 0; 2631 2632 /* 2633 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 2634 * or is a divert socket, reject it. 2635 */ 2636 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 2637 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2638 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) 2639 return (EOPNOTSUPP); 2640 2641 switch (sopt->sopt_name) { 2642 case IPV6_MULTICAST_IF: 2643 error = in6p_set_multicast_if(inp, sopt); 2644 break; 2645 2646 case IPV6_MULTICAST_HOPS: { 2647 int hlim; 2648 2649 if (sopt->sopt_valsize != sizeof(int)) { 2650 error = EINVAL; 2651 break; 2652 } 2653 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); 2654 if (error) 2655 break; 2656 if (hlim < -1 || hlim > 255) { 2657 error = EINVAL; 2658 break; 2659 } else if (hlim == -1) { 2660 hlim = V_ip6_defmcasthlim; 2661 } 2662 im6o = in6p_findmoptions(inp); 2663 im6o->im6o_multicast_hlim = hlim; 2664 INP_WUNLOCK(inp); 2665 break; 2666 } 2667 2668 case IPV6_MULTICAST_LOOP: { 2669 u_int loop; 2670 2671 /* 2672 * Set the loopback flag for outgoing multicast packets. 2673 * Must be zero or one. 2674 */ 2675 if (sopt->sopt_valsize != sizeof(u_int)) { 2676 error = EINVAL; 2677 break; 2678 } 2679 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); 2680 if (error) 2681 break; 2682 if (loop > 1) { 2683 error = EINVAL; 2684 break; 2685 } 2686 im6o = in6p_findmoptions(inp); 2687 im6o->im6o_multicast_loop = loop; 2688 INP_WUNLOCK(inp); 2689 break; 2690 } 2691 2692 case IPV6_JOIN_GROUP: 2693 case MCAST_JOIN_GROUP: 2694 case MCAST_JOIN_SOURCE_GROUP: 2695 error = in6p_join_group(inp, sopt); 2696 break; 2697 2698 case IPV6_LEAVE_GROUP: 2699 case MCAST_LEAVE_GROUP: 2700 case MCAST_LEAVE_SOURCE_GROUP: 2701 error = in6p_leave_group(inp, sopt); 2702 break; 2703 2704 case MCAST_BLOCK_SOURCE: 2705 case MCAST_UNBLOCK_SOURCE: 2706 error = in6p_block_unblock_source(inp, sopt); 2707 break; 2708 2709 case IPV6_MSFILTER: 2710 error = in6p_set_source_filters(inp, sopt); 2711 break; 2712 2713 default: 2714 error = EOPNOTSUPP; 2715 break; 2716 } 2717 2718 INP_UNLOCK_ASSERT(inp); 2719 2720 return (error); 2721 } 2722 2723 /* 2724 * Expose MLD's multicast filter mode and source list(s) to userland, 2725 * keyed by (ifindex, group). 2726 * The filter mode is written out as a uint32_t, followed by 2727 * 0..n of struct in6_addr. 2728 * For use by ifmcstat(8). 2729 * SMPng: NOTE: unlocked read of ifindex space. 
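 *
 * A consumer is expected to walk the returned buffer roughly as in the
 * following hedged sketch, where "buf" (a char buffer) and "len" stand
 * for the sysctl output buffer and its length (illustrative names, not
 * part of this file):
 *
 *	uint32_t fmode;
 *	const struct in6_addr *srcs;
 *	size_t nsrcs;
 *
 *	memcpy(&fmode, buf, sizeof(fmode));
 *	srcs = (const struct in6_addr *)(buf + sizeof(fmode));
 *	nsrcs = (len - sizeof(fmode)) / sizeof(struct in6_addr);
 *
 * after which srcs[0..nsrcs-1] are the in-mode sources for the group.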
2730 */ 2731 static int 2732 sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) 2733 { 2734 struct in6_addr mcaddr; 2735 struct in6_addr src; 2736 struct epoch_tracker et; 2737 struct ifnet *ifp; 2738 struct ifmultiaddr *ifma; 2739 struct in6_multi *inm; 2740 struct ip6_msource *ims; 2741 int *name; 2742 int retval; 2743 u_int namelen; 2744 uint32_t fmode, ifindex; 2745 #ifdef KTR 2746 char ip6tbuf[INET6_ADDRSTRLEN]; 2747 #endif 2748 2749 name = (int *)arg1; 2750 namelen = arg2; 2751 2752 if (req->newptr != NULL) 2753 return (EPERM); 2754 2755 /* int: ifindex + 4 * 32 bits of IPv6 address */ 2756 if (namelen != 5) 2757 return (EINVAL); 2758 2759 ifindex = name[0]; 2760 if (ifindex <= 0 || ifindex > V_if_index) { 2761 CTR2(KTR_MLD, "%s: ifindex %u out of range", 2762 __func__, ifindex); 2763 return (ENOENT); 2764 } 2765 2766 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); 2767 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { 2768 CTR2(KTR_MLD, "%s: group %s is not multicast", 2769 __func__, ip6_sprintf(ip6tbuf, &mcaddr)); 2770 return (EINVAL); 2771 } 2772 2773 NET_EPOCH_ENTER(et); 2774 ifp = ifnet_byindex(ifindex); 2775 if (ifp == NULL) { 2776 NET_EPOCH_EXIT(et); 2777 CTR2(KTR_MLD, "%s: no ifp for ifindex %u", 2778 __func__, ifindex); 2779 return (ENOENT); 2780 } 2781 /* 2782 * Internal MLD lookups require that scope/zone ID is set. 2783 */ 2784 (void)in6_setscope(&mcaddr, ifp, NULL); 2785 2786 retval = sysctl_wire_old_buffer(req, 2787 sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); 2788 if (retval) { 2789 NET_EPOCH_EXIT(et); 2790 return (retval); 2791 } 2792 2793 IN6_MULTI_LOCK(); 2794 IN6_MULTI_LIST_LOCK(); 2795 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2796 inm = in6m_ifmultiaddr_get_inm(ifma); 2797 if (inm == NULL) 2798 continue; 2799 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) 2800 continue; 2801 fmode = inm->in6m_st[1].iss_fmode; 2802 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); 2803 if (retval != 0) 2804 break; 2805 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 2806 CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); 2807 /* 2808 * Only copy-out sources which are in-mode. 2809 */ 2810 if (fmode != im6s_get_mode(inm, ims, 1)) { 2811 CTR1(KTR_MLD, "%s: skip non-in-mode", 2812 __func__); 2813 continue; 2814 } 2815 src = ims->im6s_addr; 2816 retval = SYSCTL_OUT(req, &src, 2817 sizeof(struct in6_addr)); 2818 if (retval != 0) 2819 break; 2820 } 2821 } 2822 IN6_MULTI_LIST_UNLOCK(); 2823 IN6_MULTI_UNLOCK(); 2824 NET_EPOCH_EXIT(et); 2825 2826 return (retval); 2827 } 2828 2829 #ifdef KTR 2830 2831 static const char *in6m_modestrs[] = { "un", "in", "ex" }; 2832 2833 static const char * 2834 in6m_mode_str(const int mode) 2835 { 2836 2837 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) 2838 return (in6m_modestrs[mode]); 2839 return ("??"); 2840 } 2841 2842 static const char *in6m_statestrs[] = { 2843 "not-member", 2844 "silent", 2845 "idle", 2846 "lazy", 2847 "sleeping", 2848 "awakening", 2849 "query-pending", 2850 "sg-query-pending", 2851 "leaving" 2852 }; 2853 2854 static const char * 2855 in6m_state_str(const int state) 2856 { 2857 2858 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) 2859 return (in6m_statestrs[state]); 2860 return ("??"); 2861 } 2862 2863 /* 2864 * Dump an in6_multi structure to the console. 
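 * Output is produced only when KTR_MLD (aliased to KTR_INET6 above) is
 * present in the kernel's KTR mask, e.g. enabled at run time via the
 * debug.ktr.mask sysctl (hedged note; the exact mask value depends on
 * the KTR configuration in use).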
2865 */ 2866 void 2867 in6m_print(const struct in6_multi *inm) 2868 { 2869 int t; 2870 char ip6tbuf[INET6_ADDRSTRLEN]; 2871 2872 if ((ktr_mask & KTR_MLD) == 0) 2873 return; 2874 2875 printf("%s: --- begin in6m %p ---\n", __func__, inm); 2876 printf("addr %s ifp %p(%s) ifma %p\n", 2877 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2878 inm->in6m_ifp, 2879 if_name(inm->in6m_ifp), 2880 inm->in6m_ifma); 2881 printf("timer %u state %s refcount %u scq.len %u\n", 2882 inm->in6m_timer, 2883 in6m_state_str(inm->in6m_state), 2884 inm->in6m_refcount, 2885 mbufq_len(&inm->in6m_scq)); 2886 printf("mli %p nsrc %lu sctimer %u scrv %u\n", 2887 inm->in6m_mli, 2888 inm->in6m_nsrc, 2889 inm->in6m_sctimer, 2890 inm->in6m_scrv); 2891 for (t = 0; t < 2; t++) { 2892 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, 2893 in6m_mode_str(inm->in6m_st[t].iss_fmode), 2894 inm->in6m_st[t].iss_asm, 2895 inm->in6m_st[t].iss_ex, 2896 inm->in6m_st[t].iss_in, 2897 inm->in6m_st[t].iss_rec); 2898 } 2899 printf("%s: --- end in6m %p ---\n", __func__, inm); 2900 } 2901 2902 #else /* !KTR */ 2903 2904 void 2905 in6m_print(const struct in6_multi *inm) 2906 { 2907 2908 } 2909 2910 #endif /* KTR */ 2911