/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009 Bruce Simpson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IPv6 multicast socket, group, and socket option processing module.
 * Normative references: RFC 2292, RFC 3493, RFC 3542, RFC 3678, RFC 3810.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/ktr.h>
#include <sys/tree.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/udp.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet6/nd6.h>
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>

#ifndef KTR_MLD
#define KTR_MLD KTR_INET6
#endif

#ifndef __SOCKUNION_DECLARED
union sockunion {
	struct sockaddr_storage	ss;
	struct sockaddr		sa;
	struct sockaddr_dl	sdl;
	struct sockaddr_in6	sin6;
};
typedef union sockunion sockunion_t;
#define	__SOCKUNION_DECLARED
#endif /* __SOCKUNION_DECLARED */

static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
    "IPv6 multicast PCB-layer source filter");
MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
    "IPv6 multicast MLD-layer source filter");

RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link,
ip6_msource_cmp); 102 103 /* 104 * Locking: 105 * - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK, 106 * IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK. 107 * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however 108 * it can be taken by code in net/if.c also. 109 * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK. 110 * 111 * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly 112 * any need for in6_multi itself to be virtualized -- it is bound to an ifp 113 * anyway no matter what happens. 114 */ 115 struct mtx in6_multi_list_mtx; 116 MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF); 117 118 struct mtx in6_multi_free_mtx; 119 MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF); 120 121 struct sx in6_multi_sx; 122 SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx"); 123 124 static void im6f_commit(struct in6_mfilter *); 125 static int im6f_get_source(struct in6_mfilter *imf, 126 const struct sockaddr_in6 *psin, 127 struct in6_msource **); 128 static struct in6_msource * 129 im6f_graft(struct in6_mfilter *, const uint8_t, 130 const struct sockaddr_in6 *); 131 static void im6f_leave(struct in6_mfilter *); 132 static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); 133 static void im6f_purge(struct in6_mfilter *); 134 static void im6f_rollback(struct in6_mfilter *); 135 static void im6f_reap(struct in6_mfilter *); 136 static struct in6_mfilter * 137 im6o_match_group(const struct ip6_moptions *, 138 const struct ifnet *, const struct sockaddr *); 139 static struct in6_msource * 140 im6o_match_source(struct in6_mfilter *, const struct sockaddr *); 141 static void im6s_merge(struct ip6_msource *ims, 142 const struct in6_msource *lims, const int rollback); 143 static int in6_getmulti(struct ifnet *, const struct in6_addr *, 144 struct in6_multi **); 145 static int in6_joingroup_locked(struct ifnet *, const struct in6_addr *, 146 struct in6_mfilter *, struct in6_multi **, int); 147 static int in6m_get_source(struct in6_multi *inm, 148 const struct in6_addr *addr, const int noalloc, 149 struct ip6_msource **pims); 150 #ifdef KTR 151 static int in6m_is_ifp_detached(const struct in6_multi *); 152 #endif 153 static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); 154 static void in6m_purge(struct in6_multi *); 155 static void in6m_reap(struct in6_multi *); 156 static struct ip6_moptions * 157 in6p_findmoptions(struct inpcb *); 158 static int in6p_get_source_filters(struct inpcb *, struct sockopt *); 159 static int in6p_join_group(struct inpcb *, struct sockopt *); 160 static int in6p_leave_group(struct inpcb *, struct sockopt *); 161 static struct ifnet * 162 in6p_lookup_mcast_ifp(const struct inpcb *, 163 const struct sockaddr_in6 *); 164 static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); 165 static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); 166 static int in6p_set_source_filters(struct inpcb *, struct sockopt *); 167 static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS); 168 169 SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. 
*/ 170 171 static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, 172 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 173 "IPv6 multicast"); 174 175 static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER; 176 SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc, 177 CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0, 178 "Max source filters per group"); 179 180 static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER; 181 SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc, 182 CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0, 183 "Max source filters per socket"); 184 185 /* TODO Virtualize this switch. */ 186 int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP; 187 SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN, 188 &in6_mcast_loop, 0, "Loopback multicast datagrams by default"); 189 190 static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters, 191 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters, 192 "Per-interface stack-wide source filters"); 193 194 #ifdef KTR 195 /* 196 * Inline function which wraps assertions for a valid ifp. 197 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp 198 * is detached. 199 */ 200 static int __inline 201 in6m_is_ifp_detached(const struct in6_multi *inm) 202 { 203 struct ifnet *ifp; 204 205 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); 206 ifp = inm->in6m_ifma->ifma_ifp; 207 if (ifp != NULL) { 208 /* 209 * Sanity check that network-layer notion of ifp is the 210 * same as that of link-layer. 211 */ 212 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); 213 } 214 215 return (ifp == NULL); 216 } 217 #endif 218 219 /* 220 * Initialize an in6_mfilter structure to a known state at t0, t1 221 * with an empty source filter list. 222 */ 223 static __inline void 224 im6f_init(struct in6_mfilter *imf, const int st0, const int st1) 225 { 226 memset(imf, 0, sizeof(struct in6_mfilter)); 227 RB_INIT(&imf->im6f_sources); 228 imf->im6f_st[0] = st0; 229 imf->im6f_st[1] = st1; 230 } 231 232 struct in6_mfilter * 233 ip6_mfilter_alloc(const int mflags, const int st0, const int st1) 234 { 235 struct in6_mfilter *imf; 236 237 imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags); 238 239 if (imf != NULL) 240 im6f_init(imf, st0, st1); 241 242 return (imf); 243 } 244 245 void 246 ip6_mfilter_free(struct in6_mfilter *imf) 247 { 248 249 im6f_purge(imf); 250 free(imf, M_IN6MFILTER); 251 } 252 253 /* 254 * Find an IPv6 multicast group entry for this ip6_moptions instance 255 * which matches the specified group, and optionally an interface. 256 * Return its index into the array, or -1 if not found. 257 */ 258 static struct in6_mfilter * 259 im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp, 260 const struct sockaddr *group) 261 { 262 const struct sockaddr_in6 *gsin6; 263 struct in6_mfilter *imf; 264 struct in6_multi *inm; 265 266 gsin6 = (const struct sockaddr_in6 *)group; 267 268 IP6_MFILTER_FOREACH(imf, &imo->im6o_head) { 269 inm = imf->im6f_in6m; 270 if (inm == NULL) 271 continue; 272 if ((ifp == NULL || (inm->in6m_ifp == ifp)) && 273 IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, 274 &gsin6->sin6_addr)) { 275 break; 276 } 277 } 278 return (imf); 279 } 280 281 /* 282 * Find an IPv6 multicast source entry for this imo which matches 283 * the given group index for this socket, and source address. 284 * 285 * XXX TODO: The scope ID, if present in src, is stripped before 286 * any comparison. We SHOULD enforce scope/zone checks where the source 287 * filter entry has a link scope. 
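 *
 * In practice this means two link-local sources which differ only in
 * their zone (for instance fe80::1 seen on two different interfaces)
 * compare equal here: the lookup key below is built from the bare
 * address after in6_clearscope(), so the zone plays no part in the
 * RB_FIND().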
288 * 289 * NOTE: This does not check if the entry is in-mode, merely if 290 * it exists, which may not be the desired behaviour. 291 */ 292 static struct in6_msource * 293 im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src) 294 { 295 struct ip6_msource find; 296 struct ip6_msource *ims; 297 const sockunion_t *psa; 298 299 KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__)); 300 301 psa = (const sockunion_t *)src; 302 find.im6s_addr = psa->sin6.sin6_addr; 303 in6_clearscope(&find.im6s_addr); /* XXX */ 304 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 305 306 return ((struct in6_msource *)ims); 307 } 308 309 /* 310 * Perform filtering for multicast datagrams on a socket by group and source. 311 * 312 * Returns 0 if a datagram should be allowed through, or various error codes 313 * if the socket was not a member of the group, or the source was muted, etc. 314 */ 315 int 316 im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, 317 const struct sockaddr *group, const struct sockaddr *src) 318 { 319 struct in6_mfilter *imf; 320 struct in6_msource *ims; 321 int mode; 322 323 KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 324 325 imf = im6o_match_group(imo, ifp, group); 326 if (imf == NULL) 327 return (MCAST_NOTGMEMBER); 328 329 /* 330 * Check if the source was included in an (S,G) join. 331 * Allow reception on exclusive memberships by default, 332 * reject reception on inclusive memberships by default. 333 * Exclude source only if an in-mode exclude filter exists. 334 * Include source only if an in-mode include filter exists. 335 * NOTE: We are comparing group state here at MLD t1 (now) 336 * with socket-layer t0 (since last downcall). 337 */ 338 mode = imf->im6f_st[1]; 339 ims = im6o_match_source(imf, src); 340 341 if ((ims == NULL && mode == MCAST_INCLUDE) || 342 (ims != NULL && ims->im6sl_st[0] != mode)) 343 return (MCAST_NOTSMEMBER); 344 345 return (MCAST_PASS); 346 } 347 348 /* 349 * Find and return a reference to an in6_multi record for (ifp, group), 350 * and bump its reference count. 351 * If one does not exist, try to allocate it, and update link-layer multicast 352 * filters on ifp to listen for group. 353 * Assumes the IN6_MULTI lock is held across the call. 354 * Return 0 if successful, otherwise return an appropriate error code. 355 */ 356 static int 357 in6_getmulti(struct ifnet *ifp, const struct in6_addr *group, 358 struct in6_multi **pinm) 359 { 360 struct epoch_tracker et; 361 struct sockaddr_in6 gsin6; 362 struct ifmultiaddr *ifma; 363 struct in6_multi *inm; 364 int error; 365 366 error = 0; 367 368 /* 369 * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK; 370 * if_addmulti() takes this mutex itself, so we must drop and 371 * re-acquire around the call. 372 */ 373 IN6_MULTI_LOCK_ASSERT(); 374 IN6_MULTI_LIST_LOCK(); 375 IF_ADDR_WLOCK(ifp); 376 NET_EPOCH_ENTER(et); 377 inm = in6m_lookup_locked(ifp, group); 378 NET_EPOCH_EXIT(et); 379 380 if (inm != NULL) { 381 /* 382 * If we already joined this group, just bump the 383 * refcount and return it. 384 */ 385 KASSERT(inm->in6m_refcount >= 1, 386 ("%s: bad refcount %d", __func__, inm->in6m_refcount)); 387 in6m_acquire_locked(inm); 388 *pinm = inm; 389 goto out_locked; 390 } 391 392 memset(&gsin6, 0, sizeof(gsin6)); 393 gsin6.sin6_family = AF_INET6; 394 gsin6.sin6_len = sizeof(struct sockaddr_in6); 395 gsin6.sin6_addr = *group; 396 397 /* 398 * Check if a link-layer group is already associated 399 * with this network-layer group on the given ifnet. 
400 */ 401 IN6_MULTI_LIST_UNLOCK(); 402 IF_ADDR_WUNLOCK(ifp); 403 error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); 404 if (error != 0) 405 return (error); 406 IN6_MULTI_LIST_LOCK(); 407 IF_ADDR_WLOCK(ifp); 408 409 /* 410 * If something other than netinet6 is occupying the link-layer 411 * group, print a meaningful error message and back out of 412 * the allocation. 413 * Otherwise, bump the refcount on the existing network-layer 414 * group association and return it. 415 */ 416 if (ifma->ifma_protospec != NULL) { 417 inm = (struct in6_multi *)ifma->ifma_protospec; 418 #ifdef INVARIANTS 419 KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 420 __func__)); 421 KASSERT(ifma->ifma_addr->sa_family == AF_INET6, 422 ("%s: ifma not AF_INET6", __func__)); 423 KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 424 if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp || 425 !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)) 426 panic("%s: ifma %p is inconsistent with %p (%p)", 427 __func__, ifma, inm, group); 428 #endif 429 in6m_acquire_locked(inm); 430 *pinm = inm; 431 goto out_locked; 432 } 433 434 IF_ADDR_WLOCK_ASSERT(ifp); 435 436 /* 437 * A new in6_multi record is needed; allocate and initialize it. 438 * We DO NOT perform an MLD join as the in6_ layer may need to 439 * push an initial source list down to MLD to support SSM. 440 * 441 * The initial source filter state is INCLUDE, {} as per the RFC. 442 * Pending state-changes per group are subject to a bounds check. 443 */ 444 inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO); 445 if (inm == NULL) { 446 IN6_MULTI_LIST_UNLOCK(); 447 IF_ADDR_WUNLOCK(ifp); 448 if_delmulti_ifma(ifma); 449 return (ENOMEM); 450 } 451 inm->in6m_addr = *group; 452 inm->in6m_ifp = ifp; 453 inm->in6m_mli = MLD_IFINFO(ifp); 454 inm->in6m_ifma = ifma; 455 inm->in6m_refcount = 1; 456 inm->in6m_state = MLD_NOT_MEMBER; 457 mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); 458 459 inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; 460 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 461 RB_INIT(&inm->in6m_srcs); 462 463 ifma->ifma_protospec = inm; 464 *pinm = inm; 465 466 out_locked: 467 IN6_MULTI_LIST_UNLOCK(); 468 IF_ADDR_WUNLOCK(ifp); 469 return (error); 470 } 471 472 /* 473 * Drop a reference to an in6_multi record. 474 * 475 * If the refcount drops to 0, free the in6_multi record and 476 * delete the underlying link-layer membership. 
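 *
 * A minimal sketch of the release pattern used elsewhere in this file
 * (the final reference is not freed in place but handed to the release
 * task via a deferral list):
 *
 *	struct in6_multi_head inmh;
 *
 *	SLIST_INIT(&inmh);
 *	IN6_MULTI_LIST_LOCK();
 *	in6m_rele_locked(&inmh, inm);		// last ref lands on inmh
 *	IN6_MULTI_LIST_UNLOCK();
 *	in6m_release_list_deferred(&inmh);	// task then calls in6m_release()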
477 */ 478 static void 479 in6m_release(struct in6_multi *inm) 480 { 481 struct ifmultiaddr *ifma; 482 struct ifnet *ifp; 483 484 CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount); 485 486 MPASS(inm->in6m_refcount == 0); 487 CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm); 488 489 ifma = inm->in6m_ifma; 490 ifp = inm->in6m_ifp; 491 MPASS(ifma->ifma_llifma == NULL); 492 493 /* XXX this access is not covered by IF_ADDR_LOCK */ 494 CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma); 495 KASSERT(ifma->ifma_protospec == NULL, 496 ("%s: ifma_protospec != NULL", __func__)); 497 if (ifp == NULL) 498 ifp = ifma->ifma_ifp; 499 500 if (ifp != NULL) { 501 CURVNET_SET(ifp->if_vnet); 502 in6m_purge(inm); 503 free(inm, M_IP6MADDR); 504 if_delmulti_ifma_flags(ifma, 1); 505 CURVNET_RESTORE(); 506 if_rele(ifp); 507 } else { 508 in6m_purge(inm); 509 free(inm, M_IP6MADDR); 510 if_delmulti_ifma_flags(ifma, 1); 511 } 512 } 513 514 static struct grouptask free_gtask; 515 static struct in6_multi_head in6m_free_list; 516 static void in6m_release_task(void *arg __unused); 517 static void in6m_init(void) 518 { 519 SLIST_INIT(&in6m_free_list); 520 taskqgroup_config_gtask_init(NULL, &free_gtask, in6m_release_task, "in6m release task"); 521 } 522 523 #ifdef EARLY_AP_STARTUP 524 SYSINIT(in6m_init, SI_SUB_SMP + 1, SI_ORDER_FIRST, 525 in6m_init, NULL); 526 #else 527 SYSINIT(in6m_init, SI_SUB_ROOT_CONF - 1, SI_ORDER_SECOND, 528 in6m_init, NULL); 529 #endif 530 531 532 void 533 in6m_release_list_deferred(struct in6_multi_head *inmh) 534 { 535 if (SLIST_EMPTY(inmh)) 536 return; 537 mtx_lock(&in6_multi_free_mtx); 538 SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele); 539 mtx_unlock(&in6_multi_free_mtx); 540 GROUPTASK_ENQUEUE(&free_gtask); 541 } 542 543 void 544 in6m_release_wait(void) 545 { 546 547 /* Wait for all jobs to complete. 
*/ 548 gtaskqueue_drain_all(free_gtask.gt_taskqueue); 549 } 550 551 void 552 in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm) 553 { 554 struct ifnet *ifp; 555 struct ifaddr *ifa; 556 struct in6_ifaddr *ifa6; 557 struct in6_multi_mship *imm, *imm_tmp; 558 struct ifmultiaddr *ifma, *ll_ifma; 559 560 IN6_MULTI_LIST_LOCK_ASSERT(); 561 562 ifp = inm->in6m_ifp; 563 if (ifp == NULL) 564 return; /* already called */ 565 566 inm->in6m_ifp = NULL; 567 IF_ADDR_WLOCK_ASSERT(ifp); 568 ifma = inm->in6m_ifma; 569 if (ifma == NULL) 570 return; 571 572 if_ref(ifp); 573 if (ifma->ifma_flags & IFMA_F_ENQUEUED) { 574 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link); 575 ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 576 } 577 MCDPRINTF("removed ifma: %p from %s\n", ifma, ifp->if_xname); 578 if ((ll_ifma = ifma->ifma_llifma) != NULL) { 579 MPASS(ifma != ll_ifma); 580 ifma->ifma_llifma = NULL; 581 MPASS(ll_ifma->ifma_llifma == NULL); 582 MPASS(ll_ifma->ifma_ifp == ifp); 583 if (--ll_ifma->ifma_refcount == 0) { 584 if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) { 585 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link); 586 ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 587 } 588 MCDPRINTF("removed ll_ifma: %p from %s\n", ll_ifma, ifp->if_xname); 589 if_freemulti(ll_ifma); 590 } 591 } 592 CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 593 if (ifa->ifa_addr->sa_family != AF_INET6) 594 continue; 595 ifa6 = (void *)ifa; 596 LIST_FOREACH_SAFE(imm, &ifa6->ia6_memberships, 597 i6mm_chain, imm_tmp) { 598 if (inm == imm->i6mm_maddr) { 599 LIST_REMOVE(imm, i6mm_chain); 600 free(imm, M_IP6MADDR); 601 in6m_rele_locked(inmh, inm); 602 } 603 } 604 } 605 } 606 607 static void 608 in6m_release_task(void *arg __unused) 609 { 610 struct in6_multi_head in6m_free_tmp; 611 struct in6_multi *inm, *tinm; 612 613 SLIST_INIT(&in6m_free_tmp); 614 mtx_lock(&in6_multi_free_mtx); 615 SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele); 616 mtx_unlock(&in6_multi_free_mtx); 617 IN6_MULTI_LOCK(); 618 SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) { 619 SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele); 620 in6m_release(inm); 621 } 622 IN6_MULTI_UNLOCK(); 623 } 624 625 /* 626 * Clear recorded source entries for a group. 627 * Used by the MLD code. Caller must hold the IN6_MULTI lock. 628 * FIXME: Should reap. 629 */ 630 void 631 in6m_clear_recorded(struct in6_multi *inm) 632 { 633 struct ip6_msource *ims; 634 635 IN6_MULTI_LIST_LOCK_ASSERT(); 636 637 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 638 if (ims->im6s_stp) { 639 ims->im6s_stp = 0; 640 --inm->in6m_st[1].iss_rec; 641 } 642 } 643 KASSERT(inm->in6m_st[1].iss_rec == 0, 644 ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec)); 645 } 646 647 /* 648 * Record a source as pending for a Source-Group MLDv2 query. 649 * This lives here as it modifies the shared tree. 650 * 651 * inm is the group descriptor. 652 * naddr is the address of the source to record in network-byte order. 653 * 654 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will 655 * lazy-allocate a source node in response to an SG query. 656 * Otherwise, no allocation is performed. This saves some memory 657 * with the trade-off that the source will not be reported to the 658 * router if joined in the window between the query response and 659 * the group actually being joined on the local host. 660 * 661 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed. 
662 * This turns off the allocation of a recorded source entry if 663 * the group has not been joined. 664 * 665 * Return 0 if the source didn't exist or was already marked as recorded. 666 * Return 1 if the source was marked as recorded by this function. 667 * Return <0 if any error occurred (negated errno code). 668 */ 669 int 670 in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) 671 { 672 struct ip6_msource find; 673 struct ip6_msource *ims, *nims; 674 675 IN6_MULTI_LIST_LOCK_ASSERT(); 676 677 find.im6s_addr = *addr; 678 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 679 if (ims && ims->im6s_stp) 680 return (0); 681 if (ims == NULL) { 682 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 683 return (-ENOSPC); 684 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 685 M_NOWAIT | M_ZERO); 686 if (nims == NULL) 687 return (-ENOMEM); 688 nims->im6s_addr = find.im6s_addr; 689 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 690 ++inm->in6m_nsrc; 691 ims = nims; 692 } 693 694 /* 695 * Mark the source as recorded and update the recorded 696 * source count. 697 */ 698 ++ims->im6s_stp; 699 ++inm->in6m_st[1].iss_rec; 700 701 return (1); 702 } 703 704 /* 705 * Return a pointer to an in6_msource owned by an in6_mfilter, 706 * given its source address. 707 * Lazy-allocate if needed. If this is a new entry its filter state is 708 * undefined at t0. 709 * 710 * imf is the filter set being modified. 711 * addr is the source address. 712 * 713 * SMPng: May be called with locks held; malloc must not block. 714 */ 715 static int 716 im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, 717 struct in6_msource **plims) 718 { 719 struct ip6_msource find; 720 struct ip6_msource *ims, *nims; 721 struct in6_msource *lims; 722 int error; 723 724 error = 0; 725 ims = NULL; 726 lims = NULL; 727 728 find.im6s_addr = psin->sin6_addr; 729 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 730 lims = (struct in6_msource *)ims; 731 if (lims == NULL) { 732 if (imf->im6f_nsrc == in6_mcast_maxsocksrc) 733 return (ENOSPC); 734 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 735 M_NOWAIT | M_ZERO); 736 if (nims == NULL) 737 return (ENOMEM); 738 lims = (struct in6_msource *)nims; 739 lims->im6s_addr = find.im6s_addr; 740 lims->im6sl_st[0] = MCAST_UNDEFINED; 741 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 742 ++imf->im6f_nsrc; 743 } 744 745 *plims = lims; 746 747 return (error); 748 } 749 750 /* 751 * Graft a source entry into an existing socket-layer filter set, 752 * maintaining any required invariants and checking allocations. 753 * 754 * The source is marked as being in the new filter mode at t1. 755 * 756 * Return the pointer to the new node, otherwise return NULL. 757 */ 758 static struct in6_msource * 759 im6f_graft(struct in6_mfilter *imf, const uint8_t st1, 760 const struct sockaddr_in6 *psin) 761 { 762 struct ip6_msource *nims; 763 struct in6_msource *lims; 764 765 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 766 M_NOWAIT | M_ZERO); 767 if (nims == NULL) 768 return (NULL); 769 lims = (struct in6_msource *)nims; 770 lims->im6s_addr = psin->sin6_addr; 771 lims->im6sl_st[0] = MCAST_UNDEFINED; 772 lims->im6sl_st[1] = st1; 773 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 774 ++imf->im6f_nsrc; 775 776 return (lims); 777 } 778 779 /* 780 * Prune a source entry from an existing socket-layer filter set, 781 * maintaining any required invariants and checking allocations. 
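 *
 * Together with im6f_graft(), im6f_rollback() and im6f_commit(), this
 * forms a small two-phase transaction over the socket-layer filter:
 * changes are staged at t1, pushed down to MLD, and then either
 * committed or rolled back.  A sketch of the pattern used by the
 * setsockopt handlers below (locking and error detail omitted):
 *
 *	ims = im6f_graft(imf, fmode, &ssa->sin6);	// or im6f_prune()
 *	error = in6m_merge(inm, imf);			// stage at MLD layer
 *	if (error == 0)
 *		error = mld_change_state(inm, 0);	// MLD downcall
 *	if (error != 0)
 *		im6f_rollback(imf);			// undo the t1 deltas
 *	else
 *		im6f_commit(imf);			// fold t1 into t0
 *	im6f_reap(imf);					// free dead sources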
782 * 783 * The source is marked as being left at t1, it is not freed. 784 * 785 * Return 0 if no error occurred, otherwise return an errno value. 786 */ 787 static int 788 im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) 789 { 790 struct ip6_msource find; 791 struct ip6_msource *ims; 792 struct in6_msource *lims; 793 794 find.im6s_addr = psin->sin6_addr; 795 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 796 if (ims == NULL) 797 return (ENOENT); 798 lims = (struct in6_msource *)ims; 799 lims->im6sl_st[1] = MCAST_UNDEFINED; 800 return (0); 801 } 802 803 /* 804 * Revert socket-layer filter set deltas at t1 to t0 state. 805 */ 806 static void 807 im6f_rollback(struct in6_mfilter *imf) 808 { 809 struct ip6_msource *ims, *tims; 810 struct in6_msource *lims; 811 812 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 813 lims = (struct in6_msource *)ims; 814 if (lims->im6sl_st[0] == lims->im6sl_st[1]) { 815 /* no change at t1 */ 816 continue; 817 } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) { 818 /* revert change to existing source at t1 */ 819 lims->im6sl_st[1] = lims->im6sl_st[0]; 820 } else { 821 /* revert source added t1 */ 822 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 823 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 824 free(ims, M_IN6MFILTER); 825 imf->im6f_nsrc--; 826 } 827 } 828 imf->im6f_st[1] = imf->im6f_st[0]; 829 } 830 831 /* 832 * Mark socket-layer filter set as INCLUDE {} at t1. 833 */ 834 static void 835 im6f_leave(struct in6_mfilter *imf) 836 { 837 struct ip6_msource *ims; 838 struct in6_msource *lims; 839 840 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 841 lims = (struct in6_msource *)ims; 842 lims->im6sl_st[1] = MCAST_UNDEFINED; 843 } 844 imf->im6f_st[1] = MCAST_INCLUDE; 845 } 846 847 /* 848 * Mark socket-layer filter set deltas as committed. 849 */ 850 static void 851 im6f_commit(struct in6_mfilter *imf) 852 { 853 struct ip6_msource *ims; 854 struct in6_msource *lims; 855 856 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 857 lims = (struct in6_msource *)ims; 858 lims->im6sl_st[0] = lims->im6sl_st[1]; 859 } 860 imf->im6f_st[0] = imf->im6f_st[1]; 861 } 862 863 /* 864 * Reap unreferenced sources from socket-layer filter set. 865 */ 866 static void 867 im6f_reap(struct in6_mfilter *imf) 868 { 869 struct ip6_msource *ims, *tims; 870 struct in6_msource *lims; 871 872 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 873 lims = (struct in6_msource *)ims; 874 if ((lims->im6sl_st[0] == MCAST_UNDEFINED) && 875 (lims->im6sl_st[1] == MCAST_UNDEFINED)) { 876 CTR2(KTR_MLD, "%s: free lims %p", __func__, ims); 877 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 878 free(ims, M_IN6MFILTER); 879 imf->im6f_nsrc--; 880 } 881 } 882 } 883 884 /* 885 * Purge socket-layer filter set. 886 */ 887 static void 888 im6f_purge(struct in6_mfilter *imf) 889 { 890 struct ip6_msource *ims, *tims; 891 892 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 893 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 894 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 895 free(ims, M_IN6MFILTER); 896 imf->im6f_nsrc--; 897 } 898 imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED; 899 KASSERT(RB_EMPTY(&imf->im6f_sources), 900 ("%s: im6f_sources not empty", __func__)); 901 } 902 903 /* 904 * Look up a source filter entry for a multicast group. 905 * 906 * inm is the group descriptor to work with. 907 * addr is the IPv6 address to look up. 
908 * noalloc may be non-zero to suppress allocation of sources. 909 * *pims will be set to the address of the retrieved or allocated source. 910 * 911 * SMPng: NOTE: may be called with locks held. 912 * Return 0 if successful, otherwise return a non-zero error code. 913 */ 914 static int 915 in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, 916 const int noalloc, struct ip6_msource **pims) 917 { 918 struct ip6_msource find; 919 struct ip6_msource *ims, *nims; 920 #ifdef KTR 921 char ip6tbuf[INET6_ADDRSTRLEN]; 922 #endif 923 924 find.im6s_addr = *addr; 925 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 926 if (ims == NULL && !noalloc) { 927 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 928 return (ENOSPC); 929 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 930 M_NOWAIT | M_ZERO); 931 if (nims == NULL) 932 return (ENOMEM); 933 nims->im6s_addr = *addr; 934 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 935 ++inm->in6m_nsrc; 936 ims = nims; 937 CTR3(KTR_MLD, "%s: allocated %s as %p", __func__, 938 ip6_sprintf(ip6tbuf, addr), ims); 939 } 940 941 *pims = ims; 942 return (0); 943 } 944 945 /* 946 * Merge socket-layer source into MLD-layer source. 947 * If rollback is non-zero, perform the inverse of the merge. 948 */ 949 static void 950 im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, 951 const int rollback) 952 { 953 int n = rollback ? -1 : 1; 954 #ifdef KTR 955 char ip6tbuf[INET6_ADDRSTRLEN]; 956 957 ip6_sprintf(ip6tbuf, &lims->im6s_addr); 958 #endif 959 960 if (lims->im6sl_st[0] == MCAST_EXCLUDE) { 961 CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf); 962 ims->im6s_st[1].ex -= n; 963 } else if (lims->im6sl_st[0] == MCAST_INCLUDE) { 964 CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf); 965 ims->im6s_st[1].in -= n; 966 } 967 968 if (lims->im6sl_st[1] == MCAST_EXCLUDE) { 969 CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf); 970 ims->im6s_st[1].ex += n; 971 } else if (lims->im6sl_st[1] == MCAST_INCLUDE) { 972 CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf); 973 ims->im6s_st[1].in += n; 974 } 975 } 976 977 /* 978 * Atomically update the global in6_multi state, when a membership's 979 * filter list is being updated in any way. 980 * 981 * imf is the per-inpcb-membership group filter pointer. 982 * A fake imf may be passed for in-kernel consumers. 983 * 984 * XXX This is a candidate for a set-symmetric-difference style loop 985 * which would eliminate the repeated lookup from root of ims nodes, 986 * as they share the same key space. 987 * 988 * If any error occurred this function will back out of refcounts 989 * and return a non-zero value. 990 */ 991 static int 992 in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 993 { 994 struct ip6_msource *ims, *nims; 995 struct in6_msource *lims; 996 int schanged, error; 997 int nsrc0, nsrc1; 998 999 schanged = 0; 1000 error = 0; 1001 nsrc1 = nsrc0 = 0; 1002 IN6_MULTI_LIST_LOCK_ASSERT(); 1003 1004 /* 1005 * Update the source filters first, as this may fail. 1006 * Maintain count of in-mode filters at t0, t1. These are 1007 * used to work out if we transition into ASM mode or not. 1008 * Maintain a count of source filters whose state was 1009 * actually modified by this operation. 
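 *
 * As a worked example: a membership created with
 * MCAST_JOIN_SOURCE_GROUP is INCLUDE at both t0 and t1, and every
 * source whose im6sl_st[] matches that mode counts towards
 * nsrc0/nsrc1; a plain MCAST_JOIN_GROUP membership is EXCLUDE with an
 * empty source list, so nsrc0 == nsrc1 == 0 and it is treated as an
 * ASM listener further below.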
1010 */ 1011 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1012 lims = (struct in6_msource *)ims; 1013 if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; 1014 if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; 1015 if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; 1016 error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); 1017 ++schanged; 1018 if (error) 1019 break; 1020 im6s_merge(nims, lims, 0); 1021 } 1022 if (error) { 1023 struct ip6_msource *bims; 1024 1025 RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { 1026 lims = (struct in6_msource *)ims; 1027 if (lims->im6sl_st[0] == lims->im6sl_st[1]) 1028 continue; 1029 (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims); 1030 if (bims == NULL) 1031 continue; 1032 im6s_merge(bims, lims, 1); 1033 } 1034 goto out_reap; 1035 } 1036 1037 CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1", 1038 __func__, nsrc0, nsrc1); 1039 1040 /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ 1041 if (imf->im6f_st[0] == imf->im6f_st[1] && 1042 imf->im6f_st[1] == MCAST_INCLUDE) { 1043 if (nsrc1 == 0) { 1044 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1045 --inm->in6m_st[1].iss_in; 1046 } 1047 } 1048 1049 /* Handle filter mode transition on socket. */ 1050 if (imf->im6f_st[0] != imf->im6f_st[1]) { 1051 CTR3(KTR_MLD, "%s: imf transition %d to %d", 1052 __func__, imf->im6f_st[0], imf->im6f_st[1]); 1053 1054 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 1055 CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__); 1056 --inm->in6m_st[1].iss_ex; 1057 } else if (imf->im6f_st[0] == MCAST_INCLUDE) { 1058 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1059 --inm->in6m_st[1].iss_in; 1060 } 1061 1062 if (imf->im6f_st[1] == MCAST_EXCLUDE) { 1063 CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__); 1064 inm->in6m_st[1].iss_ex++; 1065 } else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) { 1066 CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__); 1067 inm->in6m_st[1].iss_in++; 1068 } 1069 } 1070 1071 /* 1072 * Track inm filter state in terms of listener counts. 1073 * If there are any exclusive listeners, stack-wide 1074 * membership is exclusive. 1075 * Otherwise, if only inclusive listeners, stack-wide is inclusive. 1076 * If no listeners remain, state is undefined at t1, 1077 * and the MLD lifecycle for this group should finish. 1078 */ 1079 if (inm->in6m_st[1].iss_ex > 0) { 1080 CTR1(KTR_MLD, "%s: transition to EX", __func__); 1081 inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE; 1082 } else if (inm->in6m_st[1].iss_in > 0) { 1083 CTR1(KTR_MLD, "%s: transition to IN", __func__); 1084 inm->in6m_st[1].iss_fmode = MCAST_INCLUDE; 1085 } else { 1086 CTR1(KTR_MLD, "%s: transition to UNDEF", __func__); 1087 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 1088 } 1089 1090 /* Decrement ASM listener count on transition out of ASM mode. */ 1091 if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { 1092 if ((imf->im6f_st[1] != MCAST_EXCLUDE) || 1093 (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) { 1094 CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__); 1095 --inm->in6m_st[1].iss_asm; 1096 } 1097 } 1098 1099 /* Increment ASM listener count on transition to ASM mode. 
*/ 1100 if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { 1101 CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__); 1102 inm->in6m_st[1].iss_asm++; 1103 } 1104 1105 CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm); 1106 in6m_print(inm); 1107 1108 out_reap: 1109 if (schanged > 0) { 1110 CTR1(KTR_MLD, "%s: sources changed; reaping", __func__); 1111 in6m_reap(inm); 1112 } 1113 return (error); 1114 } 1115 1116 /* 1117 * Mark an in6_multi's filter set deltas as committed. 1118 * Called by MLD after a state change has been enqueued. 1119 */ 1120 void 1121 in6m_commit(struct in6_multi *inm) 1122 { 1123 struct ip6_msource *ims; 1124 1125 CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm); 1126 CTR1(KTR_MLD, "%s: pre commit:", __func__); 1127 in6m_print(inm); 1128 1129 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 1130 ims->im6s_st[0] = ims->im6s_st[1]; 1131 } 1132 inm->in6m_st[0] = inm->in6m_st[1]; 1133 } 1134 1135 /* 1136 * Reap unreferenced nodes from an in6_multi's filter set. 1137 */ 1138 static void 1139 in6m_reap(struct in6_multi *inm) 1140 { 1141 struct ip6_msource *ims, *tims; 1142 1143 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1144 if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 || 1145 ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 || 1146 ims->im6s_stp != 0) 1147 continue; 1148 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1149 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1150 free(ims, M_IP6MSOURCE); 1151 inm->in6m_nsrc--; 1152 } 1153 } 1154 1155 /* 1156 * Purge all source nodes from an in6_multi's filter set. 1157 */ 1158 static void 1159 in6m_purge(struct in6_multi *inm) 1160 { 1161 struct ip6_msource *ims, *tims; 1162 1163 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1164 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1165 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1166 free(ims, M_IP6MSOURCE); 1167 inm->in6m_nsrc--; 1168 } 1169 /* Free state-change requests that might be queued. */ 1170 mbufq_drain(&inm->in6m_scq); 1171 } 1172 1173 /* 1174 * Join a multicast address w/o sources. 1175 * KAME compatibility entry point. 1176 * 1177 * SMPng: Assume no mc locks held by caller. 1178 */ 1179 int 1180 in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr, 1181 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1182 const int delay) 1183 { 1184 int error; 1185 1186 IN6_MULTI_LOCK(); 1187 error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay); 1188 IN6_MULTI_UNLOCK(); 1189 return (error); 1190 } 1191 1192 /* 1193 * Join a multicast group; real entry point. 1194 * 1195 * Only preserves atomicity at inm level. 1196 * NOTE: imf argument cannot be const due to sys/tree.h limitations. 1197 * 1198 * If the MLD downcall fails, the group is not joined, and an error 1199 * code is returned. 1200 */ 1201 static int 1202 in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr, 1203 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1204 const int delay) 1205 { 1206 struct in6_multi_head inmh; 1207 struct in6_mfilter timf; 1208 struct in6_multi *inm; 1209 struct ifmultiaddr *ifma; 1210 int error; 1211 #ifdef KTR 1212 char ip6tbuf[INET6_ADDRSTRLEN]; 1213 #endif 1214 1215 /* 1216 * Sanity: Check scope zone ID was set for ifp, if and 1217 * only if group is scoped to an interface. 
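 *
 * The zone is carried KAME-style inside the address itself:
 * in6_setscope() embeds the zone ID in s6_addr16[1] for link- and
 * interface-local groups.  As an illustration (assuming zone/interface
 * index 2), ff02::1 joined on that interface is held internally as
 * ff02:2::1, which is what the KASSERT below relies on.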
1218 */ 1219 KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr), 1220 ("%s: not a multicast address", __func__)); 1221 if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) || 1222 IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) { 1223 KASSERT(mcaddr->s6_addr16[1] != 0, 1224 ("%s: scope zone ID not set", __func__)); 1225 } 1226 1227 IN6_MULTI_LOCK_ASSERT(); 1228 IN6_MULTI_LIST_UNLOCK_ASSERT(); 1229 1230 CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__, 1231 ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp)); 1232 1233 error = 0; 1234 inm = NULL; 1235 1236 /* 1237 * If no imf was specified (i.e. kernel consumer), 1238 * fake one up and assume it is an ASM join. 1239 */ 1240 if (imf == NULL) { 1241 im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); 1242 imf = &timf; 1243 } 1244 error = in6_getmulti(ifp, mcaddr, &inm); 1245 if (error) { 1246 CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__); 1247 return (error); 1248 } 1249 1250 IN6_MULTI_LIST_LOCK(); 1251 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1252 error = in6m_merge(inm, imf); 1253 if (error) { 1254 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1255 goto out_in6m_release; 1256 } 1257 1258 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1259 error = mld_change_state(inm, delay); 1260 if (error) { 1261 CTR1(KTR_MLD, "%s: failed to update source", __func__); 1262 goto out_in6m_release; 1263 } 1264 1265 out_in6m_release: 1266 SLIST_INIT(&inmh); 1267 if (error) { 1268 struct epoch_tracker et; 1269 1270 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1271 IF_ADDR_WLOCK(ifp); 1272 NET_EPOCH_ENTER(et); 1273 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1274 if (ifma->ifma_protospec == inm) { 1275 ifma->ifma_protospec = NULL; 1276 break; 1277 } 1278 } 1279 in6m_disconnect_locked(&inmh, inm); 1280 in6m_rele_locked(&inmh, inm); 1281 NET_EPOCH_EXIT(et); 1282 IF_ADDR_WUNLOCK(ifp); 1283 } else { 1284 *pinm = inm; 1285 } 1286 IN6_MULTI_LIST_UNLOCK(); 1287 in6m_release_list_deferred(&inmh); 1288 return (error); 1289 } 1290 1291 /* 1292 * Leave a multicast group; unlocked entry point. 1293 */ 1294 int 1295 in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1296 { 1297 int error; 1298 1299 IN6_MULTI_LOCK(); 1300 error = in6_leavegroup_locked(inm, imf); 1301 IN6_MULTI_UNLOCK(); 1302 return (error); 1303 } 1304 1305 /* 1306 * Leave a multicast group; real entry point. 1307 * All source filters will be expunged. 1308 * 1309 * Only preserves atomicity at inm level. 1310 * 1311 * Holding the write lock for the INP which contains imf 1312 * is highly advisable. We can't assert for it as imf does not 1313 * contain a back-pointer to the owning inp. 1314 * 1315 * Note: This is not the same as in6m_release(*) as this function also 1316 * makes a state change downcall into MLD. 1317 */ 1318 int 1319 in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1320 { 1321 struct in6_multi_head inmh; 1322 struct in6_mfilter timf; 1323 struct ifnet *ifp; 1324 int error; 1325 #ifdef KTR 1326 char ip6tbuf[INET6_ADDRSTRLEN]; 1327 #endif 1328 1329 error = 0; 1330 1331 IN6_MULTI_LOCK_ASSERT(); 1332 1333 CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__, 1334 inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 1335 (in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)), 1336 imf); 1337 1338 /* 1339 * If no imf was specified (i.e. kernel consumer), 1340 * fake one up and assume it is an ASM join. 
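 *
 * Note the faked filter here describes a leave rather than a join:
 * im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED) below records an
 * ASM membership at t0 and no membership at t1, the mirror image of
 * the (MCAST_UNDEFINED, MCAST_EXCLUDE) filter faked up in
 * in6_joingroup_locked().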
1341 */ 1342 if (imf == NULL) { 1343 im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); 1344 imf = &timf; 1345 } 1346 1347 /* 1348 * Begin state merge transaction at MLD layer. 1349 * 1350 * As this particular invocation should not cause any memory 1351 * to be allocated, and there is no opportunity to roll back 1352 * the transaction, it MUST NOT fail. 1353 */ 1354 1355 ifp = inm->in6m_ifp; 1356 IN6_MULTI_LIST_LOCK(); 1357 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1358 error = in6m_merge(inm, imf); 1359 KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1360 1361 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1362 error = 0; 1363 if (ifp) 1364 error = mld_change_state(inm, 0); 1365 if (error) 1366 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1367 1368 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1369 if (ifp) 1370 IF_ADDR_WLOCK(ifp); 1371 1372 SLIST_INIT(&inmh); 1373 if (inm->in6m_refcount == 1) 1374 in6m_disconnect_locked(&inmh, inm); 1375 in6m_rele_locked(&inmh, inm); 1376 if (ifp) 1377 IF_ADDR_WUNLOCK(ifp); 1378 IN6_MULTI_LIST_UNLOCK(); 1379 in6m_release_list_deferred(&inmh); 1380 return (error); 1381 } 1382 1383 1384 /* 1385 * Block or unblock an ASM multicast source on an inpcb. 1386 * This implements the delta-based API described in RFC 3678. 1387 * 1388 * The delta-based API applies only to exclusive-mode memberships. 1389 * An MLD downcall will be performed. 1390 * 1391 * SMPng: NOTE: Must take Giant as a join may create a new ifma. 1392 * 1393 * Return 0 if successful, otherwise return an appropriate error code. 1394 */ 1395 static int 1396 in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1397 { 1398 struct group_source_req gsr; 1399 sockunion_t *gsa, *ssa; 1400 struct ifnet *ifp; 1401 struct in6_mfilter *imf; 1402 struct ip6_moptions *imo; 1403 struct in6_msource *ims; 1404 struct in6_multi *inm; 1405 uint16_t fmode; 1406 int error, doblock; 1407 #ifdef KTR 1408 char ip6tbuf[INET6_ADDRSTRLEN]; 1409 #endif 1410 1411 ifp = NULL; 1412 error = 0; 1413 doblock = 0; 1414 1415 memset(&gsr, 0, sizeof(struct group_source_req)); 1416 gsa = (sockunion_t *)&gsr.gsr_group; 1417 ssa = (sockunion_t *)&gsr.gsr_source; 1418 1419 switch (sopt->sopt_name) { 1420 case MCAST_BLOCK_SOURCE: 1421 case MCAST_UNBLOCK_SOURCE: 1422 error = sooptcopyin(sopt, &gsr, 1423 sizeof(struct group_source_req), 1424 sizeof(struct group_source_req)); 1425 if (error) 1426 return (error); 1427 1428 if (gsa->sin6.sin6_family != AF_INET6 || 1429 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1430 return (EINVAL); 1431 1432 if (ssa->sin6.sin6_family != AF_INET6 || 1433 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1434 return (EINVAL); 1435 1436 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1437 return (EADDRNOTAVAIL); 1438 1439 ifp = ifnet_byindex(gsr.gsr_interface); 1440 1441 if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1442 doblock = 1; 1443 break; 1444 1445 default: 1446 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1447 __func__, sopt->sopt_name); 1448 return (EOPNOTSUPP); 1449 break; 1450 } 1451 1452 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1453 return (EINVAL); 1454 1455 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1456 1457 /* 1458 * Check if we are actually a member of this group. 
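 *
 * For illustration, the request being validated here is typically
 * issued from userland roughly as follows, assuming an AF_INET6
 * socket s, a group already joined in exclusive mode, and a
 * hypothetical interface em0 (sketch only, error handling omitted):
 *
 *	struct group_source_req gsr;
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	memset(&gsr, 0, sizeof(gsr));
 *	gsr.gsr_interface = if_nametoindex("em0");
 *	grp->sin6_family = src->sin6_family = AF_INET6;
 *	grp->sin6_len = src->sin6_len = sizeof(struct sockaddr_in6);
 *	inet_pton(AF_INET6, "ff15::1234", &grp->sin6_addr);
 *	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *	setsockopt(s, IPPROTO_IPV6, MCAST_BLOCK_SOURCE, &gsr, sizeof(gsr));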
1459 */ 1460 imo = in6p_findmoptions(inp); 1461 imf = im6o_match_group(imo, ifp, &gsa->sa); 1462 if (imf == NULL) { 1463 error = EADDRNOTAVAIL; 1464 goto out_in6p_locked; 1465 } 1466 inm = imf->im6f_in6m; 1467 1468 /* 1469 * Attempting to use the delta-based API on an 1470 * non exclusive-mode membership is an error. 1471 */ 1472 fmode = imf->im6f_st[0]; 1473 if (fmode != MCAST_EXCLUDE) { 1474 error = EINVAL; 1475 goto out_in6p_locked; 1476 } 1477 1478 /* 1479 * Deal with error cases up-front: 1480 * Asked to block, but already blocked; or 1481 * Asked to unblock, but nothing to unblock. 1482 * If adding a new block entry, allocate it. 1483 */ 1484 ims = im6o_match_source(imf, &ssa->sa); 1485 if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1486 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 1487 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 1488 doblock ? "" : "not "); 1489 error = EADDRNOTAVAIL; 1490 goto out_in6p_locked; 1491 } 1492 1493 INP_WLOCK_ASSERT(inp); 1494 1495 /* 1496 * Begin state merge transaction at socket layer. 1497 */ 1498 if (doblock) { 1499 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 1500 ims = im6f_graft(imf, fmode, &ssa->sin6); 1501 if (ims == NULL) 1502 error = ENOMEM; 1503 } else { 1504 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 1505 error = im6f_prune(imf, &ssa->sin6); 1506 } 1507 1508 if (error) { 1509 CTR1(KTR_MLD, "%s: merge imf state failed", __func__); 1510 goto out_im6f_rollback; 1511 } 1512 1513 /* 1514 * Begin state merge transaction at MLD layer. 1515 */ 1516 IN6_MULTI_LIST_LOCK(); 1517 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1518 error = in6m_merge(inm, imf); 1519 if (error) 1520 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1521 else { 1522 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1523 error = mld_change_state(inm, 0); 1524 if (error) 1525 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1526 } 1527 1528 IN6_MULTI_LIST_UNLOCK(); 1529 1530 out_im6f_rollback: 1531 if (error) 1532 im6f_rollback(imf); 1533 else 1534 im6f_commit(imf); 1535 1536 im6f_reap(imf); 1537 1538 out_in6p_locked: 1539 INP_WUNLOCK(inp); 1540 return (error); 1541 } 1542 1543 /* 1544 * Given an inpcb, return its multicast options structure pointer. Accepts 1545 * an unlocked inpcb pointer, but will return it locked. May sleep. 1546 * 1547 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 1548 * SMPng: NOTE: Returns with the INP write lock held. 1549 */ 1550 static struct ip6_moptions * 1551 in6p_findmoptions(struct inpcb *inp) 1552 { 1553 struct ip6_moptions *imo; 1554 1555 INP_WLOCK(inp); 1556 if (inp->in6p_moptions != NULL) 1557 return (inp->in6p_moptions); 1558 1559 INP_WUNLOCK(inp); 1560 1561 imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK); 1562 1563 imo->im6o_multicast_ifp = NULL; 1564 imo->im6o_multicast_hlim = V_ip6_defmcasthlim; 1565 imo->im6o_multicast_loop = in6_mcast_loop; 1566 STAILQ_INIT(&imo->im6o_head); 1567 1568 INP_WLOCK(inp); 1569 if (inp->in6p_moptions != NULL) { 1570 free(imo, M_IP6MOPTS); 1571 return (inp->in6p_moptions); 1572 } 1573 inp->in6p_moptions = imo; 1574 return (imo); 1575 } 1576 1577 /* 1578 * Discard the IPv6 multicast options (and source filters). 1579 * 1580 * SMPng: NOTE: assumes INP write lock is held. 
1581 * 1582 * XXX can all be safely deferred to epoch_call 1583 * 1584 */ 1585 1586 static void 1587 inp_gcmoptions(struct ip6_moptions *imo) 1588 { 1589 struct in6_mfilter *imf; 1590 struct in6_multi *inm; 1591 struct ifnet *ifp; 1592 1593 while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) { 1594 ip6_mfilter_remove(&imo->im6o_head, imf); 1595 1596 im6f_leave(imf); 1597 if ((inm = imf->im6f_in6m) != NULL) { 1598 if ((ifp = inm->in6m_ifp) != NULL) { 1599 CURVNET_SET(ifp->if_vnet); 1600 (void)in6_leavegroup(inm, imf); 1601 CURVNET_RESTORE(); 1602 } else { 1603 (void)in6_leavegroup(inm, imf); 1604 } 1605 } 1606 ip6_mfilter_free(imf); 1607 } 1608 free(imo, M_IP6MOPTS); 1609 } 1610 1611 void 1612 ip6_freemoptions(struct ip6_moptions *imo) 1613 { 1614 if (imo == NULL) 1615 return; 1616 inp_gcmoptions(imo); 1617 } 1618 1619 /* 1620 * Atomically get source filters on a socket for an IPv6 multicast group. 1621 * Called with INP lock held; returns with lock released. 1622 */ 1623 static int 1624 in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) 1625 { 1626 struct __msfilterreq msfr; 1627 sockunion_t *gsa; 1628 struct ifnet *ifp; 1629 struct ip6_moptions *imo; 1630 struct in6_mfilter *imf; 1631 struct ip6_msource *ims; 1632 struct in6_msource *lims; 1633 struct sockaddr_in6 *psin; 1634 struct sockaddr_storage *ptss; 1635 struct sockaddr_storage *tss; 1636 int error; 1637 size_t nsrcs, ncsrcs; 1638 1639 INP_WLOCK_ASSERT(inp); 1640 1641 imo = inp->in6p_moptions; 1642 KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__)); 1643 1644 INP_WUNLOCK(inp); 1645 1646 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 1647 sizeof(struct __msfilterreq)); 1648 if (error) 1649 return (error); 1650 1651 if (msfr.msfr_group.ss_family != AF_INET6 || 1652 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 1653 return (EINVAL); 1654 1655 gsa = (sockunion_t *)&msfr.msfr_group; 1656 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1657 return (EINVAL); 1658 1659 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 1660 return (EADDRNOTAVAIL); 1661 ifp = ifnet_byindex(msfr.msfr_ifindex); 1662 if (ifp == NULL) 1663 return (EADDRNOTAVAIL); 1664 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1665 1666 INP_WLOCK(inp); 1667 1668 /* 1669 * Lookup group on the socket. 1670 */ 1671 imf = im6o_match_group(imo, ifp, &gsa->sa); 1672 if (imf == NULL) { 1673 INP_WUNLOCK(inp); 1674 return (EADDRNOTAVAIL); 1675 } 1676 1677 /* 1678 * Ignore memberships which are in limbo. 1679 */ 1680 if (imf->im6f_st[1] == MCAST_UNDEFINED) { 1681 INP_WUNLOCK(inp); 1682 return (EAGAIN); 1683 } 1684 msfr.msfr_fmode = imf->im6f_st[1]; 1685 1686 /* 1687 * If the user specified a buffer, copy out the source filter 1688 * entries to userland gracefully. 1689 * We only copy out the number of entries which userland 1690 * has asked for, but we always tell userland how big the 1691 * buffer really needs to be. 1692 */ 1693 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 1694 msfr.msfr_nsrcs = in6_mcast_maxsocksrc; 1695 tss = NULL; 1696 if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { 1697 tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 1698 M_TEMP, M_NOWAIT | M_ZERO); 1699 if (tss == NULL) { 1700 INP_WUNLOCK(inp); 1701 return (ENOBUFS); 1702 } 1703 } 1704 1705 /* 1706 * Count number of sources in-mode at t0. 1707 * If buffer space exists and remains, copy out source entries. 
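 *
 * Userland normally reads this back through the RFC 3678
 * getsourcefilter(3) wrapper, calling it twice: once with a zero
 * source count to learn the required size, then again with a buffer
 * (sketch, assuming grp holds the group sockaddr_in6 and ifindex the
 * interface index; error handling omitted):
 *
 *	uint32_t fmode, nsrcs = 0;
 *	struct sockaddr_storage *slist;
 *
 *	getsourcefilter(s, ifindex, (struct sockaddr *)&grp,
 *	    sizeof(grp), &fmode, &nsrcs, NULL);
 *	slist = calloc(nsrcs, sizeof(*slist));
 *	getsourcefilter(s, ifindex, (struct sockaddr *)&grp,
 *	    sizeof(grp), &fmode, &nsrcs, slist);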
1708 */ 1709 nsrcs = msfr.msfr_nsrcs; 1710 ncsrcs = 0; 1711 ptss = tss; 1712 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1713 lims = (struct in6_msource *)ims; 1714 if (lims->im6sl_st[0] == MCAST_UNDEFINED || 1715 lims->im6sl_st[0] != imf->im6f_st[0]) 1716 continue; 1717 ++ncsrcs; 1718 if (tss != NULL && nsrcs > 0) { 1719 psin = (struct sockaddr_in6 *)ptss; 1720 psin->sin6_family = AF_INET6; 1721 psin->sin6_len = sizeof(struct sockaddr_in6); 1722 psin->sin6_addr = lims->im6s_addr; 1723 psin->sin6_port = 0; 1724 --nsrcs; 1725 ++ptss; 1726 } 1727 } 1728 1729 INP_WUNLOCK(inp); 1730 1731 if (tss != NULL) { 1732 error = copyout(tss, msfr.msfr_srcs, 1733 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 1734 free(tss, M_TEMP); 1735 if (error) 1736 return (error); 1737 } 1738 1739 msfr.msfr_nsrcs = ncsrcs; 1740 error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); 1741 1742 return (error); 1743 } 1744 1745 /* 1746 * Return the IP multicast options in response to user getsockopt(). 1747 */ 1748 int 1749 ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) 1750 { 1751 struct ip6_moptions *im6o; 1752 int error; 1753 u_int optval; 1754 1755 INP_WLOCK(inp); 1756 im6o = inp->in6p_moptions; 1757 /* 1758 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 1759 * or is a divert socket, reject it. 1760 */ 1761 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 1762 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 1763 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { 1764 INP_WUNLOCK(inp); 1765 return (EOPNOTSUPP); 1766 } 1767 1768 error = 0; 1769 switch (sopt->sopt_name) { 1770 case IPV6_MULTICAST_IF: 1771 if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { 1772 optval = 0; 1773 } else { 1774 optval = im6o->im6o_multicast_ifp->if_index; 1775 } 1776 INP_WUNLOCK(inp); 1777 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1778 break; 1779 1780 case IPV6_MULTICAST_HOPS: 1781 if (im6o == NULL) 1782 optval = V_ip6_defmcasthlim; 1783 else 1784 optval = im6o->im6o_multicast_hlim; 1785 INP_WUNLOCK(inp); 1786 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1787 break; 1788 1789 case IPV6_MULTICAST_LOOP: 1790 if (im6o == NULL) 1791 optval = in6_mcast_loop; /* XXX VIMAGE */ 1792 else 1793 optval = im6o->im6o_multicast_loop; 1794 INP_WUNLOCK(inp); 1795 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1796 break; 1797 1798 case IPV6_MSFILTER: 1799 if (im6o == NULL) { 1800 error = EADDRNOTAVAIL; 1801 INP_WUNLOCK(inp); 1802 } else { 1803 error = in6p_get_source_filters(inp, sopt); 1804 } 1805 break; 1806 1807 default: 1808 INP_WUNLOCK(inp); 1809 error = ENOPROTOOPT; 1810 break; 1811 } 1812 1813 INP_UNLOCK_ASSERT(inp); 1814 1815 return (error); 1816 } 1817 1818 /* 1819 * Look up the ifnet to use for a multicast group membership, 1820 * given the address of an IPv6 group. 1821 * 1822 * This routine exists to support legacy IPv6 multicast applications. 1823 * 1824 * If inp is non-NULL, use this socket's current FIB number for any 1825 * required FIB lookup. Look up the group address in the unicast FIB, 1826 * and use its ifp; usually, this points to the default next-hop. 1827 * If the FIB lookup fails, return NULL. 1828 * 1829 * FUTURE: Support multiple forwarding tables for IPv6. 1830 * 1831 * Returns NULL if no ifp could be found. 
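 *
 * This path serves the legacy join below, where userland passes
 * interface index 0 and leaves interface selection to the kernel,
 * e.g. (sketch, assuming an AF_INET6 socket s):
 *
 *	struct ipv6_mreq mr6;
 *
 *	memset(&mr6, 0, sizeof(mr6));
 *	inet_pton(AF_INET6, "ff1e::42", &mr6.ipv6mr_multiaddr);
 *	mr6.ipv6mr_interface = 0;	// kernel picks the ifp via the FIB
 *	setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mr6, sizeof(mr6));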
1832 */ 1833 static struct ifnet * 1834 in6p_lookup_mcast_ifp(const struct inpcb *inp, 1835 const struct sockaddr_in6 *gsin6) 1836 { 1837 struct nhop6_basic nh6; 1838 struct in6_addr dst; 1839 uint32_t scopeid; 1840 uint32_t fibnum; 1841 1842 KASSERT(inp->inp_vflag & INP_IPV6, 1843 ("%s: not INP_IPV6 inpcb", __func__)); 1844 KASSERT(gsin6->sin6_family == AF_INET6, 1845 ("%s: not AF_INET6 group", __func__)); 1846 1847 in6_splitscope(&gsin6->sin6_addr, &dst, &scopeid); 1848 fibnum = inp ? inp->inp_inc.inc_fibnum : RT_DEFAULT_FIB; 1849 if (fib6_lookup_nh_basic(fibnum, &dst, scopeid, 0, 0, &nh6) != 0) 1850 return (NULL); 1851 1852 return (nh6.nh_ifp); 1853 } 1854 1855 /* 1856 * Join an IPv6 multicast group, possibly with a source. 1857 * 1858 * FIXME: The KAME use of the unspecified address (::) 1859 * to join *all* multicast groups is currently unsupported. 1860 */ 1861 static int 1862 in6p_join_group(struct inpcb *inp, struct sockopt *sopt) 1863 { 1864 struct in6_multi_head inmh; 1865 struct group_source_req gsr; 1866 sockunion_t *gsa, *ssa; 1867 struct ifnet *ifp; 1868 struct in6_mfilter *imf; 1869 struct ip6_moptions *imo; 1870 struct in6_multi *inm; 1871 struct in6_msource *lims; 1872 int error, is_new; 1873 1874 SLIST_INIT(&inmh); 1875 ifp = NULL; 1876 lims = NULL; 1877 error = 0; 1878 1879 memset(&gsr, 0, sizeof(struct group_source_req)); 1880 gsa = (sockunion_t *)&gsr.gsr_group; 1881 gsa->ss.ss_family = AF_UNSPEC; 1882 ssa = (sockunion_t *)&gsr.gsr_source; 1883 ssa->ss.ss_family = AF_UNSPEC; 1884 1885 /* 1886 * Chew everything into struct group_source_req. 1887 * Overwrite the port field if present, as the sockaddr 1888 * being copied in may be matched with a binary comparison. 1889 * Ignore passed-in scope ID. 1890 */ 1891 switch (sopt->sopt_name) { 1892 case IPV6_JOIN_GROUP: { 1893 struct ipv6_mreq mreq; 1894 1895 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 1896 sizeof(struct ipv6_mreq)); 1897 if (error) 1898 return (error); 1899 1900 gsa->sin6.sin6_family = AF_INET6; 1901 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 1902 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 1903 1904 if (mreq.ipv6mr_interface == 0) { 1905 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 1906 } else { 1907 if (V_if_index < mreq.ipv6mr_interface) 1908 return (EADDRNOTAVAIL); 1909 ifp = ifnet_byindex(mreq.ipv6mr_interface); 1910 } 1911 CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p", 1912 __func__, mreq.ipv6mr_interface, ifp); 1913 } break; 1914 1915 case MCAST_JOIN_GROUP: 1916 case MCAST_JOIN_SOURCE_GROUP: 1917 if (sopt->sopt_name == MCAST_JOIN_GROUP) { 1918 error = sooptcopyin(sopt, &gsr, 1919 sizeof(struct group_req), 1920 sizeof(struct group_req)); 1921 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1922 error = sooptcopyin(sopt, &gsr, 1923 sizeof(struct group_source_req), 1924 sizeof(struct group_source_req)); 1925 } 1926 if (error) 1927 return (error); 1928 1929 if (gsa->sin6.sin6_family != AF_INET6 || 1930 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1931 return (EINVAL); 1932 1933 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1934 if (ssa->sin6.sin6_family != AF_INET6 || 1935 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1936 return (EINVAL); 1937 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 1938 return (EINVAL); 1939 /* 1940 * TODO: Validate embedded scope ID in source 1941 * list entry against passed-in ifp, if and only 1942 * if source list filter entry is iface or node local. 
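 *
 * From userland such a source-specific join looks like the
 * MCAST_BLOCK_SOURCE sketch earlier, with the same group_source_req
 * layout but MCAST_JOIN_SOURCE_GROUP as the option name:
 *
 *	setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *	    &gsr, sizeof(gsr));
 *
 * The resulting membership is INCLUDE-mode and admits only the listed
 * source.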
1943 */ 1944 in6_clearscope(&ssa->sin6.sin6_addr); 1945 ssa->sin6.sin6_port = 0; 1946 ssa->sin6.sin6_scope_id = 0; 1947 } 1948 1949 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1950 return (EADDRNOTAVAIL); 1951 ifp = ifnet_byindex(gsr.gsr_interface); 1952 break; 1953 1954 default: 1955 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1956 __func__, sopt->sopt_name); 1957 return (EOPNOTSUPP); 1958 break; 1959 } 1960 1961 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1962 return (EINVAL); 1963 1964 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 1965 return (EADDRNOTAVAIL); 1966 1967 gsa->sin6.sin6_port = 0; 1968 gsa->sin6.sin6_scope_id = 0; 1969 1970 /* 1971 * Always set the scope zone ID on memberships created from userland. 1972 * Use the passed-in ifp to do this. 1973 * XXX The in6_setscope() return value is meaningless. 1974 * XXX SCOPE6_LOCK() is taken by in6_setscope(). 1975 */ 1976 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1977 1978 IN6_MULTI_LOCK(); 1979 1980 /* 1981 * Find the membership in the membership list. 1982 */ 1983 imo = in6p_findmoptions(inp); 1984 imf = im6o_match_group(imo, ifp, &gsa->sa); 1985 if (imf == NULL) { 1986 is_new = 1; 1987 inm = NULL; 1988 1989 if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) { 1990 error = ENOMEM; 1991 goto out_in6p_locked; 1992 } 1993 } else { 1994 is_new = 0; 1995 inm = imf->im6f_in6m; 1996 1997 if (ssa->ss.ss_family != AF_UNSPEC) { 1998 /* 1999 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 2000 * is an error. On an existing inclusive membership, 2001 * it just adds the source to the filter list. 2002 */ 2003 if (imf->im6f_st[1] != MCAST_INCLUDE) { 2004 error = EINVAL; 2005 goto out_in6p_locked; 2006 } 2007 /* 2008 * Throw out duplicates. 2009 * 2010 * XXX FIXME: This makes a naive assumption that 2011 * even if entries exist for *ssa in this imf, 2012 * they will be rejected as dupes, even if they 2013 * are not valid in the current mode (in-mode). 2014 * 2015 * in6_msource is transactioned just as for anything 2016 * else in SSM -- but note naive use of in6m_graft() 2017 * below for allocating new filter entries. 2018 * 2019 * This is only an issue if someone mixes the 2020 * full-state SSM API with the delta-based API, 2021 * which is discouraged in the relevant RFCs. 2022 */ 2023 lims = im6o_match_source(imf, &ssa->sa); 2024 if (lims != NULL /*&& 2025 lims->im6sl_st[1] == MCAST_INCLUDE*/) { 2026 error = EADDRNOTAVAIL; 2027 goto out_in6p_locked; 2028 } 2029 } else { 2030 /* 2031 * MCAST_JOIN_GROUP alone, on any existing membership, 2032 * is rejected, to stop the same inpcb tying up 2033 * multiple refs to the in_multi. 2034 * On an existing inclusive membership, this is also 2035 * an error; if you want to change filter mode, 2036 * you must use the userland API setsourcefilter(). 2037 * XXX We don't reject this for imf in UNDEFINED 2038 * state at t1, because allocation of a filter 2039 * is atomic with allocation of a membership. 2040 */ 2041 error = EINVAL; 2042 goto out_in6p_locked; 2043 } 2044 } 2045 2046 /* 2047 * Begin state merge transaction at socket layer. 2048 */ 2049 INP_WLOCK_ASSERT(inp); 2050 2051 /* 2052 * Graft new source into filter list for this inpcb's 2053 * membership of the group. The in6_multi may not have 2054 * been allocated yet if this is a new membership, however, 2055 * the in_mfilter slot will be allocated and must be initialized. 2056 * 2057 * Note: Grafting of exclusive mode filters doesn't happen 2058 * in this path. 
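 *
 * For the delta-API join with a source handled here, a freshly grafted
 * entry is expected to read as follows (illustrative sketch of the
 * in6_msource state, assuming the im6f_graft() call below succeeds):
 *
 *	lims->im6sl_st[0] == MCAST_UNDEFINED	(committed state, t0)
 *	lims->im6sl_st[1] == MCAST_INCLUDE	(pending state, t1)
 *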
2059 * XXX: Should check for non-NULL lims (node exists but may 2060 * not be in-mode) for interop with full-state API. 2061 */ 2062 if (ssa->ss.ss_family != AF_UNSPEC) { 2063 /* Membership starts in IN mode */ 2064 if (is_new) { 2065 CTR1(KTR_MLD, "%s: new join w/source", __func__); 2066 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE); 2067 if (imf == NULL) { 2068 error = ENOMEM; 2069 goto out_in6p_locked; 2070 } 2071 } else { 2072 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 2073 } 2074 lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); 2075 if (lims == NULL) { 2076 CTR1(KTR_MLD, "%s: merge imf state failed", 2077 __func__); 2078 error = ENOMEM; 2079 goto out_in6p_locked; 2080 } 2081 } else { 2082 /* No address specified; Membership starts in EX mode */ 2083 if (is_new) { 2084 CTR1(KTR_MLD, "%s: new join w/o source", __func__); 2085 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE); 2086 if (imf == NULL) { 2087 error = ENOMEM; 2088 goto out_in6p_locked; 2089 } 2090 } 2091 } 2092 2093 /* 2094 * Begin state merge transaction at MLD layer. 2095 */ 2096 if (is_new) { 2097 in_pcbref(inp); 2098 INP_WUNLOCK(inp); 2099 2100 error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf, 2101 &imf->im6f_in6m, 0); 2102 2103 INP_WLOCK(inp); 2104 if (in_pcbrele_wlocked(inp)) { 2105 error = ENXIO; 2106 goto out_in6p_unlocked; 2107 } 2108 if (error) { 2109 goto out_in6p_locked; 2110 } 2111 /* 2112 * NOTE: Refcount from in6_joingroup_locked() 2113 * is protecting membership. 2114 */ 2115 ip6_mfilter_insert(&imo->im6o_head, imf); 2116 } else { 2117 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2118 IN6_MULTI_LIST_LOCK(); 2119 error = in6m_merge(inm, imf); 2120 if (error) { 2121 CTR1(KTR_MLD, "%s: failed to merge inm state", 2122 __func__); 2123 IN6_MULTI_LIST_UNLOCK(); 2124 im6f_rollback(imf); 2125 im6f_reap(imf); 2126 goto out_in6p_locked; 2127 } 2128 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2129 error = mld_change_state(inm, 0); 2130 IN6_MULTI_LIST_UNLOCK(); 2131 2132 if (error) { 2133 CTR1(KTR_MLD, "%s: failed mld downcall", 2134 __func__); 2135 im6f_rollback(imf); 2136 im6f_reap(imf); 2137 goto out_in6p_locked; 2138 } 2139 } 2140 2141 im6f_commit(imf); 2142 imf = NULL; 2143 2144 out_in6p_locked: 2145 INP_WUNLOCK(inp); 2146 out_in6p_unlocked: 2147 IN6_MULTI_UNLOCK(); 2148 2149 if (is_new && imf) { 2150 if (imf->im6f_in6m != NULL) { 2151 struct in6_multi_head inmh; 2152 2153 SLIST_INIT(&inmh); 2154 SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer); 2155 in6m_release_list_deferred(&inmh); 2156 } 2157 ip6_mfilter_free(imf); 2158 } 2159 return (error); 2160 } 2161 2162 /* 2163 * Leave an IPv6 multicast group on an inpcb, possibly with a source. 
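 *
 * A minimal userland sketch of the two request shapes that end up here,
 * for illustration only (mreq is a struct ipv6_mreq and gsr a struct
 * group_source_req, both filled in by the caller):
 *
 *	setsockopt(s, IPPROTO_IPV6, IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
 *	setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_SOURCE_GROUP,
 *	    &gsr, sizeof(gsr));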
2164 */ 2165 static int 2166 in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) 2167 { 2168 struct ipv6_mreq mreq; 2169 struct group_source_req gsr; 2170 sockunion_t *gsa, *ssa; 2171 struct ifnet *ifp; 2172 struct in6_mfilter *imf; 2173 struct ip6_moptions *imo; 2174 struct in6_msource *ims; 2175 struct in6_multi *inm; 2176 uint32_t ifindex; 2177 int error; 2178 bool is_final; 2179 #ifdef KTR 2180 char ip6tbuf[INET6_ADDRSTRLEN]; 2181 #endif 2182 2183 ifp = NULL; 2184 ifindex = 0; 2185 error = 0; 2186 is_final = true; 2187 2188 memset(&gsr, 0, sizeof(struct group_source_req)); 2189 gsa = (sockunion_t *)&gsr.gsr_group; 2190 gsa->ss.ss_family = AF_UNSPEC; 2191 ssa = (sockunion_t *)&gsr.gsr_source; 2192 ssa->ss.ss_family = AF_UNSPEC; 2193 2194 /* 2195 * Chew everything passed in up into a struct group_source_req 2196 * as that is easier to process. 2197 * Note: Any embedded scope ID in the multicast group passed 2198 * in by userland is ignored, the interface index is the recommended 2199 * mechanism to specify an interface; see below. 2200 */ 2201 switch (sopt->sopt_name) { 2202 case IPV6_LEAVE_GROUP: 2203 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 2204 sizeof(struct ipv6_mreq)); 2205 if (error) 2206 return (error); 2207 gsa->sin6.sin6_family = AF_INET6; 2208 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 2209 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 2210 gsa->sin6.sin6_port = 0; 2211 gsa->sin6.sin6_scope_id = 0; 2212 ifindex = mreq.ipv6mr_interface; 2213 break; 2214 2215 case MCAST_LEAVE_GROUP: 2216 case MCAST_LEAVE_SOURCE_GROUP: 2217 if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2218 error = sooptcopyin(sopt, &gsr, 2219 sizeof(struct group_req), 2220 sizeof(struct group_req)); 2221 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2222 error = sooptcopyin(sopt, &gsr, 2223 sizeof(struct group_source_req), 2224 sizeof(struct group_source_req)); 2225 } 2226 if (error) 2227 return (error); 2228 2229 if (gsa->sin6.sin6_family != AF_INET6 || 2230 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2231 return (EINVAL); 2232 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2233 if (ssa->sin6.sin6_family != AF_INET6 || 2234 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2235 return (EINVAL); 2236 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 2237 return (EINVAL); 2238 /* 2239 * TODO: Validate embedded scope ID in source 2240 * list entry against passed-in ifp, if and only 2241 * if source list filter entry is iface or node local. 2242 */ 2243 in6_clearscope(&ssa->sin6.sin6_addr); 2244 } 2245 gsa->sin6.sin6_port = 0; 2246 gsa->sin6.sin6_scope_id = 0; 2247 ifindex = gsr.gsr_interface; 2248 break; 2249 2250 default: 2251 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 2252 __func__, sopt->sopt_name); 2253 return (EOPNOTSUPP); 2254 break; 2255 } 2256 2257 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2258 return (EINVAL); 2259 2260 /* 2261 * Validate interface index if provided. If no interface index 2262 * was provided separately, attempt to look the membership up 2263 * from the default scope as a last resort to disambiguate 2264 * the membership we are being asked to leave. 2265 * XXX SCOPE6 lock potentially taken here. 
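 *
 * For reference: after sa6_embedscope() a KAME-style zone, if any, sits
 * in the second 16-bit word of the scoped group address, which is what
 * the s6_addr16[1] fallback below extracts; e.g. ff02::1 in zone 3 is
 * represented internally as ff02:3::1.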
2266 */ 2267 if (ifindex != 0) { 2268 if (V_if_index < ifindex) 2269 return (EADDRNOTAVAIL); 2270 ifp = ifnet_byindex(ifindex); 2271 if (ifp == NULL) 2272 return (EADDRNOTAVAIL); 2273 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2274 } else { 2275 error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); 2276 if (error) 2277 return (EADDRNOTAVAIL); 2278 /* 2279 * Some badly behaved applications don't pass an ifindex 2280 * or a scope ID, which is an API violation. In this case, 2281 * perform a lookup as per a v6 join. 2282 * 2283 * XXX For now, stomp on zone ID for the corner case. 2284 * This is not the 'KAME way', but we need to see the ifp 2285 * directly until such time as this implementation is 2286 * refactored, assuming the scope IDs are the way to go. 2287 */ 2288 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]); 2289 if (ifindex == 0) { 2290 CTR2(KTR_MLD, "%s: warning: no ifindex, looking up " 2291 "ifp for group %s.", __func__, 2292 ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr)); 2293 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 2294 } else { 2295 ifp = ifnet_byindex(ifindex); 2296 } 2297 if (ifp == NULL) 2298 return (EADDRNOTAVAIL); 2299 } 2300 2301 CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp); 2302 KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__)); 2303 2304 IN6_MULTI_LOCK(); 2305 2306 /* 2307 * Find the membership in the membership list. 2308 */ 2309 imo = in6p_findmoptions(inp); 2310 imf = im6o_match_group(imo, ifp, &gsa->sa); 2311 if (imf == NULL) { 2312 error = EADDRNOTAVAIL; 2313 goto out_in6p_locked; 2314 } 2315 inm = imf->im6f_in6m; 2316 2317 if (ssa->ss.ss_family != AF_UNSPEC) 2318 is_final = false; 2319 2320 /* 2321 * Begin state merge transaction at socket layer. 2322 */ 2323 INP_WLOCK_ASSERT(inp); 2324 2325 /* 2326 * If we were instructed only to leave a given source, do so. 2327 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2328 */ 2329 if (is_final) { 2330 ip6_mfilter_remove(&imo->im6o_head, imf); 2331 im6f_leave(imf); 2332 2333 /* 2334 * Give up the multicast address record to which 2335 * the membership points. 2336 */ 2337 (void)in6_leavegroup_locked(inm, imf); 2338 } else { 2339 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 2340 error = EADDRNOTAVAIL; 2341 goto out_in6p_locked; 2342 } 2343 ims = im6o_match_source(imf, &ssa->sa); 2344 if (ims == NULL) { 2345 CTR3(KTR_MLD, "%s: source %p %spresent", __func__, 2346 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 2347 "not "); 2348 error = EADDRNOTAVAIL; 2349 goto out_in6p_locked; 2350 } 2351 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 2352 error = im6f_prune(imf, &ssa->sin6); 2353 if (error) { 2354 CTR1(KTR_MLD, "%s: merge imf state failed", 2355 __func__); 2356 goto out_in6p_locked; 2357 } 2358 } 2359 2360 /* 2361 * Begin state merge transaction at MLD layer. 
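 * On failure the pending (t1) filter state is rolled back to the
 * committed (t0) state via im6f_rollback(); on success im6f_commit()
 * promotes t1 to t0.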
2362 */ 2363 if (!is_final) { 2364 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2365 IN6_MULTI_LIST_LOCK(); 2366 error = in6m_merge(inm, imf); 2367 if (error) { 2368 CTR1(KTR_MLD, "%s: failed to merge inm state", 2369 __func__); 2370 IN6_MULTI_LIST_UNLOCK(); 2371 im6f_rollback(imf); 2372 im6f_reap(imf); 2373 goto out_in6p_locked; 2374 } 2375 2376 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2377 error = mld_change_state(inm, 0); 2378 IN6_MULTI_LIST_UNLOCK(); 2379 if (error) { 2380 CTR1(KTR_MLD, "%s: failed mld downcall", 2381 __func__); 2382 im6f_rollback(imf); 2383 im6f_reap(imf); 2384 goto out_in6p_locked; 2385 } 2386 } 2387 2388 im6f_commit(imf); 2389 im6f_reap(imf); 2390 2391 out_in6p_locked: 2392 INP_WUNLOCK(inp); 2393 2394 if (is_final && imf) 2395 ip6_mfilter_free(imf); 2396 2397 IN6_MULTI_UNLOCK(); 2398 return (error); 2399 } 2400 2401 /* 2402 * Select the interface for transmitting IPv6 multicast datagrams. 2403 * 2404 * The option value is a u_int interface index; struct in6_addr and 2405 * struct ipv6_mreqn are not accepted here. An interface index of 0 2406 * removes a previous selection. 2407 * When no interface is selected, one is chosen for every send. 2408 */ 2409 static int 2410 in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) 2411 { 2412 struct ifnet *ifp; 2413 struct ip6_moptions *imo; 2414 u_int ifindex; 2415 int error; 2416 2417 if (sopt->sopt_valsize != sizeof(u_int)) 2418 return (EINVAL); 2419 2420 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); 2421 if (error) 2422 return (error); 2423 if (V_if_index < ifindex) 2424 return (EINVAL); 2425 if (ifindex == 0) 2426 ifp = NULL; 2427 else { 2428 ifp = ifnet_byindex(ifindex); 2429 if (ifp == NULL) 2430 return (EINVAL); 2431 if ((ifp->if_flags & IFF_MULTICAST) == 0) 2432 return (EADDRNOTAVAIL); 2433 } 2434 imo = in6p_findmoptions(inp); 2435 imo->im6o_multicast_ifp = ifp; 2436 INP_WUNLOCK(inp); 2437 2438 return (0); 2439 } 2440 2441 /* 2442 * Atomically set source filters on a socket for an IPv6 multicast group. 2443 * 2444 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 2445 */ 2446 static int 2447 in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) 2448 { 2449 struct __msfilterreq msfr; 2450 sockunion_t *gsa; 2451 struct ifnet *ifp; 2452 struct in6_mfilter *imf; 2453 struct ip6_moptions *imo; 2454 struct in6_multi *inm; 2455 int error; 2456 2457 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 2458 sizeof(struct __msfilterreq)); 2459 if (error) 2460 return (error); 2461 2462 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 2463 return (ENOBUFS); 2464 2465 if (msfr.msfr_fmode != MCAST_EXCLUDE && 2466 msfr.msfr_fmode != MCAST_INCLUDE) 2467 return (EINVAL); 2468 2469 if (msfr.msfr_group.ss_family != AF_INET6 || 2470 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 2471 return (EINVAL); 2472 2473 gsa = (sockunion_t *)&msfr.msfr_group; 2474 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2475 return (EINVAL); 2476 2477 gsa->sin6.sin6_port = 0; /* ignore port */ 2478 2479 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 2480 return (EADDRNOTAVAIL); 2481 ifp = ifnet_byindex(msfr.msfr_ifindex); 2482 if (ifp == NULL) 2483 return (EADDRNOTAVAIL); 2484 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2485 2486 /* 2487 * Take the INP write lock. 2488 * Check if this socket is a member of this group.
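 * (in6p_findmoptions() returns with the INP write lock held, which the
 * INP_WLOCK_ASSERT() below relies on.)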
2489 */ 2490 imo = in6p_findmoptions(inp); 2491 imf = im6o_match_group(imo, ifp, &gsa->sa); 2492 if (imf == NULL) { 2493 error = EADDRNOTAVAIL; 2494 goto out_in6p_locked; 2495 } 2496 inm = imf->im6f_in6m; 2497 2498 /* 2499 * Begin state merge transaction at socket layer. 2500 */ 2501 INP_WLOCK_ASSERT(inp); 2502 2503 imf->im6f_st[1] = msfr.msfr_fmode; 2504 2505 /* 2506 * Apply any new source filters, if present. 2507 * Make a copy of the user-space source vector so 2508 * that we may copy them with a single copyin. This 2509 * allows us to deal with page faults up-front. 2510 */ 2511 if (msfr.msfr_nsrcs > 0) { 2512 struct in6_msource *lims; 2513 struct sockaddr_in6 *psin; 2514 struct sockaddr_storage *kss, *pkss; 2515 int i; 2516 2517 INP_WUNLOCK(inp); 2518 2519 CTR2(KTR_MLD, "%s: loading %lu source list entries", 2520 __func__, (unsigned long)msfr.msfr_nsrcs); 2521 kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 2522 M_TEMP, M_WAITOK); 2523 error = copyin(msfr.msfr_srcs, kss, 2524 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 2525 if (error) { 2526 free(kss, M_TEMP); 2527 return (error); 2528 } 2529 2530 INP_WLOCK(inp); 2531 2532 /* 2533 * Mark all source filters as UNDEFINED at t1. 2534 * Restore new group filter mode, as im6f_leave() 2535 * will set it to INCLUDE. 2536 */ 2537 im6f_leave(imf); 2538 imf->im6f_st[1] = msfr.msfr_fmode; 2539 2540 /* 2541 * Update socket layer filters at t1, lazy-allocating 2542 * new entries. This saves a bunch of memory at the 2543 * cost of one RB_FIND() per source entry; duplicate 2544 * entries in the msfr_nsrcs vector are ignored. 2545 * If we encounter an error, rollback transaction. 2546 * 2547 * XXX This too could be replaced with a set-symmetric 2548 * difference like loop to avoid walking from root 2549 * every time, as the key space is common. 2550 */ 2551 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2552 psin = (struct sockaddr_in6 *)pkss; 2553 if (psin->sin6_family != AF_INET6) { 2554 error = EAFNOSUPPORT; 2555 break; 2556 } 2557 if (psin->sin6_len != sizeof(struct sockaddr_in6)) { 2558 error = EINVAL; 2559 break; 2560 } 2561 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { 2562 error = EINVAL; 2563 break; 2564 } 2565 /* 2566 * TODO: Validate embedded scope ID in source 2567 * list entry against passed-in ifp, if and only 2568 * if source list filter entry is iface or node local. 2569 */ 2570 in6_clearscope(&psin->sin6_addr); 2571 error = im6f_get_source(imf, psin, &lims); 2572 if (error) 2573 break; 2574 lims->im6sl_st[1] = imf->im6f_st[1]; 2575 } 2576 free(kss, M_TEMP); 2577 } 2578 2579 if (error) 2580 goto out_im6f_rollback; 2581 2582 INP_WLOCK_ASSERT(inp); 2583 IN6_MULTI_LIST_LOCK(); 2584 2585 /* 2586 * Begin state merge transaction at MLD layer. 2587 */ 2588 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2589 error = in6m_merge(inm, imf); 2590 if (error) 2591 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 2592 else { 2593 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2594 error = mld_change_state(inm, 0); 2595 if (error) 2596 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 2597 } 2598 2599 IN6_MULTI_LIST_UNLOCK(); 2600 2601 out_im6f_rollback: 2602 if (error) 2603 im6f_rollback(imf); 2604 else 2605 im6f_commit(imf); 2606 2607 im6f_reap(imf); 2608 2609 out_in6p_locked: 2610 INP_WUNLOCK(inp); 2611 return (error); 2612 } 2613 2614 /* 2615 * Set the IP multicast options in response to user setsockopt(). 
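 *
 * For example, a full-state filter change arrives here as IPV6_MSFILTER;
 * a hedged userland sketch using setsourcefilter(3), where grp is the
 * caller's group as a struct sockaddr_storage and src6 a sockaddr_in6
 * source:
 *
 *	struct sockaddr_storage srcs[1];
 *
 *	memcpy(&srcs[0], &src6, sizeof(struct sockaddr_in6));
 *	setsourcefilter(s, ifindex, (struct sockaddr *)&grp, grp.ss_len,
 *	    MCAST_INCLUDE, 1, srcs);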
2616 * 2617 * Many of the socket options handled in this function duplicate the 2618 * functionality of socket options in the regular unicast API. However, 2619 * it is not possible to merge the duplicate code, because the idempotence 2620 * of the IPv6 multicast part of the BSD Sockets API must be preserved; 2621 * the effects of these options must be treated as separate and distinct. 2622 * 2623 * SMPng: XXX: Unlocked read of inp_socket believed OK. 2624 */ 2625 int 2626 ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2627 { 2628 struct ip6_moptions *im6o; 2629 int error; 2630 2631 error = 0; 2632 2633 /* 2634 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 2635 * or is a divert socket, reject it. 2636 */ 2637 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 2638 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2639 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) 2640 return (EOPNOTSUPP); 2641 2642 switch (sopt->sopt_name) { 2643 case IPV6_MULTICAST_IF: 2644 error = in6p_set_multicast_if(inp, sopt); 2645 break; 2646 2647 case IPV6_MULTICAST_HOPS: { 2648 int hlim; 2649 2650 if (sopt->sopt_valsize != sizeof(int)) { 2651 error = EINVAL; 2652 break; 2653 } 2654 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); 2655 if (error) 2656 break; 2657 if (hlim < -1 || hlim > 255) { 2658 error = EINVAL; 2659 break; 2660 } else if (hlim == -1) { 2661 hlim = V_ip6_defmcasthlim; 2662 } 2663 im6o = in6p_findmoptions(inp); 2664 im6o->im6o_multicast_hlim = hlim; 2665 INP_WUNLOCK(inp); 2666 break; 2667 } 2668 2669 case IPV6_MULTICAST_LOOP: { 2670 u_int loop; 2671 2672 /* 2673 * Set the loopback flag for outgoing multicast packets. 2674 * Must be zero or one. 2675 */ 2676 if (sopt->sopt_valsize != sizeof(u_int)) { 2677 error = EINVAL; 2678 break; 2679 } 2680 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); 2681 if (error) 2682 break; 2683 if (loop > 1) { 2684 error = EINVAL; 2685 break; 2686 } 2687 im6o = in6p_findmoptions(inp); 2688 im6o->im6o_multicast_loop = loop; 2689 INP_WUNLOCK(inp); 2690 break; 2691 } 2692 2693 case IPV6_JOIN_GROUP: 2694 case MCAST_JOIN_GROUP: 2695 case MCAST_JOIN_SOURCE_GROUP: 2696 error = in6p_join_group(inp, sopt); 2697 break; 2698 2699 case IPV6_LEAVE_GROUP: 2700 case MCAST_LEAVE_GROUP: 2701 case MCAST_LEAVE_SOURCE_GROUP: 2702 error = in6p_leave_group(inp, sopt); 2703 break; 2704 2705 case MCAST_BLOCK_SOURCE: 2706 case MCAST_UNBLOCK_SOURCE: 2707 error = in6p_block_unblock_source(inp, sopt); 2708 break; 2709 2710 case IPV6_MSFILTER: 2711 error = in6p_set_source_filters(inp, sopt); 2712 break; 2713 2714 default: 2715 error = EOPNOTSUPP; 2716 break; 2717 } 2718 2719 INP_UNLOCK_ASSERT(inp); 2720 2721 return (error); 2722 } 2723 2724 /* 2725 * Expose MLD's multicast filter mode and source list(s) to userland, 2726 * keyed by (ifindex, group). 2727 * The filter mode is written out as a uint32_t, followed by 2728 * 0..n of struct in6_addr. 2729 * For use by ifmcstat(8). 2730 * SMPng: NOTE: unlocked read of ifindex space. 
2731 */ 2732 static int 2733 sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) 2734 { 2735 struct in6_addr mcaddr; 2736 struct in6_addr src; 2737 struct epoch_tracker et; 2738 struct ifnet *ifp; 2739 struct ifmultiaddr *ifma; 2740 struct in6_multi *inm; 2741 struct ip6_msource *ims; 2742 int *name; 2743 int retval; 2744 u_int namelen; 2745 uint32_t fmode, ifindex; 2746 #ifdef KTR 2747 char ip6tbuf[INET6_ADDRSTRLEN]; 2748 #endif 2749 2750 name = (int *)arg1; 2751 namelen = arg2; 2752 2753 if (req->newptr != NULL) 2754 return (EPERM); 2755 2756 /* int: ifindex + 4 * 32 bits of IPv6 address */ 2757 if (namelen != 5) 2758 return (EINVAL); 2759 2760 ifindex = name[0]; 2761 if (ifindex <= 0 || ifindex > V_if_index) { 2762 CTR2(KTR_MLD, "%s: ifindex %u out of range", 2763 __func__, ifindex); 2764 return (ENOENT); 2765 } 2766 2767 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); 2768 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { 2769 CTR2(KTR_MLD, "%s: group %s is not multicast", 2770 __func__, ip6_sprintf(ip6tbuf, &mcaddr)); 2771 return (EINVAL); 2772 } 2773 2774 NET_EPOCH_ENTER(et); 2775 ifp = ifnet_byindex(ifindex); 2776 if (ifp == NULL) { 2777 NET_EPOCH_EXIT(et); 2778 CTR2(KTR_MLD, "%s: no ifp for ifindex %u", 2779 __func__, ifindex); 2780 return (ENOENT); 2781 } 2782 /* 2783 * Internal MLD lookups require that scope/zone ID is set. 2784 */ 2785 (void)in6_setscope(&mcaddr, ifp, NULL); 2786 2787 retval = sysctl_wire_old_buffer(req, 2788 sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); 2789 if (retval) { 2790 NET_EPOCH_EXIT(et); 2791 return (retval); 2792 } 2793 2794 IN6_MULTI_LOCK(); 2795 IN6_MULTI_LIST_LOCK(); 2796 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2797 inm = in6m_ifmultiaddr_get_inm(ifma); 2798 if (inm == NULL) 2799 continue; 2800 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) 2801 continue; 2802 fmode = inm->in6m_st[1].iss_fmode; 2803 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); 2804 if (retval != 0) 2805 break; 2806 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 2807 CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); 2808 /* 2809 * Only copy-out sources which are in-mode. 2810 */ 2811 if (fmode != im6s_get_mode(inm, ims, 1)) { 2812 CTR1(KTR_MLD, "%s: skip non-in-mode", 2813 __func__); 2814 continue; 2815 } 2816 src = ims->im6s_addr; 2817 retval = SYSCTL_OUT(req, &src, 2818 sizeof(struct in6_addr)); 2819 if (retval != 0) 2820 break; 2821 } 2822 } 2823 IN6_MULTI_LIST_UNLOCK(); 2824 IN6_MULTI_UNLOCK(); 2825 NET_EPOCH_EXIT(et); 2826 2827 return (retval); 2828 } 2829 2830 #ifdef KTR 2831 2832 static const char *in6m_modestrs[] = { "un", "in", "ex" }; 2833 2834 static const char * 2835 in6m_mode_str(const int mode) 2836 { 2837 2838 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) 2839 return (in6m_modestrs[mode]); 2840 return ("??"); 2841 } 2842 2843 static const char *in6m_statestrs[] = { 2844 "not-member", 2845 "silent", 2846 "idle", 2847 "lazy", 2848 "sleeping", 2849 "awakening", 2850 "query-pending", 2851 "sg-query-pending", 2852 "leaving" 2853 }; 2854 2855 static const char * 2856 in6m_state_str(const int state) 2857 { 2858 2859 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) 2860 return (in6m_statestrs[state]); 2861 return ("??"); 2862 } 2863 2864 /* 2865 * Dump an in6_multi structure to the console. 
2866 */ 2867 void 2868 in6m_print(const struct in6_multi *inm) 2869 { 2870 int t; 2871 char ip6tbuf[INET6_ADDRSTRLEN]; 2872 2873 if ((ktr_mask & KTR_MLD) == 0) 2874 return; 2875 2876 printf("%s: --- begin in6m %p ---\n", __func__, inm); 2877 printf("addr %s ifp %p(%s) ifma %p\n", 2878 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2879 inm->in6m_ifp, 2880 if_name(inm->in6m_ifp), 2881 inm->in6m_ifma); 2882 printf("timer %u state %s refcount %u scq.len %u\n", 2883 inm->in6m_timer, 2884 in6m_state_str(inm->in6m_state), 2885 inm->in6m_refcount, 2886 mbufq_len(&inm->in6m_scq)); 2887 printf("mli %p nsrc %lu sctimer %u scrv %u\n", 2888 inm->in6m_mli, 2889 inm->in6m_nsrc, 2890 inm->in6m_sctimer, 2891 inm->in6m_scrv); 2892 for (t = 0; t < 2; t++) { 2893 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, 2894 in6m_mode_str(inm->in6m_st[t].iss_fmode), 2895 inm->in6m_st[t].iss_asm, 2896 inm->in6m_st[t].iss_ex, 2897 inm->in6m_st[t].iss_in, 2898 inm->in6m_st[t].iss_rec); 2899 } 2900 printf("%s: --- end in6m %p ---\n", __func__, inm); 2901 } 2902 2903 #else /* !KTR */ 2904 2905 void 2906 in6m_print(const struct in6_multi *inm) 2907 { 2908 2909 } 2910 2911 #endif /* KTR */ 2912