1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2009 Bruce Simpson. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of the author may not be used to endorse or promote 16 * products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /* 33 * IPv6 multicast socket, group, and socket option processing module. 34 * Normative references: RFC 2292, RFC 3492, RFC 3542, RFC 3678, RFC 3810. 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_inet6.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/ktr.h> 46 #include <sys/malloc.h> 47 #include <sys/mbuf.h> 48 #include <sys/protosw.h> 49 #include <sys/socket.h> 50 #include <sys/socketvar.h> 51 #include <sys/sysctl.h> 52 #include <sys/priv.h> 53 #include <sys/taskqueue.h> 54 #include <sys/tree.h> 55 56 #include <net/if.h> 57 #include <net/if_var.h> 58 #include <net/if_dl.h> 59 #include <net/route.h> 60 #include <net/vnet.h> 61 62 #include <netinet/in.h> 63 #include <netinet/udp.h> 64 #include <netinet/in_var.h> 65 #include <netinet/ip_var.h> 66 #include <netinet/udp_var.h> 67 #include <netinet6/in6_fib.h> 68 #include <netinet6/in6_var.h> 69 #include <netinet/ip6.h> 70 #include <netinet/icmp6.h> 71 #include <netinet6/ip6_var.h> 72 #include <netinet/in_pcb.h> 73 #include <netinet/tcp_var.h> 74 #include <netinet6/nd6.h> 75 #include <netinet6/mld6_var.h> 76 #include <netinet6/scope6_var.h> 77 78 #ifndef KTR_MLD 79 #define KTR_MLD KTR_INET6 80 #endif 81 82 #ifndef __SOCKUNION_DECLARED 83 union sockunion { 84 struct sockaddr_storage ss; 85 struct sockaddr sa; 86 struct sockaddr_dl sdl; 87 struct sockaddr_in6 sin6; 88 }; 89 typedef union sockunion sockunion_t; 90 #define __SOCKUNION_DECLARED 91 #endif /* __SOCKUNION_DECLARED */ 92 93 static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter", 94 "IPv6 multicast PCB-layer source filter"); 95 MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group"); 96 static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options"); 97 static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource", 98 "IPv6 multicast MLD-layer source filter"); 99 100 RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp); 101 102 /* 103 * Locking: 104 * - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK, 105 * IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK. 106 * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however 107 * it can be taken by code in net/if.c also. 108 * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK. 109 * 110 * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly 111 * any need for in6_multi itself to be virtualized -- it is bound to an ifp 112 * anyway no matter what happens. 113 */ 114 struct mtx in6_multi_list_mtx; 115 MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF); 116 117 struct mtx in6_multi_free_mtx; 118 MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF); 119 120 struct sx in6_multi_sx; 121 SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx"); 122 123 static void im6f_commit(struct in6_mfilter *); 124 static int im6f_get_source(struct in6_mfilter *imf, 125 const struct sockaddr_in6 *psin, 126 struct in6_msource **); 127 static struct in6_msource * 128 im6f_graft(struct in6_mfilter *, const uint8_t, 129 const struct sockaddr_in6 *); 130 static void im6f_leave(struct in6_mfilter *); 131 static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); 132 static void im6f_purge(struct in6_mfilter *); 133 static void im6f_rollback(struct in6_mfilter *); 134 static void im6f_reap(struct in6_mfilter *); 135 static struct in6_mfilter * 136 im6o_match_group(const struct ip6_moptions *, 137 const struct ifnet *, const struct sockaddr *); 138 static struct in6_msource * 139 im6o_match_source(struct in6_mfilter *, const struct sockaddr *); 140 static void im6s_merge(struct ip6_msource *ims, 141 const struct in6_msource *lims, const int rollback); 142 static int in6_getmulti(struct ifnet *, const struct in6_addr *, 143 struct in6_multi **); 144 static int in6_joingroup_locked(struct ifnet *, const struct in6_addr *, 145 struct in6_mfilter *, struct in6_multi **, int); 146 static int in6m_get_source(struct in6_multi *inm, 147 const struct in6_addr *addr, const int noalloc, 148 struct ip6_msource **pims); 149 #ifdef KTR 150 static int in6m_is_ifp_detached(const struct in6_multi *); 151 #endif 152 static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); 153 static void in6m_purge(struct in6_multi *); 154 static void in6m_reap(struct in6_multi *); 155 static struct ip6_moptions * 156 in6p_findmoptions(struct inpcb *); 157 static int in6p_get_source_filters(struct inpcb *, struct sockopt *); 158 static int in6p_join_group(struct inpcb *, struct sockopt *); 159 static int in6p_leave_group(struct inpcb *, struct sockopt *); 160 static struct ifnet * 161 in6p_lookup_mcast_ifp(const struct inpcb *, 162 const struct sockaddr_in6 *); 163 static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); 164 static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); 165 static int in6p_set_source_filters(struct inpcb *, struct sockopt *); 166 static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS); 167 168 SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. */ 169 170 static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, 171 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 172 "IPv6 multicast"); 173 174 static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER; 175 SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc, 176 CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0, 177 "Max source filters per group"); 178 179 static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER; 180 SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc, 181 CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0, 182 "Max source filters per socket"); 183 184 /* TODO Virtualize this switch. */ 185 int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP; 186 SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN, 187 &in6_mcast_loop, 0, "Loopback multicast datagrams by default"); 188 189 static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters, 190 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters, 191 "Per-interface stack-wide source filters"); 192 193 #ifdef KTR 194 /* 195 * Inline function which wraps assertions for a valid ifp. 196 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp 197 * is detached. 198 */ 199 static int __inline 200 in6m_is_ifp_detached(const struct in6_multi *inm) 201 { 202 struct ifnet *ifp; 203 204 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); 205 ifp = inm->in6m_ifma->ifma_ifp; 206 if (ifp != NULL) { 207 /* 208 * Sanity check that network-layer notion of ifp is the 209 * same as that of link-layer. 210 */ 211 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); 212 } 213 214 return (ifp == NULL); 215 } 216 #endif 217 218 /* 219 * Initialize an in6_mfilter structure to a known state at t0, t1 220 * with an empty source filter list. 221 */ 222 static __inline void 223 im6f_init(struct in6_mfilter *imf, const int st0, const int st1) 224 { 225 memset(imf, 0, sizeof(struct in6_mfilter)); 226 RB_INIT(&imf->im6f_sources); 227 imf->im6f_st[0] = st0; 228 imf->im6f_st[1] = st1; 229 } 230 231 struct in6_mfilter * 232 ip6_mfilter_alloc(const int mflags, const int st0, const int st1) 233 { 234 struct in6_mfilter *imf; 235 236 imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags); 237 238 if (imf != NULL) 239 im6f_init(imf, st0, st1); 240 241 return (imf); 242 } 243 244 void 245 ip6_mfilter_free(struct in6_mfilter *imf) 246 { 247 248 im6f_purge(imf); 249 free(imf, M_IN6MFILTER); 250 } 251 252 /* 253 * Find an IPv6 multicast group entry for this ip6_moptions instance 254 * which matches the specified group, and optionally an interface. 255 * Return its index into the array, or -1 if not found. 256 */ 257 static struct in6_mfilter * 258 im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp, 259 const struct sockaddr *group) 260 { 261 const struct sockaddr_in6 *gsin6; 262 struct in6_mfilter *imf; 263 struct in6_multi *inm; 264 265 gsin6 = (const struct sockaddr_in6 *)group; 266 267 IP6_MFILTER_FOREACH(imf, &imo->im6o_head) { 268 inm = imf->im6f_in6m; 269 if (inm == NULL) 270 continue; 271 if ((ifp == NULL || (inm->in6m_ifp == ifp)) && 272 IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, 273 &gsin6->sin6_addr)) { 274 break; 275 } 276 } 277 return (imf); 278 } 279 280 /* 281 * Find an IPv6 multicast source entry for this imo which matches 282 * the given group index for this socket, and source address. 283 * 284 * XXX TODO: The scope ID, if present in src, is stripped before 285 * any comparison. We SHOULD enforce scope/zone checks where the source 286 * filter entry has a link scope. 287 * 288 * NOTE: This does not check if the entry is in-mode, merely if 289 * it exists, which may not be the desired behaviour. 290 */ 291 static struct in6_msource * 292 im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src) 293 { 294 struct ip6_msource find; 295 struct ip6_msource *ims; 296 const sockunion_t *psa; 297 298 KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__)); 299 300 psa = (const sockunion_t *)src; 301 find.im6s_addr = psa->sin6.sin6_addr; 302 in6_clearscope(&find.im6s_addr); /* XXX */ 303 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 304 305 return ((struct in6_msource *)ims); 306 } 307 308 /* 309 * Perform filtering for multicast datagrams on a socket by group and source. 310 * 311 * Returns 0 if a datagram should be allowed through, or various error codes 312 * if the socket was not a member of the group, or the source was muted, etc. 313 */ 314 int 315 im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, 316 const struct sockaddr *group, const struct sockaddr *src) 317 { 318 struct in6_mfilter *imf; 319 struct in6_msource *ims; 320 int mode; 321 322 KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 323 324 imf = im6o_match_group(imo, ifp, group); 325 if (imf == NULL) 326 return (MCAST_NOTGMEMBER); 327 328 /* 329 * Check if the source was included in an (S,G) join. 330 * Allow reception on exclusive memberships by default, 331 * reject reception on inclusive memberships by default. 332 * Exclude source only if an in-mode exclude filter exists. 333 * Include source only if an in-mode include filter exists. 334 * NOTE: We are comparing group state here at MLD t1 (now) 335 * with socket-layer t0 (since last downcall). 336 */ 337 mode = imf->im6f_st[1]; 338 ims = im6o_match_source(imf, src); 339 340 if ((ims == NULL && mode == MCAST_INCLUDE) || 341 (ims != NULL && ims->im6sl_st[0] != mode)) 342 return (MCAST_NOTSMEMBER); 343 344 return (MCAST_PASS); 345 } 346 347 /* 348 * Find and return a reference to an in6_multi record for (ifp, group), 349 * and bump its reference count. 350 * If one does not exist, try to allocate it, and update link-layer multicast 351 * filters on ifp to listen for group. 352 * Assumes the IN6_MULTI lock is held across the call. 353 * Return 0 if successful, otherwise return an appropriate error code. 354 */ 355 static int 356 in6_getmulti(struct ifnet *ifp, const struct in6_addr *group, 357 struct in6_multi **pinm) 358 { 359 struct epoch_tracker et; 360 struct sockaddr_in6 gsin6; 361 struct ifmultiaddr *ifma; 362 struct in6_multi *inm; 363 int error; 364 365 error = 0; 366 367 /* 368 * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK; 369 * if_addmulti() takes this mutex itself, so we must drop and 370 * re-acquire around the call. 371 */ 372 IN6_MULTI_LOCK_ASSERT(); 373 IN6_MULTI_LIST_LOCK(); 374 IF_ADDR_WLOCK(ifp); 375 NET_EPOCH_ENTER(et); 376 inm = in6m_lookup_locked(ifp, group); 377 NET_EPOCH_EXIT(et); 378 379 if (inm != NULL) { 380 /* 381 * If we already joined this group, just bump the 382 * refcount and return it. 383 */ 384 KASSERT(inm->in6m_refcount >= 1, 385 ("%s: bad refcount %d", __func__, inm->in6m_refcount)); 386 in6m_acquire_locked(inm); 387 *pinm = inm; 388 goto out_locked; 389 } 390 391 memset(&gsin6, 0, sizeof(gsin6)); 392 gsin6.sin6_family = AF_INET6; 393 gsin6.sin6_len = sizeof(struct sockaddr_in6); 394 gsin6.sin6_addr = *group; 395 396 /* 397 * Check if a link-layer group is already associated 398 * with this network-layer group on the given ifnet. 399 */ 400 IN6_MULTI_LIST_UNLOCK(); 401 IF_ADDR_WUNLOCK(ifp); 402 error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); 403 if (error != 0) 404 return (error); 405 IN6_MULTI_LIST_LOCK(); 406 IF_ADDR_WLOCK(ifp); 407 408 /* 409 * If something other than netinet6 is occupying the link-layer 410 * group, print a meaningful error message and back out of 411 * the allocation. 412 * Otherwise, bump the refcount on the existing network-layer 413 * group association and return it. 414 */ 415 if (ifma->ifma_protospec != NULL) { 416 inm = (struct in6_multi *)ifma->ifma_protospec; 417 #ifdef INVARIANTS 418 KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 419 __func__)); 420 KASSERT(ifma->ifma_addr->sa_family == AF_INET6, 421 ("%s: ifma not AF_INET6", __func__)); 422 KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 423 if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp || 424 !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)) 425 panic("%s: ifma %p is inconsistent with %p (%p)", 426 __func__, ifma, inm, group); 427 #endif 428 in6m_acquire_locked(inm); 429 *pinm = inm; 430 goto out_locked; 431 } 432 433 IF_ADDR_WLOCK_ASSERT(ifp); 434 435 /* 436 * A new in6_multi record is needed; allocate and initialize it. 437 * We DO NOT perform an MLD join as the in6_ layer may need to 438 * push an initial source list down to MLD to support SSM. 439 * 440 * The initial source filter state is INCLUDE, {} as per the RFC. 441 * Pending state-changes per group are subject to a bounds check. 442 */ 443 inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO); 444 if (inm == NULL) { 445 IN6_MULTI_LIST_UNLOCK(); 446 IF_ADDR_WUNLOCK(ifp); 447 if_delmulti_ifma(ifma); 448 return (ENOMEM); 449 } 450 inm->in6m_addr = *group; 451 inm->in6m_ifp = ifp; 452 inm->in6m_mli = MLD_IFINFO(ifp); 453 inm->in6m_ifma = ifma; 454 inm->in6m_refcount = 1; 455 inm->in6m_state = MLD_NOT_MEMBER; 456 mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); 457 458 inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; 459 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 460 RB_INIT(&inm->in6m_srcs); 461 462 ifma->ifma_protospec = inm; 463 *pinm = inm; 464 465 out_locked: 466 IN6_MULTI_LIST_UNLOCK(); 467 IF_ADDR_WUNLOCK(ifp); 468 return (error); 469 } 470 471 /* 472 * Drop a reference to an in6_multi record. 473 * 474 * If the refcount drops to 0, free the in6_multi record and 475 * delete the underlying link-layer membership. 476 */ 477 static void 478 in6m_release(struct in6_multi *inm) 479 { 480 struct ifmultiaddr *ifma; 481 struct ifnet *ifp; 482 483 CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount); 484 485 MPASS(inm->in6m_refcount == 0); 486 CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm); 487 488 ifma = inm->in6m_ifma; 489 ifp = inm->in6m_ifp; 490 MPASS(ifma->ifma_llifma == NULL); 491 492 /* XXX this access is not covered by IF_ADDR_LOCK */ 493 CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma); 494 KASSERT(ifma->ifma_protospec == NULL, 495 ("%s: ifma_protospec != NULL", __func__)); 496 if (ifp == NULL) 497 ifp = ifma->ifma_ifp; 498 499 if (ifp != NULL) { 500 CURVNET_SET(ifp->if_vnet); 501 in6m_purge(inm); 502 free(inm, M_IP6MADDR); 503 if_delmulti_ifma_flags(ifma, 1); 504 CURVNET_RESTORE(); 505 if_rele(ifp); 506 } else { 507 in6m_purge(inm); 508 free(inm, M_IP6MADDR); 509 if_delmulti_ifma_flags(ifma, 1); 510 } 511 } 512 513 /* 514 * Interface detach can happen in a taskqueue thread context, so we must use a 515 * dedicated thread to avoid deadlocks when draining in6m_release tasks. 516 */ 517 TASKQUEUE_DEFINE_THREAD(in6m_free); 518 static struct task in6m_free_task; 519 static struct in6_multi_head in6m_free_list = SLIST_HEAD_INITIALIZER(); 520 static void in6m_release_task(void *arg __unused, int pending __unused); 521 522 static void 523 in6m_init(void) 524 { 525 TASK_INIT(&in6m_free_task, 0, in6m_release_task, NULL); 526 } 527 SYSINIT(in6m_init, SI_SUB_TASKQ, SI_ORDER_ANY, in6m_init, NULL); 528 529 void 530 in6m_release_list_deferred(struct in6_multi_head *inmh) 531 { 532 if (SLIST_EMPTY(inmh)) 533 return; 534 mtx_lock(&in6_multi_free_mtx); 535 SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele); 536 mtx_unlock(&in6_multi_free_mtx); 537 taskqueue_enqueue(taskqueue_in6m_free, &in6m_free_task); 538 } 539 540 void 541 in6m_release_wait(void) 542 { 543 taskqueue_drain_all(taskqueue_in6m_free); 544 } 545 546 void 547 in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm) 548 { 549 struct ifnet *ifp; 550 struct ifaddr *ifa; 551 struct in6_ifaddr *ifa6; 552 struct in6_multi_mship *imm, *imm_tmp; 553 struct ifmultiaddr *ifma, *ll_ifma; 554 555 IN6_MULTI_LIST_LOCK_ASSERT(); 556 557 ifp = inm->in6m_ifp; 558 if (ifp == NULL) 559 return; /* already called */ 560 561 inm->in6m_ifp = NULL; 562 IF_ADDR_WLOCK_ASSERT(ifp); 563 ifma = inm->in6m_ifma; 564 if (ifma == NULL) 565 return; 566 567 if_ref(ifp); 568 if (ifma->ifma_flags & IFMA_F_ENQUEUED) { 569 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link); 570 ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 571 } 572 MCDPRINTF("removed ifma: %p from %s\n", ifma, ifp->if_xname); 573 if ((ll_ifma = ifma->ifma_llifma) != NULL) { 574 MPASS(ifma != ll_ifma); 575 ifma->ifma_llifma = NULL; 576 MPASS(ll_ifma->ifma_llifma == NULL); 577 MPASS(ll_ifma->ifma_ifp == ifp); 578 if (--ll_ifma->ifma_refcount == 0) { 579 if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) { 580 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link); 581 ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 582 } 583 MCDPRINTF("removed ll_ifma: %p from %s\n", ll_ifma, ifp->if_xname); 584 if_freemulti(ll_ifma); 585 } 586 } 587 CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 588 if (ifa->ifa_addr->sa_family != AF_INET6) 589 continue; 590 ifa6 = (void *)ifa; 591 LIST_FOREACH_SAFE(imm, &ifa6->ia6_memberships, 592 i6mm_chain, imm_tmp) { 593 if (inm == imm->i6mm_maddr) { 594 LIST_REMOVE(imm, i6mm_chain); 595 free(imm, M_IP6MADDR); 596 in6m_rele_locked(inmh, inm); 597 } 598 } 599 } 600 } 601 602 static void 603 in6m_release_task(void *arg __unused, int pending __unused) 604 { 605 struct in6_multi_head in6m_free_tmp; 606 struct in6_multi *inm, *tinm; 607 608 SLIST_INIT(&in6m_free_tmp); 609 mtx_lock(&in6_multi_free_mtx); 610 SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele); 611 mtx_unlock(&in6_multi_free_mtx); 612 IN6_MULTI_LOCK(); 613 SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) { 614 SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele); 615 in6m_release(inm); 616 } 617 IN6_MULTI_UNLOCK(); 618 } 619 620 /* 621 * Clear recorded source entries for a group. 622 * Used by the MLD code. Caller must hold the IN6_MULTI lock. 623 * FIXME: Should reap. 624 */ 625 void 626 in6m_clear_recorded(struct in6_multi *inm) 627 { 628 struct ip6_msource *ims; 629 630 IN6_MULTI_LIST_LOCK_ASSERT(); 631 632 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 633 if (ims->im6s_stp) { 634 ims->im6s_stp = 0; 635 --inm->in6m_st[1].iss_rec; 636 } 637 } 638 KASSERT(inm->in6m_st[1].iss_rec == 0, 639 ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec)); 640 } 641 642 /* 643 * Record a source as pending for a Source-Group MLDv2 query. 644 * This lives here as it modifies the shared tree. 645 * 646 * inm is the group descriptor. 647 * naddr is the address of the source to record in network-byte order. 648 * 649 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will 650 * lazy-allocate a source node in response to an SG query. 651 * Otherwise, no allocation is performed. This saves some memory 652 * with the trade-off that the source will not be reported to the 653 * router if joined in the window between the query response and 654 * the group actually being joined on the local host. 655 * 656 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed. 657 * This turns off the allocation of a recorded source entry if 658 * the group has not been joined. 659 * 660 * Return 0 if the source didn't exist or was already marked as recorded. 661 * Return 1 if the source was marked as recorded by this function. 662 * Return <0 if any error occurred (negated errno code). 663 */ 664 int 665 in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) 666 { 667 struct ip6_msource find; 668 struct ip6_msource *ims, *nims; 669 670 IN6_MULTI_LIST_LOCK_ASSERT(); 671 672 find.im6s_addr = *addr; 673 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 674 if (ims && ims->im6s_stp) 675 return (0); 676 if (ims == NULL) { 677 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 678 return (-ENOSPC); 679 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 680 M_NOWAIT | M_ZERO); 681 if (nims == NULL) 682 return (-ENOMEM); 683 nims->im6s_addr = find.im6s_addr; 684 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 685 ++inm->in6m_nsrc; 686 ims = nims; 687 } 688 689 /* 690 * Mark the source as recorded and update the recorded 691 * source count. 692 */ 693 ++ims->im6s_stp; 694 ++inm->in6m_st[1].iss_rec; 695 696 return (1); 697 } 698 699 /* 700 * Return a pointer to an in6_msource owned by an in6_mfilter, 701 * given its source address. 702 * Lazy-allocate if needed. If this is a new entry its filter state is 703 * undefined at t0. 704 * 705 * imf is the filter set being modified. 706 * addr is the source address. 707 * 708 * SMPng: May be called with locks held; malloc must not block. 709 */ 710 static int 711 im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, 712 struct in6_msource **plims) 713 { 714 struct ip6_msource find; 715 struct ip6_msource *ims, *nims; 716 struct in6_msource *lims; 717 int error; 718 719 error = 0; 720 ims = NULL; 721 lims = NULL; 722 723 find.im6s_addr = psin->sin6_addr; 724 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 725 lims = (struct in6_msource *)ims; 726 if (lims == NULL) { 727 if (imf->im6f_nsrc == in6_mcast_maxsocksrc) 728 return (ENOSPC); 729 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 730 M_NOWAIT | M_ZERO); 731 if (nims == NULL) 732 return (ENOMEM); 733 lims = (struct in6_msource *)nims; 734 lims->im6s_addr = find.im6s_addr; 735 lims->im6sl_st[0] = MCAST_UNDEFINED; 736 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 737 ++imf->im6f_nsrc; 738 } 739 740 *plims = lims; 741 742 return (error); 743 } 744 745 /* 746 * Graft a source entry into an existing socket-layer filter set, 747 * maintaining any required invariants and checking allocations. 748 * 749 * The source is marked as being in the new filter mode at t1. 750 * 751 * Return the pointer to the new node, otherwise return NULL. 752 */ 753 static struct in6_msource * 754 im6f_graft(struct in6_mfilter *imf, const uint8_t st1, 755 const struct sockaddr_in6 *psin) 756 { 757 struct ip6_msource *nims; 758 struct in6_msource *lims; 759 760 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 761 M_NOWAIT | M_ZERO); 762 if (nims == NULL) 763 return (NULL); 764 lims = (struct in6_msource *)nims; 765 lims->im6s_addr = psin->sin6_addr; 766 lims->im6sl_st[0] = MCAST_UNDEFINED; 767 lims->im6sl_st[1] = st1; 768 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 769 ++imf->im6f_nsrc; 770 771 return (lims); 772 } 773 774 /* 775 * Prune a source entry from an existing socket-layer filter set, 776 * maintaining any required invariants and checking allocations. 777 * 778 * The source is marked as being left at t1, it is not freed. 779 * 780 * Return 0 if no error occurred, otherwise return an errno value. 781 */ 782 static int 783 im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) 784 { 785 struct ip6_msource find; 786 struct ip6_msource *ims; 787 struct in6_msource *lims; 788 789 find.im6s_addr = psin->sin6_addr; 790 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 791 if (ims == NULL) 792 return (ENOENT); 793 lims = (struct in6_msource *)ims; 794 lims->im6sl_st[1] = MCAST_UNDEFINED; 795 return (0); 796 } 797 798 /* 799 * Revert socket-layer filter set deltas at t1 to t0 state. 800 */ 801 static void 802 im6f_rollback(struct in6_mfilter *imf) 803 { 804 struct ip6_msource *ims, *tims; 805 struct in6_msource *lims; 806 807 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 808 lims = (struct in6_msource *)ims; 809 if (lims->im6sl_st[0] == lims->im6sl_st[1]) { 810 /* no change at t1 */ 811 continue; 812 } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) { 813 /* revert change to existing source at t1 */ 814 lims->im6sl_st[1] = lims->im6sl_st[0]; 815 } else { 816 /* revert source added t1 */ 817 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 818 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 819 free(ims, M_IN6MFILTER); 820 imf->im6f_nsrc--; 821 } 822 } 823 imf->im6f_st[1] = imf->im6f_st[0]; 824 } 825 826 /* 827 * Mark socket-layer filter set as INCLUDE {} at t1. 828 */ 829 static void 830 im6f_leave(struct in6_mfilter *imf) 831 { 832 struct ip6_msource *ims; 833 struct in6_msource *lims; 834 835 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 836 lims = (struct in6_msource *)ims; 837 lims->im6sl_st[1] = MCAST_UNDEFINED; 838 } 839 imf->im6f_st[1] = MCAST_INCLUDE; 840 } 841 842 /* 843 * Mark socket-layer filter set deltas as committed. 844 */ 845 static void 846 im6f_commit(struct in6_mfilter *imf) 847 { 848 struct ip6_msource *ims; 849 struct in6_msource *lims; 850 851 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 852 lims = (struct in6_msource *)ims; 853 lims->im6sl_st[0] = lims->im6sl_st[1]; 854 } 855 imf->im6f_st[0] = imf->im6f_st[1]; 856 } 857 858 /* 859 * Reap unreferenced sources from socket-layer filter set. 860 */ 861 static void 862 im6f_reap(struct in6_mfilter *imf) 863 { 864 struct ip6_msource *ims, *tims; 865 struct in6_msource *lims; 866 867 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 868 lims = (struct in6_msource *)ims; 869 if ((lims->im6sl_st[0] == MCAST_UNDEFINED) && 870 (lims->im6sl_st[1] == MCAST_UNDEFINED)) { 871 CTR2(KTR_MLD, "%s: free lims %p", __func__, ims); 872 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 873 free(ims, M_IN6MFILTER); 874 imf->im6f_nsrc--; 875 } 876 } 877 } 878 879 /* 880 * Purge socket-layer filter set. 881 */ 882 static void 883 im6f_purge(struct in6_mfilter *imf) 884 { 885 struct ip6_msource *ims, *tims; 886 887 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 888 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 889 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 890 free(ims, M_IN6MFILTER); 891 imf->im6f_nsrc--; 892 } 893 imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED; 894 KASSERT(RB_EMPTY(&imf->im6f_sources), 895 ("%s: im6f_sources not empty", __func__)); 896 } 897 898 /* 899 * Look up a source filter entry for a multicast group. 900 * 901 * inm is the group descriptor to work with. 902 * addr is the IPv6 address to look up. 903 * noalloc may be non-zero to suppress allocation of sources. 904 * *pims will be set to the address of the retrieved or allocated source. 905 * 906 * SMPng: NOTE: may be called with locks held. 907 * Return 0 if successful, otherwise return a non-zero error code. 908 */ 909 static int 910 in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, 911 const int noalloc, struct ip6_msource **pims) 912 { 913 struct ip6_msource find; 914 struct ip6_msource *ims, *nims; 915 #ifdef KTR 916 char ip6tbuf[INET6_ADDRSTRLEN]; 917 #endif 918 919 find.im6s_addr = *addr; 920 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 921 if (ims == NULL && !noalloc) { 922 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 923 return (ENOSPC); 924 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 925 M_NOWAIT | M_ZERO); 926 if (nims == NULL) 927 return (ENOMEM); 928 nims->im6s_addr = *addr; 929 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 930 ++inm->in6m_nsrc; 931 ims = nims; 932 CTR3(KTR_MLD, "%s: allocated %s as %p", __func__, 933 ip6_sprintf(ip6tbuf, addr), ims); 934 } 935 936 *pims = ims; 937 return (0); 938 } 939 940 /* 941 * Merge socket-layer source into MLD-layer source. 942 * If rollback is non-zero, perform the inverse of the merge. 943 */ 944 static void 945 im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, 946 const int rollback) 947 { 948 int n = rollback ? -1 : 1; 949 #ifdef KTR 950 char ip6tbuf[INET6_ADDRSTRLEN]; 951 952 ip6_sprintf(ip6tbuf, &lims->im6s_addr); 953 #endif 954 955 if (lims->im6sl_st[0] == MCAST_EXCLUDE) { 956 CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf); 957 ims->im6s_st[1].ex -= n; 958 } else if (lims->im6sl_st[0] == MCAST_INCLUDE) { 959 CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf); 960 ims->im6s_st[1].in -= n; 961 } 962 963 if (lims->im6sl_st[1] == MCAST_EXCLUDE) { 964 CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf); 965 ims->im6s_st[1].ex += n; 966 } else if (lims->im6sl_st[1] == MCAST_INCLUDE) { 967 CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf); 968 ims->im6s_st[1].in += n; 969 } 970 } 971 972 /* 973 * Atomically update the global in6_multi state, when a membership's 974 * filter list is being updated in any way. 975 * 976 * imf is the per-inpcb-membership group filter pointer. 977 * A fake imf may be passed for in-kernel consumers. 978 * 979 * XXX This is a candidate for a set-symmetric-difference style loop 980 * which would eliminate the repeated lookup from root of ims nodes, 981 * as they share the same key space. 982 * 983 * If any error occurred this function will back out of refcounts 984 * and return a non-zero value. 985 */ 986 static int 987 in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 988 { 989 struct ip6_msource *ims, *nims; 990 struct in6_msource *lims; 991 int schanged, error; 992 int nsrc0, nsrc1; 993 994 schanged = 0; 995 error = 0; 996 nsrc1 = nsrc0 = 0; 997 IN6_MULTI_LIST_LOCK_ASSERT(); 998 999 /* 1000 * Update the source filters first, as this may fail. 1001 * Maintain count of in-mode filters at t0, t1. These are 1002 * used to work out if we transition into ASM mode or not. 1003 * Maintain a count of source filters whose state was 1004 * actually modified by this operation. 1005 */ 1006 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1007 lims = (struct in6_msource *)ims; 1008 if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; 1009 if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; 1010 if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; 1011 error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); 1012 ++schanged; 1013 if (error) 1014 break; 1015 im6s_merge(nims, lims, 0); 1016 } 1017 if (error) { 1018 struct ip6_msource *bims; 1019 1020 RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { 1021 lims = (struct in6_msource *)ims; 1022 if (lims->im6sl_st[0] == lims->im6sl_st[1]) 1023 continue; 1024 (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims); 1025 if (bims == NULL) 1026 continue; 1027 im6s_merge(bims, lims, 1); 1028 } 1029 goto out_reap; 1030 } 1031 1032 CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1", 1033 __func__, nsrc0, nsrc1); 1034 1035 /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ 1036 if (imf->im6f_st[0] == imf->im6f_st[1] && 1037 imf->im6f_st[1] == MCAST_INCLUDE) { 1038 if (nsrc1 == 0) { 1039 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1040 --inm->in6m_st[1].iss_in; 1041 } 1042 } 1043 1044 /* Handle filter mode transition on socket. */ 1045 if (imf->im6f_st[0] != imf->im6f_st[1]) { 1046 CTR3(KTR_MLD, "%s: imf transition %d to %d", 1047 __func__, imf->im6f_st[0], imf->im6f_st[1]); 1048 1049 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 1050 CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__); 1051 --inm->in6m_st[1].iss_ex; 1052 } else if (imf->im6f_st[0] == MCAST_INCLUDE) { 1053 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1054 --inm->in6m_st[1].iss_in; 1055 } 1056 1057 if (imf->im6f_st[1] == MCAST_EXCLUDE) { 1058 CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__); 1059 inm->in6m_st[1].iss_ex++; 1060 } else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) { 1061 CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__); 1062 inm->in6m_st[1].iss_in++; 1063 } 1064 } 1065 1066 /* 1067 * Track inm filter state in terms of listener counts. 1068 * If there are any exclusive listeners, stack-wide 1069 * membership is exclusive. 1070 * Otherwise, if only inclusive listeners, stack-wide is inclusive. 1071 * If no listeners remain, state is undefined at t1, 1072 * and the MLD lifecycle for this group should finish. 1073 */ 1074 if (inm->in6m_st[1].iss_ex > 0) { 1075 CTR1(KTR_MLD, "%s: transition to EX", __func__); 1076 inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE; 1077 } else if (inm->in6m_st[1].iss_in > 0) { 1078 CTR1(KTR_MLD, "%s: transition to IN", __func__); 1079 inm->in6m_st[1].iss_fmode = MCAST_INCLUDE; 1080 } else { 1081 CTR1(KTR_MLD, "%s: transition to UNDEF", __func__); 1082 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 1083 } 1084 1085 /* Decrement ASM listener count on transition out of ASM mode. */ 1086 if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { 1087 if ((imf->im6f_st[1] != MCAST_EXCLUDE) || 1088 (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) { 1089 CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__); 1090 --inm->in6m_st[1].iss_asm; 1091 } 1092 } 1093 1094 /* Increment ASM listener count on transition to ASM mode. */ 1095 if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { 1096 CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__); 1097 inm->in6m_st[1].iss_asm++; 1098 } 1099 1100 CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm); 1101 in6m_print(inm); 1102 1103 out_reap: 1104 if (schanged > 0) { 1105 CTR1(KTR_MLD, "%s: sources changed; reaping", __func__); 1106 in6m_reap(inm); 1107 } 1108 return (error); 1109 } 1110 1111 /* 1112 * Mark an in6_multi's filter set deltas as committed. 1113 * Called by MLD after a state change has been enqueued. 1114 */ 1115 void 1116 in6m_commit(struct in6_multi *inm) 1117 { 1118 struct ip6_msource *ims; 1119 1120 CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm); 1121 CTR1(KTR_MLD, "%s: pre commit:", __func__); 1122 in6m_print(inm); 1123 1124 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 1125 ims->im6s_st[0] = ims->im6s_st[1]; 1126 } 1127 inm->in6m_st[0] = inm->in6m_st[1]; 1128 } 1129 1130 /* 1131 * Reap unreferenced nodes from an in6_multi's filter set. 1132 */ 1133 static void 1134 in6m_reap(struct in6_multi *inm) 1135 { 1136 struct ip6_msource *ims, *tims; 1137 1138 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1139 if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 || 1140 ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 || 1141 ims->im6s_stp != 0) 1142 continue; 1143 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1144 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1145 free(ims, M_IP6MSOURCE); 1146 inm->in6m_nsrc--; 1147 } 1148 } 1149 1150 /* 1151 * Purge all source nodes from an in6_multi's filter set. 1152 */ 1153 static void 1154 in6m_purge(struct in6_multi *inm) 1155 { 1156 struct ip6_msource *ims, *tims; 1157 1158 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1159 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1160 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1161 free(ims, M_IP6MSOURCE); 1162 inm->in6m_nsrc--; 1163 } 1164 /* Free state-change requests that might be queued. */ 1165 mbufq_drain(&inm->in6m_scq); 1166 } 1167 1168 /* 1169 * Join a multicast address w/o sources. 1170 * KAME compatibility entry point. 1171 * 1172 * SMPng: Assume no mc locks held by caller. 1173 */ 1174 int 1175 in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr, 1176 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1177 const int delay) 1178 { 1179 int error; 1180 1181 IN6_MULTI_LOCK(); 1182 error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay); 1183 IN6_MULTI_UNLOCK(); 1184 return (error); 1185 } 1186 1187 /* 1188 * Join a multicast group; real entry point. 1189 * 1190 * Only preserves atomicity at inm level. 1191 * NOTE: imf argument cannot be const due to sys/tree.h limitations. 1192 * 1193 * If the MLD downcall fails, the group is not joined, and an error 1194 * code is returned. 1195 */ 1196 static int 1197 in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr, 1198 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1199 const int delay) 1200 { 1201 struct in6_multi_head inmh; 1202 struct in6_mfilter timf; 1203 struct in6_multi *inm; 1204 struct ifmultiaddr *ifma; 1205 int error; 1206 #ifdef KTR 1207 char ip6tbuf[INET6_ADDRSTRLEN]; 1208 #endif 1209 1210 /* 1211 * Sanity: Check scope zone ID was set for ifp, if and 1212 * only if group is scoped to an interface. 1213 */ 1214 KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr), 1215 ("%s: not a multicast address", __func__)); 1216 if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) || 1217 IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) { 1218 KASSERT(mcaddr->s6_addr16[1] != 0, 1219 ("%s: scope zone ID not set", __func__)); 1220 } 1221 1222 IN6_MULTI_LOCK_ASSERT(); 1223 IN6_MULTI_LIST_UNLOCK_ASSERT(); 1224 1225 CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__, 1226 ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp)); 1227 1228 error = 0; 1229 inm = NULL; 1230 1231 /* 1232 * If no imf was specified (i.e. kernel consumer), 1233 * fake one up and assume it is an ASM join. 1234 */ 1235 if (imf == NULL) { 1236 im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); 1237 imf = &timf; 1238 } 1239 error = in6_getmulti(ifp, mcaddr, &inm); 1240 if (error) { 1241 CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__); 1242 return (error); 1243 } 1244 1245 IN6_MULTI_LIST_LOCK(); 1246 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1247 error = in6m_merge(inm, imf); 1248 if (error) { 1249 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1250 goto out_in6m_release; 1251 } 1252 1253 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1254 error = mld_change_state(inm, delay); 1255 if (error) { 1256 CTR1(KTR_MLD, "%s: failed to update source", __func__); 1257 goto out_in6m_release; 1258 } 1259 1260 out_in6m_release: 1261 SLIST_INIT(&inmh); 1262 if (error) { 1263 struct epoch_tracker et; 1264 1265 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1266 IF_ADDR_WLOCK(ifp); 1267 NET_EPOCH_ENTER(et); 1268 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1269 if (ifma->ifma_protospec == inm) { 1270 ifma->ifma_protospec = NULL; 1271 break; 1272 } 1273 } 1274 in6m_disconnect_locked(&inmh, inm); 1275 in6m_rele_locked(&inmh, inm); 1276 NET_EPOCH_EXIT(et); 1277 IF_ADDR_WUNLOCK(ifp); 1278 } else { 1279 *pinm = inm; 1280 } 1281 IN6_MULTI_LIST_UNLOCK(); 1282 in6m_release_list_deferred(&inmh); 1283 return (error); 1284 } 1285 1286 /* 1287 * Leave a multicast group; unlocked entry point. 1288 */ 1289 int 1290 in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1291 { 1292 int error; 1293 1294 IN6_MULTI_LOCK(); 1295 error = in6_leavegroup_locked(inm, imf); 1296 IN6_MULTI_UNLOCK(); 1297 return (error); 1298 } 1299 1300 /* 1301 * Leave a multicast group; real entry point. 1302 * All source filters will be expunged. 1303 * 1304 * Only preserves atomicity at inm level. 1305 * 1306 * Holding the write lock for the INP which contains imf 1307 * is highly advisable. We can't assert for it as imf does not 1308 * contain a back-pointer to the owning inp. 1309 * 1310 * Note: This is not the same as in6m_release(*) as this function also 1311 * makes a state change downcall into MLD. 1312 */ 1313 int 1314 in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1315 { 1316 struct in6_multi_head inmh; 1317 struct in6_mfilter timf; 1318 struct ifnet *ifp; 1319 int error; 1320 #ifdef KTR 1321 char ip6tbuf[INET6_ADDRSTRLEN]; 1322 #endif 1323 1324 error = 0; 1325 1326 IN6_MULTI_LOCK_ASSERT(); 1327 1328 CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__, 1329 inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 1330 (in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)), 1331 imf); 1332 1333 /* 1334 * If no imf was specified (i.e. kernel consumer), 1335 * fake one up and assume it is an ASM join. 1336 */ 1337 if (imf == NULL) { 1338 im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); 1339 imf = &timf; 1340 } 1341 1342 /* 1343 * Begin state merge transaction at MLD layer. 1344 * 1345 * As this particular invocation should not cause any memory 1346 * to be allocated, and there is no opportunity to roll back 1347 * the transaction, it MUST NOT fail. 1348 */ 1349 1350 ifp = inm->in6m_ifp; 1351 IN6_MULTI_LIST_LOCK(); 1352 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1353 error = in6m_merge(inm, imf); 1354 KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1355 1356 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1357 error = 0; 1358 if (ifp) 1359 error = mld_change_state(inm, 0); 1360 if (error) 1361 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1362 1363 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1364 if (ifp) 1365 IF_ADDR_WLOCK(ifp); 1366 1367 SLIST_INIT(&inmh); 1368 if (inm->in6m_refcount == 1) 1369 in6m_disconnect_locked(&inmh, inm); 1370 in6m_rele_locked(&inmh, inm); 1371 if (ifp) 1372 IF_ADDR_WUNLOCK(ifp); 1373 IN6_MULTI_LIST_UNLOCK(); 1374 in6m_release_list_deferred(&inmh); 1375 return (error); 1376 } 1377 1378 1379 /* 1380 * Block or unblock an ASM multicast source on an inpcb. 1381 * This implements the delta-based API described in RFC 3678. 1382 * 1383 * The delta-based API applies only to exclusive-mode memberships. 1384 * An MLD downcall will be performed. 1385 * 1386 * SMPng: NOTE: Must take Giant as a join may create a new ifma. 1387 * 1388 * Return 0 if successful, otherwise return an appropriate error code. 1389 */ 1390 static int 1391 in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1392 { 1393 struct group_source_req gsr; 1394 sockunion_t *gsa, *ssa; 1395 struct ifnet *ifp; 1396 struct in6_mfilter *imf; 1397 struct ip6_moptions *imo; 1398 struct in6_msource *ims; 1399 struct in6_multi *inm; 1400 uint16_t fmode; 1401 int error, doblock; 1402 #ifdef KTR 1403 char ip6tbuf[INET6_ADDRSTRLEN]; 1404 #endif 1405 1406 ifp = NULL; 1407 error = 0; 1408 doblock = 0; 1409 1410 memset(&gsr, 0, sizeof(struct group_source_req)); 1411 gsa = (sockunion_t *)&gsr.gsr_group; 1412 ssa = (sockunion_t *)&gsr.gsr_source; 1413 1414 switch (sopt->sopt_name) { 1415 case MCAST_BLOCK_SOURCE: 1416 case MCAST_UNBLOCK_SOURCE: 1417 error = sooptcopyin(sopt, &gsr, 1418 sizeof(struct group_source_req), 1419 sizeof(struct group_source_req)); 1420 if (error) 1421 return (error); 1422 1423 if (gsa->sin6.sin6_family != AF_INET6 || 1424 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1425 return (EINVAL); 1426 1427 if (ssa->sin6.sin6_family != AF_INET6 || 1428 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1429 return (EINVAL); 1430 1431 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1432 return (EADDRNOTAVAIL); 1433 1434 ifp = ifnet_byindex(gsr.gsr_interface); 1435 1436 if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1437 doblock = 1; 1438 break; 1439 1440 default: 1441 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1442 __func__, sopt->sopt_name); 1443 return (EOPNOTSUPP); 1444 break; 1445 } 1446 1447 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1448 return (EINVAL); 1449 1450 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1451 1452 /* 1453 * Check if we are actually a member of this group. 1454 */ 1455 imo = in6p_findmoptions(inp); 1456 imf = im6o_match_group(imo, ifp, &gsa->sa); 1457 if (imf == NULL) { 1458 error = EADDRNOTAVAIL; 1459 goto out_in6p_locked; 1460 } 1461 inm = imf->im6f_in6m; 1462 1463 /* 1464 * Attempting to use the delta-based API on an 1465 * non exclusive-mode membership is an error. 1466 */ 1467 fmode = imf->im6f_st[0]; 1468 if (fmode != MCAST_EXCLUDE) { 1469 error = EINVAL; 1470 goto out_in6p_locked; 1471 } 1472 1473 /* 1474 * Deal with error cases up-front: 1475 * Asked to block, but already blocked; or 1476 * Asked to unblock, but nothing to unblock. 1477 * If adding a new block entry, allocate it. 1478 */ 1479 ims = im6o_match_source(imf, &ssa->sa); 1480 if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1481 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 1482 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 1483 doblock ? "" : "not "); 1484 error = EADDRNOTAVAIL; 1485 goto out_in6p_locked; 1486 } 1487 1488 INP_WLOCK_ASSERT(inp); 1489 1490 /* 1491 * Begin state merge transaction at socket layer. 1492 */ 1493 if (doblock) { 1494 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 1495 ims = im6f_graft(imf, fmode, &ssa->sin6); 1496 if (ims == NULL) 1497 error = ENOMEM; 1498 } else { 1499 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 1500 error = im6f_prune(imf, &ssa->sin6); 1501 } 1502 1503 if (error) { 1504 CTR1(KTR_MLD, "%s: merge imf state failed", __func__); 1505 goto out_im6f_rollback; 1506 } 1507 1508 /* 1509 * Begin state merge transaction at MLD layer. 1510 */ 1511 IN6_MULTI_LIST_LOCK(); 1512 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1513 error = in6m_merge(inm, imf); 1514 if (error) 1515 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1516 else { 1517 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1518 error = mld_change_state(inm, 0); 1519 if (error) 1520 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1521 } 1522 1523 IN6_MULTI_LIST_UNLOCK(); 1524 1525 out_im6f_rollback: 1526 if (error) 1527 im6f_rollback(imf); 1528 else 1529 im6f_commit(imf); 1530 1531 im6f_reap(imf); 1532 1533 out_in6p_locked: 1534 INP_WUNLOCK(inp); 1535 return (error); 1536 } 1537 1538 /* 1539 * Given an inpcb, return its multicast options structure pointer. Accepts 1540 * an unlocked inpcb pointer, but will return it locked. May sleep. 1541 * 1542 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 1543 * SMPng: NOTE: Returns with the INP write lock held. 1544 */ 1545 static struct ip6_moptions * 1546 in6p_findmoptions(struct inpcb *inp) 1547 { 1548 struct ip6_moptions *imo; 1549 1550 INP_WLOCK(inp); 1551 if (inp->in6p_moptions != NULL) 1552 return (inp->in6p_moptions); 1553 1554 INP_WUNLOCK(inp); 1555 1556 imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK); 1557 1558 imo->im6o_multicast_ifp = NULL; 1559 imo->im6o_multicast_hlim = V_ip6_defmcasthlim; 1560 imo->im6o_multicast_loop = in6_mcast_loop; 1561 STAILQ_INIT(&imo->im6o_head); 1562 1563 INP_WLOCK(inp); 1564 if (inp->in6p_moptions != NULL) { 1565 free(imo, M_IP6MOPTS); 1566 return (inp->in6p_moptions); 1567 } 1568 inp->in6p_moptions = imo; 1569 return (imo); 1570 } 1571 1572 /* 1573 * Discard the IPv6 multicast options (and source filters). 1574 * 1575 * SMPng: NOTE: assumes INP write lock is held. 1576 * 1577 * XXX can all be safely deferred to epoch_call 1578 * 1579 */ 1580 1581 static void 1582 inp_gcmoptions(struct ip6_moptions *imo) 1583 { 1584 struct in6_mfilter *imf; 1585 struct in6_multi *inm; 1586 struct ifnet *ifp; 1587 1588 while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) { 1589 ip6_mfilter_remove(&imo->im6o_head, imf); 1590 1591 im6f_leave(imf); 1592 if ((inm = imf->im6f_in6m) != NULL) { 1593 if ((ifp = inm->in6m_ifp) != NULL) { 1594 CURVNET_SET(ifp->if_vnet); 1595 (void)in6_leavegroup(inm, imf); 1596 CURVNET_RESTORE(); 1597 } else { 1598 (void)in6_leavegroup(inm, imf); 1599 } 1600 } 1601 ip6_mfilter_free(imf); 1602 } 1603 free(imo, M_IP6MOPTS); 1604 } 1605 1606 void 1607 ip6_freemoptions(struct ip6_moptions *imo) 1608 { 1609 if (imo == NULL) 1610 return; 1611 inp_gcmoptions(imo); 1612 } 1613 1614 /* 1615 * Atomically get source filters on a socket for an IPv6 multicast group. 1616 * Called with INP lock held; returns with lock released. 1617 */ 1618 static int 1619 in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) 1620 { 1621 struct __msfilterreq msfr; 1622 sockunion_t *gsa; 1623 struct ifnet *ifp; 1624 struct ip6_moptions *imo; 1625 struct in6_mfilter *imf; 1626 struct ip6_msource *ims; 1627 struct in6_msource *lims; 1628 struct sockaddr_in6 *psin; 1629 struct sockaddr_storage *ptss; 1630 struct sockaddr_storage *tss; 1631 int error; 1632 size_t nsrcs, ncsrcs; 1633 1634 INP_WLOCK_ASSERT(inp); 1635 1636 imo = inp->in6p_moptions; 1637 KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__)); 1638 1639 INP_WUNLOCK(inp); 1640 1641 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 1642 sizeof(struct __msfilterreq)); 1643 if (error) 1644 return (error); 1645 1646 if (msfr.msfr_group.ss_family != AF_INET6 || 1647 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 1648 return (EINVAL); 1649 1650 gsa = (sockunion_t *)&msfr.msfr_group; 1651 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1652 return (EINVAL); 1653 1654 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 1655 return (EADDRNOTAVAIL); 1656 ifp = ifnet_byindex(msfr.msfr_ifindex); 1657 if (ifp == NULL) 1658 return (EADDRNOTAVAIL); 1659 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1660 1661 INP_WLOCK(inp); 1662 1663 /* 1664 * Lookup group on the socket. 1665 */ 1666 imf = im6o_match_group(imo, ifp, &gsa->sa); 1667 if (imf == NULL) { 1668 INP_WUNLOCK(inp); 1669 return (EADDRNOTAVAIL); 1670 } 1671 1672 /* 1673 * Ignore memberships which are in limbo. 1674 */ 1675 if (imf->im6f_st[1] == MCAST_UNDEFINED) { 1676 INP_WUNLOCK(inp); 1677 return (EAGAIN); 1678 } 1679 msfr.msfr_fmode = imf->im6f_st[1]; 1680 1681 /* 1682 * If the user specified a buffer, copy out the source filter 1683 * entries to userland gracefully. 1684 * We only copy out the number of entries which userland 1685 * has asked for, but we always tell userland how big the 1686 * buffer really needs to be. 1687 */ 1688 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 1689 msfr.msfr_nsrcs = in6_mcast_maxsocksrc; 1690 tss = NULL; 1691 if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { 1692 tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 1693 M_TEMP, M_NOWAIT | M_ZERO); 1694 if (tss == NULL) { 1695 INP_WUNLOCK(inp); 1696 return (ENOBUFS); 1697 } 1698 } 1699 1700 /* 1701 * Count number of sources in-mode at t0. 1702 * If buffer space exists and remains, copy out source entries. 1703 */ 1704 nsrcs = msfr.msfr_nsrcs; 1705 ncsrcs = 0; 1706 ptss = tss; 1707 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1708 lims = (struct in6_msource *)ims; 1709 if (lims->im6sl_st[0] == MCAST_UNDEFINED || 1710 lims->im6sl_st[0] != imf->im6f_st[0]) 1711 continue; 1712 ++ncsrcs; 1713 if (tss != NULL && nsrcs > 0) { 1714 psin = (struct sockaddr_in6 *)ptss; 1715 psin->sin6_family = AF_INET6; 1716 psin->sin6_len = sizeof(struct sockaddr_in6); 1717 psin->sin6_addr = lims->im6s_addr; 1718 psin->sin6_port = 0; 1719 --nsrcs; 1720 ++ptss; 1721 } 1722 } 1723 1724 INP_WUNLOCK(inp); 1725 1726 if (tss != NULL) { 1727 error = copyout(tss, msfr.msfr_srcs, 1728 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 1729 free(tss, M_TEMP); 1730 if (error) 1731 return (error); 1732 } 1733 1734 msfr.msfr_nsrcs = ncsrcs; 1735 error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); 1736 1737 return (error); 1738 } 1739 1740 /* 1741 * Return the IP multicast options in response to user getsockopt(). 1742 */ 1743 int 1744 ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) 1745 { 1746 struct ip6_moptions *im6o; 1747 int error; 1748 u_int optval; 1749 1750 INP_WLOCK(inp); 1751 im6o = inp->in6p_moptions; 1752 /* 1753 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 1754 * or is a divert socket, reject it. 1755 */ 1756 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 1757 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 1758 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { 1759 INP_WUNLOCK(inp); 1760 return (EOPNOTSUPP); 1761 } 1762 1763 error = 0; 1764 switch (sopt->sopt_name) { 1765 case IPV6_MULTICAST_IF: 1766 if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { 1767 optval = 0; 1768 } else { 1769 optval = im6o->im6o_multicast_ifp->if_index; 1770 } 1771 INP_WUNLOCK(inp); 1772 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1773 break; 1774 1775 case IPV6_MULTICAST_HOPS: 1776 if (im6o == NULL) 1777 optval = V_ip6_defmcasthlim; 1778 else 1779 optval = im6o->im6o_multicast_hlim; 1780 INP_WUNLOCK(inp); 1781 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1782 break; 1783 1784 case IPV6_MULTICAST_LOOP: 1785 if (im6o == NULL) 1786 optval = in6_mcast_loop; /* XXX VIMAGE */ 1787 else 1788 optval = im6o->im6o_multicast_loop; 1789 INP_WUNLOCK(inp); 1790 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1791 break; 1792 1793 case IPV6_MSFILTER: 1794 if (im6o == NULL) { 1795 error = EADDRNOTAVAIL; 1796 INP_WUNLOCK(inp); 1797 } else { 1798 error = in6p_get_source_filters(inp, sopt); 1799 } 1800 break; 1801 1802 default: 1803 INP_WUNLOCK(inp); 1804 error = ENOPROTOOPT; 1805 break; 1806 } 1807 1808 INP_UNLOCK_ASSERT(inp); 1809 1810 return (error); 1811 } 1812 1813 /* 1814 * Look up the ifnet to use for a multicast group membership, 1815 * given the address of an IPv6 group. 1816 * 1817 * This routine exists to support legacy IPv6 multicast applications. 1818 * 1819 * If inp is non-NULL, use this socket's current FIB number for any 1820 * required FIB lookup. Look up the group address in the unicast FIB, 1821 * and use its ifp; usually, this points to the default next-hop. 1822 * If the FIB lookup fails, return NULL. 1823 * 1824 * FUTURE: Support multiple forwarding tables for IPv6. 1825 * 1826 * Returns NULL if no ifp could be found. 1827 */ 1828 static struct ifnet * 1829 in6p_lookup_mcast_ifp(const struct inpcb *inp, 1830 const struct sockaddr_in6 *gsin6) 1831 { 1832 struct nhop6_basic nh6; 1833 struct in6_addr dst; 1834 uint32_t scopeid; 1835 uint32_t fibnum; 1836 1837 KASSERT(inp->inp_vflag & INP_IPV6, 1838 ("%s: not INP_IPV6 inpcb", __func__)); 1839 KASSERT(gsin6->sin6_family == AF_INET6, 1840 ("%s: not AF_INET6 group", __func__)); 1841 1842 in6_splitscope(&gsin6->sin6_addr, &dst, &scopeid); 1843 fibnum = inp ? inp->inp_inc.inc_fibnum : RT_DEFAULT_FIB; 1844 if (fib6_lookup_nh_basic(fibnum, &dst, scopeid, 0, 0, &nh6) != 0) 1845 return (NULL); 1846 1847 return (nh6.nh_ifp); 1848 } 1849 1850 /* 1851 * Join an IPv6 multicast group, possibly with a source. 1852 * 1853 * FIXME: The KAME use of the unspecified address (::) 1854 * to join *all* multicast groups is currently unsupported. 1855 */ 1856 static int 1857 in6p_join_group(struct inpcb *inp, struct sockopt *sopt) 1858 { 1859 struct in6_multi_head inmh; 1860 struct group_source_req gsr; 1861 sockunion_t *gsa, *ssa; 1862 struct ifnet *ifp; 1863 struct in6_mfilter *imf; 1864 struct ip6_moptions *imo; 1865 struct in6_multi *inm; 1866 struct in6_msource *lims; 1867 int error, is_new; 1868 1869 SLIST_INIT(&inmh); 1870 ifp = NULL; 1871 lims = NULL; 1872 error = 0; 1873 1874 memset(&gsr, 0, sizeof(struct group_source_req)); 1875 gsa = (sockunion_t *)&gsr.gsr_group; 1876 gsa->ss.ss_family = AF_UNSPEC; 1877 ssa = (sockunion_t *)&gsr.gsr_source; 1878 ssa->ss.ss_family = AF_UNSPEC; 1879 1880 /* 1881 * Chew everything into struct group_source_req. 1882 * Overwrite the port field if present, as the sockaddr 1883 * being copied in may be matched with a binary comparison. 1884 * Ignore passed-in scope ID. 1885 */ 1886 switch (sopt->sopt_name) { 1887 case IPV6_JOIN_GROUP: { 1888 struct ipv6_mreq mreq; 1889 1890 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 1891 sizeof(struct ipv6_mreq)); 1892 if (error) 1893 return (error); 1894 1895 gsa->sin6.sin6_family = AF_INET6; 1896 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 1897 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 1898 1899 if (mreq.ipv6mr_interface == 0) { 1900 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 1901 } else { 1902 if (V_if_index < mreq.ipv6mr_interface) 1903 return (EADDRNOTAVAIL); 1904 ifp = ifnet_byindex(mreq.ipv6mr_interface); 1905 } 1906 CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p", 1907 __func__, mreq.ipv6mr_interface, ifp); 1908 } break; 1909 1910 case MCAST_JOIN_GROUP: 1911 case MCAST_JOIN_SOURCE_GROUP: 1912 if (sopt->sopt_name == MCAST_JOIN_GROUP) { 1913 error = sooptcopyin(sopt, &gsr, 1914 sizeof(struct group_req), 1915 sizeof(struct group_req)); 1916 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1917 error = sooptcopyin(sopt, &gsr, 1918 sizeof(struct group_source_req), 1919 sizeof(struct group_source_req)); 1920 } 1921 if (error) 1922 return (error); 1923 1924 if (gsa->sin6.sin6_family != AF_INET6 || 1925 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1926 return (EINVAL); 1927 1928 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1929 if (ssa->sin6.sin6_family != AF_INET6 || 1930 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1931 return (EINVAL); 1932 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 1933 return (EINVAL); 1934 /* 1935 * TODO: Validate embedded scope ID in source 1936 * list entry against passed-in ifp, if and only 1937 * if source list filter entry is iface or node local. 1938 */ 1939 in6_clearscope(&ssa->sin6.sin6_addr); 1940 ssa->sin6.sin6_port = 0; 1941 ssa->sin6.sin6_scope_id = 0; 1942 } 1943 1944 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1945 return (EADDRNOTAVAIL); 1946 ifp = ifnet_byindex(gsr.gsr_interface); 1947 break; 1948 1949 default: 1950 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1951 __func__, sopt->sopt_name); 1952 return (EOPNOTSUPP); 1953 break; 1954 } 1955 1956 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1957 return (EINVAL); 1958 1959 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 1960 return (EADDRNOTAVAIL); 1961 1962 gsa->sin6.sin6_port = 0; 1963 gsa->sin6.sin6_scope_id = 0; 1964 1965 /* 1966 * Always set the scope zone ID on memberships created from userland. 1967 * Use the passed-in ifp to do this. 1968 * XXX The in6_setscope() return value is meaningless. 1969 * XXX SCOPE6_LOCK() is taken by in6_setscope(). 1970 */ 1971 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1972 1973 IN6_MULTI_LOCK(); 1974 1975 /* 1976 * Find the membership in the membership list. 1977 */ 1978 imo = in6p_findmoptions(inp); 1979 imf = im6o_match_group(imo, ifp, &gsa->sa); 1980 if (imf == NULL) { 1981 is_new = 1; 1982 inm = NULL; 1983 1984 if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) { 1985 error = ENOMEM; 1986 goto out_in6p_locked; 1987 } 1988 } else { 1989 is_new = 0; 1990 inm = imf->im6f_in6m; 1991 1992 if (ssa->ss.ss_family != AF_UNSPEC) { 1993 /* 1994 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 1995 * is an error. On an existing inclusive membership, 1996 * it just adds the source to the filter list. 1997 */ 1998 if (imf->im6f_st[1] != MCAST_INCLUDE) { 1999 error = EINVAL; 2000 goto out_in6p_locked; 2001 } 2002 /* 2003 * Throw out duplicates. 2004 * 2005 * XXX FIXME: This makes a naive assumption that 2006 * even if entries exist for *ssa in this imf, 2007 * they will be rejected as dupes, even if they 2008 * are not valid in the current mode (in-mode). 2009 * 2010 * in6_msource is transactioned just as for anything 2011 * else in SSM -- but note naive use of in6m_graft() 2012 * below for allocating new filter entries. 2013 * 2014 * This is only an issue if someone mixes the 2015 * full-state SSM API with the delta-based API, 2016 * which is discouraged in the relevant RFCs. 2017 */ 2018 lims = im6o_match_source(imf, &ssa->sa); 2019 if (lims != NULL /*&& 2020 lims->im6sl_st[1] == MCAST_INCLUDE*/) { 2021 error = EADDRNOTAVAIL; 2022 goto out_in6p_locked; 2023 } 2024 } else { 2025 /* 2026 * MCAST_JOIN_GROUP alone, on any existing membership, 2027 * is rejected, to stop the same inpcb tying up 2028 * multiple refs to the in_multi. 2029 * On an existing inclusive membership, this is also 2030 * an error; if you want to change filter mode, 2031 * you must use the userland API setsourcefilter(). 2032 * XXX We don't reject this for imf in UNDEFINED 2033 * state at t1, because allocation of a filter 2034 * is atomic with allocation of a membership. 2035 */ 2036 error = EINVAL; 2037 goto out_in6p_locked; 2038 } 2039 } 2040 2041 /* 2042 * Begin state merge transaction at socket layer. 2043 */ 2044 INP_WLOCK_ASSERT(inp); 2045 2046 /* 2047 * Graft new source into filter list for this inpcb's 2048 * membership of the group. The in6_multi may not have 2049 * been allocated yet if this is a new membership, however, 2050 * the in_mfilter slot will be allocated and must be initialized. 2051 * 2052 * Note: Grafting of exclusive mode filters doesn't happen 2053 * in this path. 2054 * XXX: Should check for non-NULL lims (node exists but may 2055 * not be in-mode) for interop with full-state API. 2056 */ 2057 if (ssa->ss.ss_family != AF_UNSPEC) { 2058 /* Membership starts in IN mode */ 2059 if (is_new) { 2060 CTR1(KTR_MLD, "%s: new join w/source", __func__); 2061 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE); 2062 if (imf == NULL) { 2063 error = ENOMEM; 2064 goto out_in6p_locked; 2065 } 2066 } else { 2067 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 2068 } 2069 lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); 2070 if (lims == NULL) { 2071 CTR1(KTR_MLD, "%s: merge imf state failed", 2072 __func__); 2073 error = ENOMEM; 2074 goto out_in6p_locked; 2075 } 2076 } else { 2077 /* No address specified; Membership starts in EX mode */ 2078 if (is_new) { 2079 CTR1(KTR_MLD, "%s: new join w/o source", __func__); 2080 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE); 2081 if (imf == NULL) { 2082 error = ENOMEM; 2083 goto out_in6p_locked; 2084 } 2085 } 2086 } 2087 2088 /* 2089 * Begin state merge transaction at MLD layer. 2090 */ 2091 if (is_new) { 2092 in_pcbref(inp); 2093 INP_WUNLOCK(inp); 2094 2095 error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf, 2096 &imf->im6f_in6m, 0); 2097 2098 INP_WLOCK(inp); 2099 if (in_pcbrele_wlocked(inp)) { 2100 error = ENXIO; 2101 goto out_in6p_unlocked; 2102 } 2103 if (error) { 2104 goto out_in6p_locked; 2105 } 2106 /* 2107 * NOTE: Refcount from in6_joingroup_locked() 2108 * is protecting membership. 2109 */ 2110 ip6_mfilter_insert(&imo->im6o_head, imf); 2111 } else { 2112 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2113 IN6_MULTI_LIST_LOCK(); 2114 error = in6m_merge(inm, imf); 2115 if (error) { 2116 CTR1(KTR_MLD, "%s: failed to merge inm state", 2117 __func__); 2118 IN6_MULTI_LIST_UNLOCK(); 2119 im6f_rollback(imf); 2120 im6f_reap(imf); 2121 goto out_in6p_locked; 2122 } 2123 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2124 error = mld_change_state(inm, 0); 2125 IN6_MULTI_LIST_UNLOCK(); 2126 2127 if (error) { 2128 CTR1(KTR_MLD, "%s: failed mld downcall", 2129 __func__); 2130 im6f_rollback(imf); 2131 im6f_reap(imf); 2132 goto out_in6p_locked; 2133 } 2134 } 2135 2136 im6f_commit(imf); 2137 imf = NULL; 2138 2139 out_in6p_locked: 2140 INP_WUNLOCK(inp); 2141 out_in6p_unlocked: 2142 IN6_MULTI_UNLOCK(); 2143 2144 if (is_new && imf) { 2145 if (imf->im6f_in6m != NULL) { 2146 struct in6_multi_head inmh; 2147 2148 SLIST_INIT(&inmh); 2149 SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer); 2150 in6m_release_list_deferred(&inmh); 2151 } 2152 ip6_mfilter_free(imf); 2153 } 2154 return (error); 2155 } 2156 2157 /* 2158 * Leave an IPv6 multicast group on an inpcb, possibly with a source. 2159 */ 2160 static int 2161 in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) 2162 { 2163 struct ipv6_mreq mreq; 2164 struct group_source_req gsr; 2165 sockunion_t *gsa, *ssa; 2166 struct ifnet *ifp; 2167 struct in6_mfilter *imf; 2168 struct ip6_moptions *imo; 2169 struct in6_msource *ims; 2170 struct in6_multi *inm; 2171 uint32_t ifindex; 2172 int error; 2173 bool is_final; 2174 #ifdef KTR 2175 char ip6tbuf[INET6_ADDRSTRLEN]; 2176 #endif 2177 2178 ifp = NULL; 2179 ifindex = 0; 2180 error = 0; 2181 is_final = true; 2182 2183 memset(&gsr, 0, sizeof(struct group_source_req)); 2184 gsa = (sockunion_t *)&gsr.gsr_group; 2185 gsa->ss.ss_family = AF_UNSPEC; 2186 ssa = (sockunion_t *)&gsr.gsr_source; 2187 ssa->ss.ss_family = AF_UNSPEC; 2188 2189 /* 2190 * Chew everything passed in up into a struct group_source_req 2191 * as that is easier to process. 2192 * Note: Any embedded scope ID in the multicast group passed 2193 * in by userland is ignored, the interface index is the recommended 2194 * mechanism to specify an interface; see below. 2195 */ 2196 switch (sopt->sopt_name) { 2197 case IPV6_LEAVE_GROUP: 2198 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 2199 sizeof(struct ipv6_mreq)); 2200 if (error) 2201 return (error); 2202 gsa->sin6.sin6_family = AF_INET6; 2203 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 2204 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 2205 gsa->sin6.sin6_port = 0; 2206 gsa->sin6.sin6_scope_id = 0; 2207 ifindex = mreq.ipv6mr_interface; 2208 break; 2209 2210 case MCAST_LEAVE_GROUP: 2211 case MCAST_LEAVE_SOURCE_GROUP: 2212 if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2213 error = sooptcopyin(sopt, &gsr, 2214 sizeof(struct group_req), 2215 sizeof(struct group_req)); 2216 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2217 error = sooptcopyin(sopt, &gsr, 2218 sizeof(struct group_source_req), 2219 sizeof(struct group_source_req)); 2220 } 2221 if (error) 2222 return (error); 2223 2224 if (gsa->sin6.sin6_family != AF_INET6 || 2225 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2226 return (EINVAL); 2227 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2228 if (ssa->sin6.sin6_family != AF_INET6 || 2229 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2230 return (EINVAL); 2231 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 2232 return (EINVAL); 2233 /* 2234 * TODO: Validate embedded scope ID in source 2235 * list entry against passed-in ifp, if and only 2236 * if source list filter entry is iface or node local. 2237 */ 2238 in6_clearscope(&ssa->sin6.sin6_addr); 2239 } 2240 gsa->sin6.sin6_port = 0; 2241 gsa->sin6.sin6_scope_id = 0; 2242 ifindex = gsr.gsr_interface; 2243 break; 2244 2245 default: 2246 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 2247 __func__, sopt->sopt_name); 2248 return (EOPNOTSUPP); 2249 break; 2250 } 2251 2252 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2253 return (EINVAL); 2254 2255 /* 2256 * Validate interface index if provided. If no interface index 2257 * was provided separately, attempt to look the membership up 2258 * from the default scope as a last resort to disambiguate 2259 * the membership we are being asked to leave. 2260 * XXX SCOPE6 lock potentially taken here. 2261 */ 2262 if (ifindex != 0) { 2263 if (V_if_index < ifindex) 2264 return (EADDRNOTAVAIL); 2265 ifp = ifnet_byindex(ifindex); 2266 if (ifp == NULL) 2267 return (EADDRNOTAVAIL); 2268 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2269 } else { 2270 error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); 2271 if (error) 2272 return (EADDRNOTAVAIL); 2273 /* 2274 * Some badly behaved applications don't pass an ifindex 2275 * or a scope ID, which is an API violation. In this case, 2276 * perform a lookup as per a v6 join. 2277 * 2278 * XXX For now, stomp on zone ID for the corner case. 2279 * This is not the 'KAME way', but we need to see the ifp 2280 * directly until such time as this implementation is 2281 * refactored, assuming the scope IDs are the way to go. 2282 */ 2283 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]); 2284 if (ifindex == 0) { 2285 CTR2(KTR_MLD, "%s: warning: no ifindex, looking up " 2286 "ifp for group %s.", __func__, 2287 ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr)); 2288 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 2289 } else { 2290 ifp = ifnet_byindex(ifindex); 2291 } 2292 if (ifp == NULL) 2293 return (EADDRNOTAVAIL); 2294 } 2295 2296 CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp); 2297 KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__)); 2298 2299 IN6_MULTI_LOCK(); 2300 2301 /* 2302 * Find the membership in the membership list. 2303 */ 2304 imo = in6p_findmoptions(inp); 2305 imf = im6o_match_group(imo, ifp, &gsa->sa); 2306 if (imf == NULL) { 2307 error = EADDRNOTAVAIL; 2308 goto out_in6p_locked; 2309 } 2310 inm = imf->im6f_in6m; 2311 2312 if (ssa->ss.ss_family != AF_UNSPEC) 2313 is_final = false; 2314 2315 /* 2316 * Begin state merge transaction at socket layer. 2317 */ 2318 INP_WLOCK_ASSERT(inp); 2319 2320 /* 2321 * If we were instructed only to leave a given source, do so. 2322 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2323 */ 2324 if (is_final) { 2325 ip6_mfilter_remove(&imo->im6o_head, imf); 2326 im6f_leave(imf); 2327 2328 /* 2329 * Give up the multicast address record to which 2330 * the membership points. 2331 */ 2332 (void)in6_leavegroup_locked(inm, imf); 2333 } else { 2334 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 2335 error = EADDRNOTAVAIL; 2336 goto out_in6p_locked; 2337 } 2338 ims = im6o_match_source(imf, &ssa->sa); 2339 if (ims == NULL) { 2340 CTR3(KTR_MLD, "%s: source %p %spresent", __func__, 2341 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 2342 "not "); 2343 error = EADDRNOTAVAIL; 2344 goto out_in6p_locked; 2345 } 2346 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 2347 error = im6f_prune(imf, &ssa->sin6); 2348 if (error) { 2349 CTR1(KTR_MLD, "%s: merge imf state failed", 2350 __func__); 2351 goto out_in6p_locked; 2352 } 2353 } 2354 2355 /* 2356 * Begin state merge transaction at MLD layer. 2357 */ 2358 if (!is_final) { 2359 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2360 IN6_MULTI_LIST_LOCK(); 2361 error = in6m_merge(inm, imf); 2362 if (error) { 2363 CTR1(KTR_MLD, "%s: failed to merge inm state", 2364 __func__); 2365 IN6_MULTI_LIST_UNLOCK(); 2366 im6f_rollback(imf); 2367 im6f_reap(imf); 2368 goto out_in6p_locked; 2369 } 2370 2371 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2372 error = mld_change_state(inm, 0); 2373 IN6_MULTI_LIST_UNLOCK(); 2374 if (error) { 2375 CTR1(KTR_MLD, "%s: failed mld downcall", 2376 __func__); 2377 im6f_rollback(imf); 2378 im6f_reap(imf); 2379 goto out_in6p_locked; 2380 } 2381 } 2382 2383 im6f_commit(imf); 2384 im6f_reap(imf); 2385 2386 out_in6p_locked: 2387 INP_WUNLOCK(inp); 2388 2389 if (is_final && imf) 2390 ip6_mfilter_free(imf); 2391 2392 IN6_MULTI_UNLOCK(); 2393 return (error); 2394 } 2395 2396 /* 2397 * Select the interface for transmitting IPv6 multicast datagrams. 2398 * 2399 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn 2400 * may be passed to this socket option. An address of in6addr_any or an 2401 * interface index of 0 is used to remove a previous selection. 2402 * When no interface is selected, one is chosen for every send. 2403 */ 2404 static int 2405 in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) 2406 { 2407 struct ifnet *ifp; 2408 struct ip6_moptions *imo; 2409 u_int ifindex; 2410 int error; 2411 2412 if (sopt->sopt_valsize != sizeof(u_int)) 2413 return (EINVAL); 2414 2415 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); 2416 if (error) 2417 return (error); 2418 if (V_if_index < ifindex) 2419 return (EINVAL); 2420 if (ifindex == 0) 2421 ifp = NULL; 2422 else { 2423 ifp = ifnet_byindex(ifindex); 2424 if (ifp == NULL) 2425 return (EINVAL); 2426 if ((ifp->if_flags & IFF_MULTICAST) == 0) 2427 return (EADDRNOTAVAIL); 2428 } 2429 imo = in6p_findmoptions(inp); 2430 imo->im6o_multicast_ifp = ifp; 2431 INP_WUNLOCK(inp); 2432 2433 return (0); 2434 } 2435 2436 /* 2437 * Atomically set source filters on a socket for an IPv6 multicast group. 2438 * 2439 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 2440 */ 2441 static int 2442 in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) 2443 { 2444 struct __msfilterreq msfr; 2445 sockunion_t *gsa; 2446 struct ifnet *ifp; 2447 struct in6_mfilter *imf; 2448 struct ip6_moptions *imo; 2449 struct in6_multi *inm; 2450 int error; 2451 2452 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 2453 sizeof(struct __msfilterreq)); 2454 if (error) 2455 return (error); 2456 2457 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 2458 return (ENOBUFS); 2459 2460 if (msfr.msfr_fmode != MCAST_EXCLUDE && 2461 msfr.msfr_fmode != MCAST_INCLUDE) 2462 return (EINVAL); 2463 2464 if (msfr.msfr_group.ss_family != AF_INET6 || 2465 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 2466 return (EINVAL); 2467 2468 gsa = (sockunion_t *)&msfr.msfr_group; 2469 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2470 return (EINVAL); 2471 2472 gsa->sin6.sin6_port = 0; /* ignore port */ 2473 2474 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 2475 return (EADDRNOTAVAIL); 2476 ifp = ifnet_byindex(msfr.msfr_ifindex); 2477 if (ifp == NULL) 2478 return (EADDRNOTAVAIL); 2479 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2480 2481 /* 2482 * Take the INP write lock. 2483 * Check if this socket is a member of this group. 2484 */ 2485 imo = in6p_findmoptions(inp); 2486 imf = im6o_match_group(imo, ifp, &gsa->sa); 2487 if (imf == NULL) { 2488 error = EADDRNOTAVAIL; 2489 goto out_in6p_locked; 2490 } 2491 inm = imf->im6f_in6m; 2492 2493 /* 2494 * Begin state merge transaction at socket layer. 2495 */ 2496 INP_WLOCK_ASSERT(inp); 2497 2498 imf->im6f_st[1] = msfr.msfr_fmode; 2499 2500 /* 2501 * Apply any new source filters, if present. 2502 * Make a copy of the user-space source vector so 2503 * that we may copy them with a single copyin. This 2504 * allows us to deal with page faults up-front. 2505 */ 2506 if (msfr.msfr_nsrcs > 0) { 2507 struct in6_msource *lims; 2508 struct sockaddr_in6 *psin; 2509 struct sockaddr_storage *kss, *pkss; 2510 int i; 2511 2512 INP_WUNLOCK(inp); 2513 2514 CTR2(KTR_MLD, "%s: loading %lu source list entries", 2515 __func__, (unsigned long)msfr.msfr_nsrcs); 2516 kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 2517 M_TEMP, M_WAITOK); 2518 error = copyin(msfr.msfr_srcs, kss, 2519 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 2520 if (error) { 2521 free(kss, M_TEMP); 2522 return (error); 2523 } 2524 2525 INP_WLOCK(inp); 2526 2527 /* 2528 * Mark all source filters as UNDEFINED at t1. 2529 * Restore new group filter mode, as im6f_leave() 2530 * will set it to INCLUDE. 2531 */ 2532 im6f_leave(imf); 2533 imf->im6f_st[1] = msfr.msfr_fmode; 2534 2535 /* 2536 * Update socket layer filters at t1, lazy-allocating 2537 * new entries. This saves a bunch of memory at the 2538 * cost of one RB_FIND() per source entry; duplicate 2539 * entries in the msfr_nsrcs vector are ignored. 2540 * If we encounter an error, rollback transaction. 2541 * 2542 * XXX This too could be replaced with a set-symmetric 2543 * difference like loop to avoid walking from root 2544 * every time, as the key space is common. 2545 */ 2546 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2547 psin = (struct sockaddr_in6 *)pkss; 2548 if (psin->sin6_family != AF_INET6) { 2549 error = EAFNOSUPPORT; 2550 break; 2551 } 2552 if (psin->sin6_len != sizeof(struct sockaddr_in6)) { 2553 error = EINVAL; 2554 break; 2555 } 2556 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { 2557 error = EINVAL; 2558 break; 2559 } 2560 /* 2561 * TODO: Validate embedded scope ID in source 2562 * list entry against passed-in ifp, if and only 2563 * if source list filter entry is iface or node local. 2564 */ 2565 in6_clearscope(&psin->sin6_addr); 2566 error = im6f_get_source(imf, psin, &lims); 2567 if (error) 2568 break; 2569 lims->im6sl_st[1] = imf->im6f_st[1]; 2570 } 2571 free(kss, M_TEMP); 2572 } 2573 2574 if (error) 2575 goto out_im6f_rollback; 2576 2577 INP_WLOCK_ASSERT(inp); 2578 IN6_MULTI_LIST_LOCK(); 2579 2580 /* 2581 * Begin state merge transaction at MLD layer. 2582 */ 2583 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2584 error = in6m_merge(inm, imf); 2585 if (error) 2586 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 2587 else { 2588 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2589 error = mld_change_state(inm, 0); 2590 if (error) 2591 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 2592 } 2593 2594 IN6_MULTI_LIST_UNLOCK(); 2595 2596 out_im6f_rollback: 2597 if (error) 2598 im6f_rollback(imf); 2599 else 2600 im6f_commit(imf); 2601 2602 im6f_reap(imf); 2603 2604 out_in6p_locked: 2605 INP_WUNLOCK(inp); 2606 return (error); 2607 } 2608 2609 /* 2610 * Set the IP multicast options in response to user setsockopt(). 2611 * 2612 * Many of the socket options handled in this function duplicate the 2613 * functionality of socket options in the regular unicast API. However, 2614 * it is not possible to merge the duplicate code, because the idempotence 2615 * of the IPv6 multicast part of the BSD Sockets API must be preserved; 2616 * the effects of these options must be treated as separate and distinct. 2617 * 2618 * SMPng: XXX: Unlocked read of inp_socket believed OK. 2619 */ 2620 int 2621 ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2622 { 2623 struct ip6_moptions *im6o; 2624 int error; 2625 2626 error = 0; 2627 2628 /* 2629 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 2630 * or is a divert socket, reject it. 2631 */ 2632 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 2633 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2634 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) 2635 return (EOPNOTSUPP); 2636 2637 switch (sopt->sopt_name) { 2638 case IPV6_MULTICAST_IF: 2639 error = in6p_set_multicast_if(inp, sopt); 2640 break; 2641 2642 case IPV6_MULTICAST_HOPS: { 2643 int hlim; 2644 2645 if (sopt->sopt_valsize != sizeof(int)) { 2646 error = EINVAL; 2647 break; 2648 } 2649 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); 2650 if (error) 2651 break; 2652 if (hlim < -1 || hlim > 255) { 2653 error = EINVAL; 2654 break; 2655 } else if (hlim == -1) { 2656 hlim = V_ip6_defmcasthlim; 2657 } 2658 im6o = in6p_findmoptions(inp); 2659 im6o->im6o_multicast_hlim = hlim; 2660 INP_WUNLOCK(inp); 2661 break; 2662 } 2663 2664 case IPV6_MULTICAST_LOOP: { 2665 u_int loop; 2666 2667 /* 2668 * Set the loopback flag for outgoing multicast packets. 2669 * Must be zero or one. 2670 */ 2671 if (sopt->sopt_valsize != sizeof(u_int)) { 2672 error = EINVAL; 2673 break; 2674 } 2675 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); 2676 if (error) 2677 break; 2678 if (loop > 1) { 2679 error = EINVAL; 2680 break; 2681 } 2682 im6o = in6p_findmoptions(inp); 2683 im6o->im6o_multicast_loop = loop; 2684 INP_WUNLOCK(inp); 2685 break; 2686 } 2687 2688 case IPV6_JOIN_GROUP: 2689 case MCAST_JOIN_GROUP: 2690 case MCAST_JOIN_SOURCE_GROUP: 2691 error = in6p_join_group(inp, sopt); 2692 break; 2693 2694 case IPV6_LEAVE_GROUP: 2695 case MCAST_LEAVE_GROUP: 2696 case MCAST_LEAVE_SOURCE_GROUP: 2697 error = in6p_leave_group(inp, sopt); 2698 break; 2699 2700 case MCAST_BLOCK_SOURCE: 2701 case MCAST_UNBLOCK_SOURCE: 2702 error = in6p_block_unblock_source(inp, sopt); 2703 break; 2704 2705 case IPV6_MSFILTER: 2706 error = in6p_set_source_filters(inp, sopt); 2707 break; 2708 2709 default: 2710 error = EOPNOTSUPP; 2711 break; 2712 } 2713 2714 INP_UNLOCK_ASSERT(inp); 2715 2716 return (error); 2717 } 2718 2719 /* 2720 * Expose MLD's multicast filter mode and source list(s) to userland, 2721 * keyed by (ifindex, group). 2722 * The filter mode is written out as a uint32_t, followed by 2723 * 0..n of struct in6_addr. 2724 * For use by ifmcstat(8). 2725 * SMPng: NOTE: unlocked read of ifindex space. 2726 */ 2727 static int 2728 sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) 2729 { 2730 struct in6_addr mcaddr; 2731 struct in6_addr src; 2732 struct epoch_tracker et; 2733 struct ifnet *ifp; 2734 struct ifmultiaddr *ifma; 2735 struct in6_multi *inm; 2736 struct ip6_msource *ims; 2737 int *name; 2738 int retval; 2739 u_int namelen; 2740 uint32_t fmode, ifindex; 2741 #ifdef KTR 2742 char ip6tbuf[INET6_ADDRSTRLEN]; 2743 #endif 2744 2745 name = (int *)arg1; 2746 namelen = arg2; 2747 2748 if (req->newptr != NULL) 2749 return (EPERM); 2750 2751 /* int: ifindex + 4 * 32 bits of IPv6 address */ 2752 if (namelen != 5) 2753 return (EINVAL); 2754 2755 ifindex = name[0]; 2756 if (ifindex <= 0 || ifindex > V_if_index) { 2757 CTR2(KTR_MLD, "%s: ifindex %u out of range", 2758 __func__, ifindex); 2759 return (ENOENT); 2760 } 2761 2762 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); 2763 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { 2764 CTR2(KTR_MLD, "%s: group %s is not multicast", 2765 __func__, ip6_sprintf(ip6tbuf, &mcaddr)); 2766 return (EINVAL); 2767 } 2768 2769 NET_EPOCH_ENTER(et); 2770 ifp = ifnet_byindex(ifindex); 2771 if (ifp == NULL) { 2772 NET_EPOCH_EXIT(et); 2773 CTR2(KTR_MLD, "%s: no ifp for ifindex %u", 2774 __func__, ifindex); 2775 return (ENOENT); 2776 } 2777 /* 2778 * Internal MLD lookups require that scope/zone ID is set. 2779 */ 2780 (void)in6_setscope(&mcaddr, ifp, NULL); 2781 2782 retval = sysctl_wire_old_buffer(req, 2783 sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); 2784 if (retval) { 2785 NET_EPOCH_EXIT(et); 2786 return (retval); 2787 } 2788 2789 IN6_MULTI_LOCK(); 2790 IN6_MULTI_LIST_LOCK(); 2791 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2792 inm = in6m_ifmultiaddr_get_inm(ifma); 2793 if (inm == NULL) 2794 continue; 2795 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) 2796 continue; 2797 fmode = inm->in6m_st[1].iss_fmode; 2798 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); 2799 if (retval != 0) 2800 break; 2801 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 2802 CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); 2803 /* 2804 * Only copy-out sources which are in-mode. 2805 */ 2806 if (fmode != im6s_get_mode(inm, ims, 1)) { 2807 CTR1(KTR_MLD, "%s: skip non-in-mode", 2808 __func__); 2809 continue; 2810 } 2811 src = ims->im6s_addr; 2812 retval = SYSCTL_OUT(req, &src, 2813 sizeof(struct in6_addr)); 2814 if (retval != 0) 2815 break; 2816 } 2817 } 2818 IN6_MULTI_LIST_UNLOCK(); 2819 IN6_MULTI_UNLOCK(); 2820 NET_EPOCH_EXIT(et); 2821 2822 return (retval); 2823 } 2824 2825 #ifdef KTR 2826 2827 static const char *in6m_modestrs[] = { "un", "in", "ex" }; 2828 2829 static const char * 2830 in6m_mode_str(const int mode) 2831 { 2832 2833 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) 2834 return (in6m_modestrs[mode]); 2835 return ("??"); 2836 } 2837 2838 static const char *in6m_statestrs[] = { 2839 "not-member", 2840 "silent", 2841 "idle", 2842 "lazy", 2843 "sleeping", 2844 "awakening", 2845 "query-pending", 2846 "sg-query-pending", 2847 "leaving" 2848 }; 2849 2850 static const char * 2851 in6m_state_str(const int state) 2852 { 2853 2854 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) 2855 return (in6m_statestrs[state]); 2856 return ("??"); 2857 } 2858 2859 /* 2860 * Dump an in6_multi structure to the console. 2861 */ 2862 void 2863 in6m_print(const struct in6_multi *inm) 2864 { 2865 int t; 2866 char ip6tbuf[INET6_ADDRSTRLEN]; 2867 2868 if ((ktr_mask & KTR_MLD) == 0) 2869 return; 2870 2871 printf("%s: --- begin in6m %p ---\n", __func__, inm); 2872 printf("addr %s ifp %p(%s) ifma %p\n", 2873 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2874 inm->in6m_ifp, 2875 if_name(inm->in6m_ifp), 2876 inm->in6m_ifma); 2877 printf("timer %u state %s refcount %u scq.len %u\n", 2878 inm->in6m_timer, 2879 in6m_state_str(inm->in6m_state), 2880 inm->in6m_refcount, 2881 mbufq_len(&inm->in6m_scq)); 2882 printf("mli %p nsrc %lu sctimer %u scrv %u\n", 2883 inm->in6m_mli, 2884 inm->in6m_nsrc, 2885 inm->in6m_sctimer, 2886 inm->in6m_scrv); 2887 for (t = 0; t < 2; t++) { 2888 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, 2889 in6m_mode_str(inm->in6m_st[t].iss_fmode), 2890 inm->in6m_st[t].iss_asm, 2891 inm->in6m_st[t].iss_ex, 2892 inm->in6m_st[t].iss_in, 2893 inm->in6m_st[t].iss_rec); 2894 } 2895 printf("%s: --- end in6m %p ---\n", __func__, inm); 2896 } 2897 2898 #else /* !KTR */ 2899 2900 void 2901 in6m_print(const struct in6_multi *inm) 2902 { 2903 2904 } 2905 2906 #endif /* KTR */ 2907