/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009 Bruce Simpson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IPv6 multicast socket, group, and socket option processing module.
 * Normative references: RFC 2292, RFC 3493, RFC 3542, RFC 3678, RFC 3810.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/udp.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet6/nd6.h>
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>

#ifndef KTR_MLD
#define KTR_MLD KTR_INET6
#endif

#ifndef __SOCKUNION_DECLARED
union sockunion {
	struct sockaddr_storage	ss;
	struct sockaddr		sa;
	struct sockaddr_dl	sdl;
	struct sockaddr_in6	sin6;
};
typedef union sockunion sockunion_t;
#define __SOCKUNION_DECLARED
#endif /* __SOCKUNION_DECLARED */

static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
    "IPv6 multicast PCB-layer source filter");
MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
    "IPv6 multicast MLD-layer source filter");

RB_GENERATE(ip6_msource_tree,
ip6_msource, im6s_link, ip6_msource_cmp); 102 103 /* 104 * Locking: 105 * - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK, 106 * IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK. 107 * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however 108 * it can be taken by code in net/if.c also. 109 * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK. 110 * 111 * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly 112 * any need for in6_multi itself to be virtualized -- it is bound to an ifp 113 * anyway no matter what happens. 114 */ 115 struct mtx in6_multi_list_mtx; 116 MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF); 117 118 struct mtx in6_multi_free_mtx; 119 MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF); 120 121 struct sx in6_multi_sx; 122 SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx"); 123 124 static void im6f_commit(struct in6_mfilter *); 125 static int im6f_get_source(struct in6_mfilter *imf, 126 const struct sockaddr_in6 *psin, 127 struct in6_msource **); 128 static struct in6_msource * 129 im6f_graft(struct in6_mfilter *, const uint8_t, 130 const struct sockaddr_in6 *); 131 static void im6f_leave(struct in6_mfilter *); 132 static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); 133 static void im6f_purge(struct in6_mfilter *); 134 static void im6f_rollback(struct in6_mfilter *); 135 static void im6f_reap(struct in6_mfilter *); 136 static struct in6_mfilter * 137 im6o_match_group(const struct ip6_moptions *, 138 const struct ifnet *, const struct sockaddr *); 139 static struct in6_msource * 140 im6o_match_source(struct in6_mfilter *, const struct sockaddr *); 141 static void im6s_merge(struct ip6_msource *ims, 142 const struct in6_msource *lims, const int rollback); 143 static int in6_getmulti(struct ifnet *, const struct in6_addr *, 144 struct in6_multi **); 145 static int in6_joingroup_locked(struct ifnet *, const struct in6_addr *, 146 struct in6_mfilter *, struct in6_multi **, int); 147 static int in6m_get_source(struct in6_multi *inm, 148 const struct in6_addr *addr, const int noalloc, 149 struct ip6_msource **pims); 150 #ifdef KTR 151 static int in6m_is_ifp_detached(const struct in6_multi *); 152 #endif 153 static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); 154 static void in6m_purge(struct in6_multi *); 155 static void in6m_reap(struct in6_multi *); 156 static struct ip6_moptions * 157 in6p_findmoptions(struct inpcb *); 158 static int in6p_get_source_filters(struct inpcb *, struct sockopt *); 159 static int in6p_join_group(struct inpcb *, struct sockopt *); 160 static int in6p_leave_group(struct inpcb *, struct sockopt *); 161 static struct ifnet * 162 in6p_lookup_mcast_ifp(const struct inpcb *, 163 const struct sockaddr_in6 *); 164 static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); 165 static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); 166 static int in6p_set_source_filters(struct inpcb *, struct sockopt *); 167 static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS); 168 169 SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. 
 */

static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "IPv6 multicast");

static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
    CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0,
    "Max source filters per group");

static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
    CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0,
    "Max source filters per socket");

/* TODO Virtualize this switch. */
int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP;
SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN,
    &in6_mcast_loop, 0, "Loopback multicast datagrams by default");

static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters,
    "Per-interface stack-wide source filters");

#ifdef KTR
/*
 * Inline function which wraps assertions for a valid ifp.
 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
 * is detached.
 */
static int __inline
in6m_is_ifp_detached(const struct in6_multi *inm)
{
	struct ifnet *ifp;

	KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
	ifp = inm->in6m_ifma->ifma_ifp;
	if (ifp != NULL) {
		/*
		 * Sanity check that network-layer notion of ifp is the
		 * same as that of link-layer.
		 */
		KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
	}

	return (ifp == NULL);
}
#endif

/*
 * Initialize an in6_mfilter structure to a known state at t0, t1
 * with an empty source filter list.
 */
static __inline void
im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
{
	memset(imf, 0, sizeof(struct in6_mfilter));
	RB_INIT(&imf->im6f_sources);
	imf->im6f_st[0] = st0;
	imf->im6f_st[1] = st1;
}

struct in6_mfilter *
ip6_mfilter_alloc(const int mflags, const int st0, const int st1)
{
	struct in6_mfilter *imf;

	imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags);

	if (imf != NULL)
		im6f_init(imf, st0, st1);

	return (imf);
}

void
ip6_mfilter_free(struct in6_mfilter *imf)
{

	im6f_purge(imf);
	free(imf, M_IN6MFILTER);
}

/*
 * Find an IPv6 multicast group entry for this ip6_moptions instance
 * which matches the specified group, and optionally an interface.
 * Return a pointer to the matching in6_mfilter, or NULL if not found.
 */
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr *group)
{
	const struct sockaddr_in6 *gsin6;
	struct in6_mfilter *imf;
	struct in6_multi *inm;

	gsin6 = (const struct sockaddr_in6 *)group;

	IP6_MFILTER_FOREACH(imf, &imo->im6o_head) {
		inm = imf->im6f_in6m;
		if (inm == NULL)
			continue;
		if ((ifp == NULL || (inm->in6m_ifp == ifp)) &&
		    IN6_ARE_ADDR_EQUAL(&inm->in6m_addr,
			&gsin6->sin6_addr)) {
			break;
		}
	}
	return (imf);
}

/*
 * Find an IPv6 multicast source entry for this imo which matches
 * the given group filter (imf) for this socket, and source address.
 *
 * XXX TODO: The scope ID, if present in src, is stripped before
 * any comparison. We SHOULD enforce scope/zone checks where the source
 * filter entry has a link scope.
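 * For example (illustrative only): a link-local source supplied with an
 * embedded zone ID, i.e. the in-kernel form of fe80::1%ifN, is looked up
 * here as plain fe80::1, because in6_clearscope() zeroes the embedded
 * zone in the search key before the RB_FIND().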
288 * 289 * NOTE: This does not check if the entry is in-mode, merely if 290 * it exists, which may not be the desired behaviour. 291 */ 292 static struct in6_msource * 293 im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src) 294 { 295 struct ip6_msource find; 296 struct ip6_msource *ims; 297 const sockunion_t *psa; 298 299 KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__)); 300 301 psa = (const sockunion_t *)src; 302 find.im6s_addr = psa->sin6.sin6_addr; 303 in6_clearscope(&find.im6s_addr); /* XXX */ 304 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 305 306 return ((struct in6_msource *)ims); 307 } 308 309 /* 310 * Perform filtering for multicast datagrams on a socket by group and source. 311 * 312 * Returns 0 if a datagram should be allowed through, or various error codes 313 * if the socket was not a member of the group, or the source was muted, etc. 314 */ 315 int 316 im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, 317 const struct sockaddr *group, const struct sockaddr *src) 318 { 319 struct in6_mfilter *imf; 320 struct in6_msource *ims; 321 int mode; 322 323 KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 324 325 imf = im6o_match_group(imo, ifp, group); 326 if (imf == NULL) 327 return (MCAST_NOTGMEMBER); 328 329 /* 330 * Check if the source was included in an (S,G) join. 331 * Allow reception on exclusive memberships by default, 332 * reject reception on inclusive memberships by default. 333 * Exclude source only if an in-mode exclude filter exists. 334 * Include source only if an in-mode include filter exists. 335 * NOTE: We are comparing group state here at MLD t1 (now) 336 * with socket-layer t0 (since last downcall). 337 */ 338 mode = imf->im6f_st[1]; 339 ims = im6o_match_source(imf, src); 340 341 if ((ims == NULL && mode == MCAST_INCLUDE) || 342 (ims != NULL && ims->im6sl_st[0] != mode)) 343 return (MCAST_NOTSMEMBER); 344 345 return (MCAST_PASS); 346 } 347 348 /* 349 * Find and return a reference to an in6_multi record for (ifp, group), 350 * and bump its reference count. 351 * If one does not exist, try to allocate it, and update link-layer multicast 352 * filters on ifp to listen for group. 353 * Assumes the IN6_MULTI lock is held across the call. 354 * Return 0 if successful, otherwise return an appropriate error code. 355 */ 356 static int 357 in6_getmulti(struct ifnet *ifp, const struct in6_addr *group, 358 struct in6_multi **pinm) 359 { 360 struct epoch_tracker et; 361 struct sockaddr_in6 gsin6; 362 struct ifmultiaddr *ifma; 363 struct in6_multi *inm; 364 int error; 365 366 error = 0; 367 368 /* 369 * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK; 370 * if_addmulti() takes this mutex itself, so we must drop and 371 * re-acquire around the call. 372 */ 373 IN6_MULTI_LOCK_ASSERT(); 374 IN6_MULTI_LIST_LOCK(); 375 IF_ADDR_WLOCK(ifp); 376 NET_EPOCH_ENTER(et); 377 inm = in6m_lookup_locked(ifp, group); 378 NET_EPOCH_EXIT(et); 379 380 if (inm != NULL) { 381 /* 382 * If we already joined this group, just bump the 383 * refcount and return it. 384 */ 385 KASSERT(inm->in6m_refcount >= 1, 386 ("%s: bad refcount %d", __func__, inm->in6m_refcount)); 387 in6m_acquire_locked(inm); 388 *pinm = inm; 389 goto out_locked; 390 } 391 392 memset(&gsin6, 0, sizeof(gsin6)); 393 gsin6.sin6_family = AF_INET6; 394 gsin6.sin6_len = sizeof(struct sockaddr_in6); 395 gsin6.sin6_addr = *group; 396 397 /* 398 * Check if a link-layer group is already associated 399 * with this network-layer group on the given ifnet. 
400 */ 401 IN6_MULTI_LIST_UNLOCK(); 402 IF_ADDR_WUNLOCK(ifp); 403 error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); 404 if (error != 0) 405 return (error); 406 IN6_MULTI_LIST_LOCK(); 407 IF_ADDR_WLOCK(ifp); 408 409 /* 410 * If something other than netinet6 is occupying the link-layer 411 * group, print a meaningful error message and back out of 412 * the allocation. 413 * Otherwise, bump the refcount on the existing network-layer 414 * group association and return it. 415 */ 416 if (ifma->ifma_protospec != NULL) { 417 inm = (struct in6_multi *)ifma->ifma_protospec; 418 #ifdef INVARIANTS 419 KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 420 __func__)); 421 KASSERT(ifma->ifma_addr->sa_family == AF_INET6, 422 ("%s: ifma not AF_INET6", __func__)); 423 KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 424 if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp || 425 !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)) 426 panic("%s: ifma %p is inconsistent with %p (%p)", 427 __func__, ifma, inm, group); 428 #endif 429 in6m_acquire_locked(inm); 430 *pinm = inm; 431 goto out_locked; 432 } 433 434 IF_ADDR_WLOCK_ASSERT(ifp); 435 436 /* 437 * A new in6_multi record is needed; allocate and initialize it. 438 * We DO NOT perform an MLD join as the in6_ layer may need to 439 * push an initial source list down to MLD to support SSM. 440 * 441 * The initial source filter state is INCLUDE, {} as per the RFC. 442 * Pending state-changes per group are subject to a bounds check. 443 */ 444 inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO); 445 if (inm == NULL) { 446 IN6_MULTI_LIST_UNLOCK(); 447 IF_ADDR_WUNLOCK(ifp); 448 if_delmulti_ifma(ifma); 449 return (ENOMEM); 450 } 451 inm->in6m_addr = *group; 452 inm->in6m_ifp = ifp; 453 inm->in6m_mli = MLD_IFINFO(ifp); 454 inm->in6m_ifma = ifma; 455 inm->in6m_refcount = 1; 456 inm->in6m_state = MLD_NOT_MEMBER; 457 mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); 458 459 inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; 460 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 461 RB_INIT(&inm->in6m_srcs); 462 463 ifma->ifma_protospec = inm; 464 *pinm = inm; 465 466 out_locked: 467 IN6_MULTI_LIST_UNLOCK(); 468 IF_ADDR_WUNLOCK(ifp); 469 return (error); 470 } 471 472 /* 473 * Drop a reference to an in6_multi record. 474 * 475 * If the refcount drops to 0, free the in6_multi record and 476 * delete the underlying link-layer membership. 477 */ 478 static void 479 in6m_release(struct in6_multi *inm) 480 { 481 struct ifmultiaddr *ifma; 482 struct ifnet *ifp; 483 484 CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount); 485 486 MPASS(inm->in6m_refcount == 0); 487 CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm); 488 489 ifma = inm->in6m_ifma; 490 ifp = inm->in6m_ifp; 491 MPASS(ifma->ifma_llifma == NULL); 492 493 /* XXX this access is not covered by IF_ADDR_LOCK */ 494 CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma); 495 KASSERT(ifma->ifma_protospec == NULL, 496 ("%s: ifma_protospec != NULL", __func__)); 497 if (ifp == NULL) 498 ifp = ifma->ifma_ifp; 499 500 if (ifp != NULL) { 501 CURVNET_SET(ifp->if_vnet); 502 in6m_purge(inm); 503 free(inm, M_IP6MADDR); 504 if_delmulti_ifma_flags(ifma, 1); 505 CURVNET_RESTORE(); 506 if_rele(ifp); 507 } else { 508 in6m_purge(inm); 509 free(inm, M_IP6MADDR); 510 if_delmulti_ifma_flags(ifma, 1); 511 } 512 } 513 514 /* 515 * Interface detach can happen in a taskqueue thread context, so we must use a 516 * dedicated thread to avoid deadlocks when draining in6m_release tasks. 
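 *
 * Illustrative caller pattern (a sketch based on the leave path in this
 * file, not additional code): doomed groups are collected on a
 * caller-local list while the list lock is held, then handed to the
 * taskqueue once it is dropped:
 *
 *	struct in6_multi_head inmh;
 *
 *	SLIST_INIT(&inmh);
 *	IN6_MULTI_LIST_LOCK();
 *	in6m_rele_locked(&inmh, inm);
 *	IN6_MULTI_LIST_UNLOCK();
 *	in6m_release_list_deferred(&inmh);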
517 */ 518 TASKQUEUE_DEFINE_THREAD(in6m_free); 519 static struct in6_multi_head in6m_free_list = SLIST_HEAD_INITIALIZER(); 520 static void in6m_release_task(void *arg __unused, int pending __unused); 521 static struct task in6m_free_task = TASK_INITIALIZER(0, in6m_release_task, NULL); 522 523 void 524 in6m_release_list_deferred(struct in6_multi_head *inmh) 525 { 526 if (SLIST_EMPTY(inmh)) 527 return; 528 mtx_lock(&in6_multi_free_mtx); 529 SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele); 530 mtx_unlock(&in6_multi_free_mtx); 531 taskqueue_enqueue(taskqueue_in6m_free, &in6m_free_task); 532 } 533 534 void 535 in6m_release_wait(void *arg __unused) 536 { 537 538 /* 539 * Make sure all pending multicast addresses are freed before 540 * the VNET or network device is destroyed: 541 */ 542 taskqueue_drain_all(taskqueue_in6m_free); 543 } 544 #ifdef VIMAGE 545 /* XXX-BZ FIXME, see D24914. */ 546 VNET_SYSUNINIT(in6m_release_wait, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, in6m_release_wait, NULL); 547 #endif 548 549 void 550 in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm) 551 { 552 struct ifnet *ifp; 553 struct ifaddr *ifa; 554 struct in6_ifaddr *ifa6; 555 struct in6_multi_mship *imm, *imm_tmp; 556 struct ifmultiaddr *ifma, *ll_ifma; 557 558 IN6_MULTI_LIST_LOCK_ASSERT(); 559 560 ifp = inm->in6m_ifp; 561 if (ifp == NULL) 562 return; /* already called */ 563 564 inm->in6m_ifp = NULL; 565 IF_ADDR_WLOCK_ASSERT(ifp); 566 ifma = inm->in6m_ifma; 567 if (ifma == NULL) 568 return; 569 570 if_ref(ifp); 571 if (ifma->ifma_flags & IFMA_F_ENQUEUED) { 572 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link); 573 ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 574 } 575 MCDPRINTF("removed ifma: %p from %s\n", ifma, ifp->if_xname); 576 if ((ll_ifma = ifma->ifma_llifma) != NULL) { 577 MPASS(ifma != ll_ifma); 578 ifma->ifma_llifma = NULL; 579 MPASS(ll_ifma->ifma_llifma == NULL); 580 MPASS(ll_ifma->ifma_ifp == ifp); 581 if (--ll_ifma->ifma_refcount == 0) { 582 if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) { 583 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link); 584 ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 585 } 586 MCDPRINTF("removed ll_ifma: %p from %s\n", ll_ifma, ifp->if_xname); 587 if_freemulti(ll_ifma); 588 } 589 } 590 CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 591 if (ifa->ifa_addr->sa_family != AF_INET6) 592 continue; 593 ifa6 = (void *)ifa; 594 LIST_FOREACH_SAFE(imm, &ifa6->ia6_memberships, 595 i6mm_chain, imm_tmp) { 596 if (inm == imm->i6mm_maddr) { 597 LIST_REMOVE(imm, i6mm_chain); 598 free(imm, M_IP6MADDR); 599 in6m_rele_locked(inmh, inm); 600 } 601 } 602 } 603 } 604 605 static void 606 in6m_release_task(void *arg __unused, int pending __unused) 607 { 608 struct in6_multi_head in6m_free_tmp; 609 struct in6_multi *inm, *tinm; 610 611 SLIST_INIT(&in6m_free_tmp); 612 mtx_lock(&in6_multi_free_mtx); 613 SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele); 614 mtx_unlock(&in6_multi_free_mtx); 615 IN6_MULTI_LOCK(); 616 SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) { 617 SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele); 618 in6m_release(inm); 619 } 620 IN6_MULTI_UNLOCK(); 621 } 622 623 /* 624 * Clear recorded source entries for a group. 625 * Used by the MLD code. Caller must hold the IN6_MULTI lock. 626 * FIXME: Should reap. 
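 *
 * Illustrative pairing with in6m_record_source() (a sketch of the MLD
 * caller, not code from this file): sources are recorded while a
 * group-and-source query is being answered, then cleared once the
 * report has been enqueued:
 *
 *	IN6_MULTI_LIST_LOCK();
 *	(void)in6m_record_source(inm, &src);
 *	...
 *	in6m_clear_recorded(inm);
 *	IN6_MULTI_LIST_UNLOCK();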
627 */ 628 void 629 in6m_clear_recorded(struct in6_multi *inm) 630 { 631 struct ip6_msource *ims; 632 633 IN6_MULTI_LIST_LOCK_ASSERT(); 634 635 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 636 if (ims->im6s_stp) { 637 ims->im6s_stp = 0; 638 --inm->in6m_st[1].iss_rec; 639 } 640 } 641 KASSERT(inm->in6m_st[1].iss_rec == 0, 642 ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec)); 643 } 644 645 /* 646 * Record a source as pending for a Source-Group MLDv2 query. 647 * This lives here as it modifies the shared tree. 648 * 649 * inm is the group descriptor. 650 * naddr is the address of the source to record in network-byte order. 651 * 652 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will 653 * lazy-allocate a source node in response to an SG query. 654 * Otherwise, no allocation is performed. This saves some memory 655 * with the trade-off that the source will not be reported to the 656 * router if joined in the window between the query response and 657 * the group actually being joined on the local host. 658 * 659 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed. 660 * This turns off the allocation of a recorded source entry if 661 * the group has not been joined. 662 * 663 * Return 0 if the source didn't exist or was already marked as recorded. 664 * Return 1 if the source was marked as recorded by this function. 665 * Return <0 if any error occurred (negated errno code). 666 */ 667 int 668 in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) 669 { 670 struct ip6_msource find; 671 struct ip6_msource *ims, *nims; 672 673 IN6_MULTI_LIST_LOCK_ASSERT(); 674 675 find.im6s_addr = *addr; 676 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 677 if (ims && ims->im6s_stp) 678 return (0); 679 if (ims == NULL) { 680 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 681 return (-ENOSPC); 682 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 683 M_NOWAIT | M_ZERO); 684 if (nims == NULL) 685 return (-ENOMEM); 686 nims->im6s_addr = find.im6s_addr; 687 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 688 ++inm->in6m_nsrc; 689 ims = nims; 690 } 691 692 /* 693 * Mark the source as recorded and update the recorded 694 * source count. 695 */ 696 ++ims->im6s_stp; 697 ++inm->in6m_st[1].iss_rec; 698 699 return (1); 700 } 701 702 /* 703 * Return a pointer to an in6_msource owned by an in6_mfilter, 704 * given its source address. 705 * Lazy-allocate if needed. If this is a new entry its filter state is 706 * undefined at t0. 707 * 708 * imf is the filter set being modified. 709 * addr is the source address. 710 * 711 * SMPng: May be called with locks held; malloc must not block. 
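 *
 * Sketch of the expected calling pattern (illustrative; the full-state
 * filter code is the real consumer): look up or allocate the node, then
 * set its state at t1:
 *
 *	struct in6_msource *lims;
 *
 *	error = im6f_get_source(imf, psin, &lims);
 *	if (error == 0)
 *		lims->im6sl_st[1] = imf->im6f_st[1];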
712 */ 713 static int 714 im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, 715 struct in6_msource **plims) 716 { 717 struct ip6_msource find; 718 struct ip6_msource *ims, *nims; 719 struct in6_msource *lims; 720 int error; 721 722 error = 0; 723 ims = NULL; 724 lims = NULL; 725 726 find.im6s_addr = psin->sin6_addr; 727 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 728 lims = (struct in6_msource *)ims; 729 if (lims == NULL) { 730 if (imf->im6f_nsrc == in6_mcast_maxsocksrc) 731 return (ENOSPC); 732 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 733 M_NOWAIT | M_ZERO); 734 if (nims == NULL) 735 return (ENOMEM); 736 lims = (struct in6_msource *)nims; 737 lims->im6s_addr = find.im6s_addr; 738 lims->im6sl_st[0] = MCAST_UNDEFINED; 739 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 740 ++imf->im6f_nsrc; 741 } 742 743 *plims = lims; 744 745 return (error); 746 } 747 748 /* 749 * Graft a source entry into an existing socket-layer filter set, 750 * maintaining any required invariants and checking allocations. 751 * 752 * The source is marked as being in the new filter mode at t1. 753 * 754 * Return the pointer to the new node, otherwise return NULL. 755 */ 756 static struct in6_msource * 757 im6f_graft(struct in6_mfilter *imf, const uint8_t st1, 758 const struct sockaddr_in6 *psin) 759 { 760 struct ip6_msource *nims; 761 struct in6_msource *lims; 762 763 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 764 M_NOWAIT | M_ZERO); 765 if (nims == NULL) 766 return (NULL); 767 lims = (struct in6_msource *)nims; 768 lims->im6s_addr = psin->sin6_addr; 769 lims->im6sl_st[0] = MCAST_UNDEFINED; 770 lims->im6sl_st[1] = st1; 771 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 772 ++imf->im6f_nsrc; 773 774 return (lims); 775 } 776 777 /* 778 * Prune a source entry from an existing socket-layer filter set, 779 * maintaining any required invariants and checking allocations. 780 * 781 * The source is marked as being left at t1, it is not freed. 782 * 783 * Return 0 if no error occurred, otherwise return an errno value. 784 */ 785 static int 786 im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) 787 { 788 struct ip6_msource find; 789 struct ip6_msource *ims; 790 struct in6_msource *lims; 791 792 find.im6s_addr = psin->sin6_addr; 793 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 794 if (ims == NULL) 795 return (ENOENT); 796 lims = (struct in6_msource *)ims; 797 lims->im6sl_st[1] = MCAST_UNDEFINED; 798 return (0); 799 } 800 801 /* 802 * Revert socket-layer filter set deltas at t1 to t0 state. 803 */ 804 static void 805 im6f_rollback(struct in6_mfilter *imf) 806 { 807 struct ip6_msource *ims, *tims; 808 struct in6_msource *lims; 809 810 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 811 lims = (struct in6_msource *)ims; 812 if (lims->im6sl_st[0] == lims->im6sl_st[1]) { 813 /* no change at t1 */ 814 continue; 815 } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) { 816 /* revert change to existing source at t1 */ 817 lims->im6sl_st[1] = lims->im6sl_st[0]; 818 } else { 819 /* revert source added t1 */ 820 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 821 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 822 free(ims, M_IN6MFILTER); 823 imf->im6f_nsrc--; 824 } 825 } 826 imf->im6f_st[1] = imf->im6f_st[0]; 827 } 828 829 /* 830 * Mark socket-layer filter set as INCLUDE {} at t1. 
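 * Callers such as inp_gcmoptions() use this to express an implicit
 * leave: every source is marked undefined at t1, after which the leave
 * downcall is made and the filter is reclaimed via ip6_mfilter_free().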
831 */ 832 static void 833 im6f_leave(struct in6_mfilter *imf) 834 { 835 struct ip6_msource *ims; 836 struct in6_msource *lims; 837 838 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 839 lims = (struct in6_msource *)ims; 840 lims->im6sl_st[1] = MCAST_UNDEFINED; 841 } 842 imf->im6f_st[1] = MCAST_INCLUDE; 843 } 844 845 /* 846 * Mark socket-layer filter set deltas as committed. 847 */ 848 static void 849 im6f_commit(struct in6_mfilter *imf) 850 { 851 struct ip6_msource *ims; 852 struct in6_msource *lims; 853 854 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 855 lims = (struct in6_msource *)ims; 856 lims->im6sl_st[0] = lims->im6sl_st[1]; 857 } 858 imf->im6f_st[0] = imf->im6f_st[1]; 859 } 860 861 /* 862 * Reap unreferenced sources from socket-layer filter set. 863 */ 864 static void 865 im6f_reap(struct in6_mfilter *imf) 866 { 867 struct ip6_msource *ims, *tims; 868 struct in6_msource *lims; 869 870 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 871 lims = (struct in6_msource *)ims; 872 if ((lims->im6sl_st[0] == MCAST_UNDEFINED) && 873 (lims->im6sl_st[1] == MCAST_UNDEFINED)) { 874 CTR2(KTR_MLD, "%s: free lims %p", __func__, ims); 875 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 876 free(ims, M_IN6MFILTER); 877 imf->im6f_nsrc--; 878 } 879 } 880 } 881 882 /* 883 * Purge socket-layer filter set. 884 */ 885 static void 886 im6f_purge(struct in6_mfilter *imf) 887 { 888 struct ip6_msource *ims, *tims; 889 890 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 891 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 892 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 893 free(ims, M_IN6MFILTER); 894 imf->im6f_nsrc--; 895 } 896 imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED; 897 KASSERT(RB_EMPTY(&imf->im6f_sources), 898 ("%s: im6f_sources not empty", __func__)); 899 } 900 901 /* 902 * Look up a source filter entry for a multicast group. 903 * 904 * inm is the group descriptor to work with. 905 * addr is the IPv6 address to look up. 906 * noalloc may be non-zero to suppress allocation of sources. 907 * *pims will be set to the address of the retrieved or allocated source. 908 * 909 * SMPng: NOTE: may be called with locks held. 910 * Return 0 if successful, otherwise return a non-zero error code. 911 */ 912 static int 913 in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, 914 const int noalloc, struct ip6_msource **pims) 915 { 916 struct ip6_msource find; 917 struct ip6_msource *ims, *nims; 918 #ifdef KTR 919 char ip6tbuf[INET6_ADDRSTRLEN]; 920 #endif 921 922 find.im6s_addr = *addr; 923 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 924 if (ims == NULL && !noalloc) { 925 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 926 return (ENOSPC); 927 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 928 M_NOWAIT | M_ZERO); 929 if (nims == NULL) 930 return (ENOMEM); 931 nims->im6s_addr = *addr; 932 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 933 ++inm->in6m_nsrc; 934 ims = nims; 935 CTR3(KTR_MLD, "%s: allocated %s as %p", __func__, 936 ip6_sprintf(ip6tbuf, addr), ims); 937 } 938 939 *pims = ims; 940 return (0); 941 } 942 943 /* 944 * Merge socket-layer source into MLD-layer source. 945 * If rollback is non-zero, perform the inverse of the merge. 946 */ 947 static void 948 im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, 949 const int rollback) 950 { 951 int n = rollback ? 
-1 : 1; 952 #ifdef KTR 953 char ip6tbuf[INET6_ADDRSTRLEN]; 954 955 ip6_sprintf(ip6tbuf, &lims->im6s_addr); 956 #endif 957 958 if (lims->im6sl_st[0] == MCAST_EXCLUDE) { 959 CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf); 960 ims->im6s_st[1].ex -= n; 961 } else if (lims->im6sl_st[0] == MCAST_INCLUDE) { 962 CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf); 963 ims->im6s_st[1].in -= n; 964 } 965 966 if (lims->im6sl_st[1] == MCAST_EXCLUDE) { 967 CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf); 968 ims->im6s_st[1].ex += n; 969 } else if (lims->im6sl_st[1] == MCAST_INCLUDE) { 970 CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf); 971 ims->im6s_st[1].in += n; 972 } 973 } 974 975 /* 976 * Atomically update the global in6_multi state, when a membership's 977 * filter list is being updated in any way. 978 * 979 * imf is the per-inpcb-membership group filter pointer. 980 * A fake imf may be passed for in-kernel consumers. 981 * 982 * XXX This is a candidate for a set-symmetric-difference style loop 983 * which would eliminate the repeated lookup from root of ims nodes, 984 * as they share the same key space. 985 * 986 * If any error occurred this function will back out of refcounts 987 * and return a non-zero value. 988 */ 989 static int 990 in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 991 { 992 struct ip6_msource *ims, *nims; 993 struct in6_msource *lims; 994 int schanged, error; 995 int nsrc0, nsrc1; 996 997 schanged = 0; 998 error = 0; 999 nsrc1 = nsrc0 = 0; 1000 IN6_MULTI_LIST_LOCK_ASSERT(); 1001 1002 /* 1003 * Update the source filters first, as this may fail. 1004 * Maintain count of in-mode filters at t0, t1. These are 1005 * used to work out if we transition into ASM mode or not. 1006 * Maintain a count of source filters whose state was 1007 * actually modified by this operation. 1008 */ 1009 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1010 lims = (struct in6_msource *)ims; 1011 if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; 1012 if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; 1013 if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; 1014 error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); 1015 ++schanged; 1016 if (error) 1017 break; 1018 im6s_merge(nims, lims, 0); 1019 } 1020 if (error) { 1021 struct ip6_msource *bims; 1022 1023 RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { 1024 lims = (struct in6_msource *)ims; 1025 if (lims->im6sl_st[0] == lims->im6sl_st[1]) 1026 continue; 1027 (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims); 1028 if (bims == NULL) 1029 continue; 1030 im6s_merge(bims, lims, 1); 1031 } 1032 goto out_reap; 1033 } 1034 1035 CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1", 1036 __func__, nsrc0, nsrc1); 1037 1038 /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ 1039 if (imf->im6f_st[0] == imf->im6f_st[1] && 1040 imf->im6f_st[1] == MCAST_INCLUDE) { 1041 if (nsrc1 == 0) { 1042 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1043 --inm->in6m_st[1].iss_in; 1044 } 1045 } 1046 1047 /* Handle filter mode transition on socket. 
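	 * For example, a socket moving from INCLUDE {A} to EXCLUDE {}
	 * has its old in-mode count removed and the exclusive count
	 * bumped on the inm below.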
*/ 1048 if (imf->im6f_st[0] != imf->im6f_st[1]) { 1049 CTR3(KTR_MLD, "%s: imf transition %d to %d", 1050 __func__, imf->im6f_st[0], imf->im6f_st[1]); 1051 1052 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 1053 CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__); 1054 --inm->in6m_st[1].iss_ex; 1055 } else if (imf->im6f_st[0] == MCAST_INCLUDE) { 1056 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1057 --inm->in6m_st[1].iss_in; 1058 } 1059 1060 if (imf->im6f_st[1] == MCAST_EXCLUDE) { 1061 CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__); 1062 inm->in6m_st[1].iss_ex++; 1063 } else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) { 1064 CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__); 1065 inm->in6m_st[1].iss_in++; 1066 } 1067 } 1068 1069 /* 1070 * Track inm filter state in terms of listener counts. 1071 * If there are any exclusive listeners, stack-wide 1072 * membership is exclusive. 1073 * Otherwise, if only inclusive listeners, stack-wide is inclusive. 1074 * If no listeners remain, state is undefined at t1, 1075 * and the MLD lifecycle for this group should finish. 1076 */ 1077 if (inm->in6m_st[1].iss_ex > 0) { 1078 CTR1(KTR_MLD, "%s: transition to EX", __func__); 1079 inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE; 1080 } else if (inm->in6m_st[1].iss_in > 0) { 1081 CTR1(KTR_MLD, "%s: transition to IN", __func__); 1082 inm->in6m_st[1].iss_fmode = MCAST_INCLUDE; 1083 } else { 1084 CTR1(KTR_MLD, "%s: transition to UNDEF", __func__); 1085 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 1086 } 1087 1088 /* Decrement ASM listener count on transition out of ASM mode. */ 1089 if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { 1090 if ((imf->im6f_st[1] != MCAST_EXCLUDE) || 1091 (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) { 1092 CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__); 1093 --inm->in6m_st[1].iss_asm; 1094 } 1095 } 1096 1097 /* Increment ASM listener count on transition to ASM mode. */ 1098 if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { 1099 CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__); 1100 inm->in6m_st[1].iss_asm++; 1101 } 1102 1103 CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm); 1104 in6m_print(inm); 1105 1106 out_reap: 1107 if (schanged > 0) { 1108 CTR1(KTR_MLD, "%s: sources changed; reaping", __func__); 1109 in6m_reap(inm); 1110 } 1111 return (error); 1112 } 1113 1114 /* 1115 * Mark an in6_multi's filter set deltas as committed. 1116 * Called by MLD after a state change has been enqueued. 1117 */ 1118 void 1119 in6m_commit(struct in6_multi *inm) 1120 { 1121 struct ip6_msource *ims; 1122 1123 CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm); 1124 CTR1(KTR_MLD, "%s: pre commit:", __func__); 1125 in6m_print(inm); 1126 1127 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 1128 ims->im6s_st[0] = ims->im6s_st[1]; 1129 } 1130 inm->in6m_st[0] = inm->in6m_st[1]; 1131 } 1132 1133 /* 1134 * Reap unreferenced nodes from an in6_multi's filter set. 1135 */ 1136 static void 1137 in6m_reap(struct in6_multi *inm) 1138 { 1139 struct ip6_msource *ims, *tims; 1140 1141 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1142 if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 || 1143 ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 || 1144 ims->im6s_stp != 0) 1145 continue; 1146 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1147 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1148 free(ims, M_IP6MSOURCE); 1149 inm->in6m_nsrc--; 1150 } 1151 } 1152 1153 /* 1154 * Purge all source nodes from an in6_multi's filter set. 
 */
static void
in6m_purge(struct in6_multi *inm)
{
	struct ip6_msource *ims, *tims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
		CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
		RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
		free(ims, M_IP6MSOURCE);
		inm->in6m_nsrc--;
	}
	/* Free state-change requests that might be queued. */
	mbufq_drain(&inm->in6m_scq);
}

/*
 * Join a multicast address w/o sources.
 * KAME compatibility entry point.
 *
 * SMPng: Assume no mc locks held by caller.
 */
int
in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr,
    /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
    const int delay)
{
	int error;

	IN6_MULTI_LOCK();
	error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay);
	IN6_MULTI_UNLOCK();
	return (error);
}

/*
 * Join a multicast group; real entry point.
 *
 * Only preserves atomicity at inm level.
 * NOTE: imf argument cannot be const due to sys/tree.h limitations.
 *
 * If the MLD downcall fails, the group is not joined, and an error
 * code is returned.
 */
static int
in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
    /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
    const int delay)
{
	struct in6_multi_head inmh;
	struct in6_mfilter timf;
	struct in6_multi *inm;
	struct ifmultiaddr *ifma;
	int error;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	/*
	 * Sanity: Check scope zone ID was set for ifp, if and
	 * only if group is scoped to an interface.
	 */
	KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr),
	    ("%s: not a multicast address", __func__));
	if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) ||
	    IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) {
		KASSERT(mcaddr->s6_addr16[1] != 0,
		    ("%s: scope zone ID not set", __func__));
	}

	IN6_MULTI_LOCK_ASSERT();
	IN6_MULTI_LIST_UNLOCK_ASSERT();

	CTR4(KTR_MLD, "%s: join %s on %p(%s)", __func__,
	    ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp));

	error = 0;
	inm = NULL;

	/*
	 * If no imf was specified (i.e. kernel consumer),
	 * fake one up and assume it is an ASM join.
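	 * (The temporary filter is MCAST_UNDEFINED at t0 and
	 * MCAST_EXCLUDE at t1, i.e. the state change for a brand-new
	 * exclusive-mode membership.)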
	 */
	if (imf == NULL) {
		im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
		imf = &timf;
	}
	error = in6_getmulti(ifp, mcaddr, &inm);
	if (error) {
		CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__);
		return (error);
	}

	IN6_MULTI_LIST_LOCK();
	CTR1(KTR_MLD, "%s: merge inm state", __func__);
	error = in6m_merge(inm, imf);
	if (error) {
		CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
		goto out_in6m_release;
	}

	CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
	error = mld_change_state(inm, delay);
	if (error) {
		CTR1(KTR_MLD, "%s: failed to update source", __func__);
		goto out_in6m_release;
	}

out_in6m_release:
	SLIST_INIT(&inmh);
	if (error) {
		struct epoch_tracker et;

		CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
		IF_ADDR_WLOCK(ifp);
		NET_EPOCH_ENTER(et);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_protospec == inm) {
				ifma->ifma_protospec = NULL;
				break;
			}
		}
		in6m_disconnect_locked(&inmh, inm);
		in6m_rele_locked(&inmh, inm);
		NET_EPOCH_EXIT(et);
		IF_ADDR_WUNLOCK(ifp);
	} else {
		*pinm = inm;
	}
	IN6_MULTI_LIST_UNLOCK();
	in6m_release_list_deferred(&inmh);
	return (error);
}

/*
 * Leave a multicast group; unlocked entry point.
 */
int
in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	int error;

	IN6_MULTI_LOCK();
	error = in6_leavegroup_locked(inm, imf);
	IN6_MULTI_UNLOCK();
	return (error);
}

/*
 * Leave a multicast group; real entry point.
 * All source filters will be expunged.
 *
 * Only preserves atomicity at inm level.
 *
 * Holding the write lock for the INP which contains imf
 * is highly advisable. We can't assert for it as imf does not
 * contain a back-pointer to the owning inp.
 *
 * Note: This is not the same as in6m_release(*) as this function also
 * makes a state change downcall into MLD.
 */
int
in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	struct in6_multi_head inmh;
	struct in6_mfilter timf;
	struct ifnet *ifp;
	int error;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	error = 0;

	IN6_MULTI_LOCK_ASSERT();

	CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__,
	    inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    (in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)),
	    imf);

	/*
	 * If no imf was specified (i.e. kernel consumer),
	 * fake one up and assume it is an ASM leave.
	 */
	if (imf == NULL) {
		im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
		imf = &timf;
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 *
	 * As this particular invocation should not cause any memory
	 * to be allocated, and there is no opportunity to roll back
	 * the transaction, it MUST NOT fail.
1351 */ 1352 1353 ifp = inm->in6m_ifp; 1354 IN6_MULTI_LIST_LOCK(); 1355 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1356 error = in6m_merge(inm, imf); 1357 KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1358 1359 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1360 error = 0; 1361 if (ifp) 1362 error = mld_change_state(inm, 0); 1363 if (error) 1364 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1365 1366 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1367 if (ifp) 1368 IF_ADDR_WLOCK(ifp); 1369 1370 SLIST_INIT(&inmh); 1371 if (inm->in6m_refcount == 1) 1372 in6m_disconnect_locked(&inmh, inm); 1373 in6m_rele_locked(&inmh, inm); 1374 if (ifp) 1375 IF_ADDR_WUNLOCK(ifp); 1376 IN6_MULTI_LIST_UNLOCK(); 1377 in6m_release_list_deferred(&inmh); 1378 return (error); 1379 } 1380 1381 1382 /* 1383 * Block or unblock an ASM multicast source on an inpcb. 1384 * This implements the delta-based API described in RFC 3678. 1385 * 1386 * The delta-based API applies only to exclusive-mode memberships. 1387 * An MLD downcall will be performed. 1388 * 1389 * SMPng: NOTE: Must take Giant as a join may create a new ifma. 1390 * 1391 * Return 0 if successful, otherwise return an appropriate error code. 1392 */ 1393 static int 1394 in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1395 { 1396 struct group_source_req gsr; 1397 sockunion_t *gsa, *ssa; 1398 struct ifnet *ifp; 1399 struct in6_mfilter *imf; 1400 struct ip6_moptions *imo; 1401 struct in6_msource *ims; 1402 struct in6_multi *inm; 1403 uint16_t fmode; 1404 int error, doblock; 1405 #ifdef KTR 1406 char ip6tbuf[INET6_ADDRSTRLEN]; 1407 #endif 1408 1409 ifp = NULL; 1410 error = 0; 1411 doblock = 0; 1412 1413 memset(&gsr, 0, sizeof(struct group_source_req)); 1414 gsa = (sockunion_t *)&gsr.gsr_group; 1415 ssa = (sockunion_t *)&gsr.gsr_source; 1416 1417 switch (sopt->sopt_name) { 1418 case MCAST_BLOCK_SOURCE: 1419 case MCAST_UNBLOCK_SOURCE: 1420 error = sooptcopyin(sopt, &gsr, 1421 sizeof(struct group_source_req), 1422 sizeof(struct group_source_req)); 1423 if (error) 1424 return (error); 1425 1426 if (gsa->sin6.sin6_family != AF_INET6 || 1427 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1428 return (EINVAL); 1429 1430 if (ssa->sin6.sin6_family != AF_INET6 || 1431 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1432 return (EINVAL); 1433 1434 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1435 return (EADDRNOTAVAIL); 1436 1437 ifp = ifnet_byindex(gsr.gsr_interface); 1438 1439 if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1440 doblock = 1; 1441 break; 1442 1443 default: 1444 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1445 __func__, sopt->sopt_name); 1446 return (EOPNOTSUPP); 1447 break; 1448 } 1449 1450 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1451 return (EINVAL); 1452 1453 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1454 1455 /* 1456 * Check if we are actually a member of this group. 1457 */ 1458 imo = in6p_findmoptions(inp); 1459 imf = im6o_match_group(imo, ifp, &gsa->sa); 1460 if (imf == NULL) { 1461 error = EADDRNOTAVAIL; 1462 goto out_in6p_locked; 1463 } 1464 inm = imf->im6f_in6m; 1465 1466 /* 1467 * Attempting to use the delta-based API on an 1468 * non exclusive-mode membership is an error. 1469 */ 1470 fmode = imf->im6f_st[0]; 1471 if (fmode != MCAST_EXCLUDE) { 1472 error = EINVAL; 1473 goto out_in6p_locked; 1474 } 1475 1476 /* 1477 * Deal with error cases up-front: 1478 * Asked to block, but already blocked; or 1479 * Asked to unblock, but nothing to unblock. 
1480 * If adding a new block entry, allocate it. 1481 */ 1482 ims = im6o_match_source(imf, &ssa->sa); 1483 if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1484 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 1485 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 1486 doblock ? "" : "not "); 1487 error = EADDRNOTAVAIL; 1488 goto out_in6p_locked; 1489 } 1490 1491 INP_WLOCK_ASSERT(inp); 1492 1493 /* 1494 * Begin state merge transaction at socket layer. 1495 */ 1496 if (doblock) { 1497 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 1498 ims = im6f_graft(imf, fmode, &ssa->sin6); 1499 if (ims == NULL) 1500 error = ENOMEM; 1501 } else { 1502 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 1503 error = im6f_prune(imf, &ssa->sin6); 1504 } 1505 1506 if (error) { 1507 CTR1(KTR_MLD, "%s: merge imf state failed", __func__); 1508 goto out_im6f_rollback; 1509 } 1510 1511 /* 1512 * Begin state merge transaction at MLD layer. 1513 */ 1514 IN6_MULTI_LIST_LOCK(); 1515 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1516 error = in6m_merge(inm, imf); 1517 if (error) 1518 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1519 else { 1520 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1521 error = mld_change_state(inm, 0); 1522 if (error) 1523 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1524 } 1525 1526 IN6_MULTI_LIST_UNLOCK(); 1527 1528 out_im6f_rollback: 1529 if (error) 1530 im6f_rollback(imf); 1531 else 1532 im6f_commit(imf); 1533 1534 im6f_reap(imf); 1535 1536 out_in6p_locked: 1537 INP_WUNLOCK(inp); 1538 return (error); 1539 } 1540 1541 /* 1542 * Given an inpcb, return its multicast options structure pointer. Accepts 1543 * an unlocked inpcb pointer, but will return it locked. May sleep. 1544 * 1545 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 1546 * SMPng: NOTE: Returns with the INP write lock held. 1547 */ 1548 static struct ip6_moptions * 1549 in6p_findmoptions(struct inpcb *inp) 1550 { 1551 struct ip6_moptions *imo; 1552 1553 INP_WLOCK(inp); 1554 if (inp->in6p_moptions != NULL) 1555 return (inp->in6p_moptions); 1556 1557 INP_WUNLOCK(inp); 1558 1559 imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK); 1560 1561 imo->im6o_multicast_ifp = NULL; 1562 imo->im6o_multicast_hlim = V_ip6_defmcasthlim; 1563 imo->im6o_multicast_loop = in6_mcast_loop; 1564 STAILQ_INIT(&imo->im6o_head); 1565 1566 INP_WLOCK(inp); 1567 if (inp->in6p_moptions != NULL) { 1568 free(imo, M_IP6MOPTS); 1569 return (inp->in6p_moptions); 1570 } 1571 inp->in6p_moptions = imo; 1572 return (imo); 1573 } 1574 1575 /* 1576 * Discard the IPv6 multicast options (and source filters). 1577 * 1578 * SMPng: NOTE: assumes INP write lock is held. 
1579 * 1580 * XXX can all be safely deferred to epoch_call 1581 * 1582 */ 1583 1584 static void 1585 inp_gcmoptions(struct ip6_moptions *imo) 1586 { 1587 struct in6_mfilter *imf; 1588 struct in6_multi *inm; 1589 struct ifnet *ifp; 1590 1591 while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) { 1592 ip6_mfilter_remove(&imo->im6o_head, imf); 1593 1594 im6f_leave(imf); 1595 if ((inm = imf->im6f_in6m) != NULL) { 1596 if ((ifp = inm->in6m_ifp) != NULL) { 1597 CURVNET_SET(ifp->if_vnet); 1598 (void)in6_leavegroup(inm, imf); 1599 CURVNET_RESTORE(); 1600 } else { 1601 (void)in6_leavegroup(inm, imf); 1602 } 1603 } 1604 ip6_mfilter_free(imf); 1605 } 1606 free(imo, M_IP6MOPTS); 1607 } 1608 1609 void 1610 ip6_freemoptions(struct ip6_moptions *imo) 1611 { 1612 if (imo == NULL) 1613 return; 1614 inp_gcmoptions(imo); 1615 } 1616 1617 /* 1618 * Atomically get source filters on a socket for an IPv6 multicast group. 1619 * Called with INP lock held; returns with lock released. 1620 */ 1621 static int 1622 in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) 1623 { 1624 struct __msfilterreq msfr; 1625 sockunion_t *gsa; 1626 struct ifnet *ifp; 1627 struct ip6_moptions *imo; 1628 struct in6_mfilter *imf; 1629 struct ip6_msource *ims; 1630 struct in6_msource *lims; 1631 struct sockaddr_in6 *psin; 1632 struct sockaddr_storage *ptss; 1633 struct sockaddr_storage *tss; 1634 int error; 1635 size_t nsrcs, ncsrcs; 1636 1637 INP_WLOCK_ASSERT(inp); 1638 1639 imo = inp->in6p_moptions; 1640 KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__)); 1641 1642 INP_WUNLOCK(inp); 1643 1644 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 1645 sizeof(struct __msfilterreq)); 1646 if (error) 1647 return (error); 1648 1649 if (msfr.msfr_group.ss_family != AF_INET6 || 1650 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 1651 return (EINVAL); 1652 1653 gsa = (sockunion_t *)&msfr.msfr_group; 1654 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1655 return (EINVAL); 1656 1657 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 1658 return (EADDRNOTAVAIL); 1659 ifp = ifnet_byindex(msfr.msfr_ifindex); 1660 if (ifp == NULL) 1661 return (EADDRNOTAVAIL); 1662 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1663 1664 INP_WLOCK(inp); 1665 1666 /* 1667 * Lookup group on the socket. 1668 */ 1669 imf = im6o_match_group(imo, ifp, &gsa->sa); 1670 if (imf == NULL) { 1671 INP_WUNLOCK(inp); 1672 return (EADDRNOTAVAIL); 1673 } 1674 1675 /* 1676 * Ignore memberships which are in limbo. 1677 */ 1678 if (imf->im6f_st[1] == MCAST_UNDEFINED) { 1679 INP_WUNLOCK(inp); 1680 return (EAGAIN); 1681 } 1682 msfr.msfr_fmode = imf->im6f_st[1]; 1683 1684 /* 1685 * If the user specified a buffer, copy out the source filter 1686 * entries to userland gracefully. 1687 * We only copy out the number of entries which userland 1688 * has asked for, but we always tell userland how big the 1689 * buffer really needs to be. 1690 */ 1691 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 1692 msfr.msfr_nsrcs = in6_mcast_maxsocksrc; 1693 tss = NULL; 1694 if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { 1695 tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 1696 M_TEMP, M_NOWAIT | M_ZERO); 1697 if (tss == NULL) { 1698 INP_WUNLOCK(inp); 1699 return (ENOBUFS); 1700 } 1701 } 1702 1703 /* 1704 * Count number of sources in-mode at t0. 1705 * If buffer space exists and remains, copy out source entries. 
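	 * A caller whose buffer was too small can use the msfr_nsrcs
	 * value copied back below to resize and retry.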
1706 */ 1707 nsrcs = msfr.msfr_nsrcs; 1708 ncsrcs = 0; 1709 ptss = tss; 1710 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1711 lims = (struct in6_msource *)ims; 1712 if (lims->im6sl_st[0] == MCAST_UNDEFINED || 1713 lims->im6sl_st[0] != imf->im6f_st[0]) 1714 continue; 1715 ++ncsrcs; 1716 if (tss != NULL && nsrcs > 0) { 1717 psin = (struct sockaddr_in6 *)ptss; 1718 psin->sin6_family = AF_INET6; 1719 psin->sin6_len = sizeof(struct sockaddr_in6); 1720 psin->sin6_addr = lims->im6s_addr; 1721 psin->sin6_port = 0; 1722 --nsrcs; 1723 ++ptss; 1724 } 1725 } 1726 1727 INP_WUNLOCK(inp); 1728 1729 if (tss != NULL) { 1730 error = copyout(tss, msfr.msfr_srcs, 1731 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 1732 free(tss, M_TEMP); 1733 if (error) 1734 return (error); 1735 } 1736 1737 msfr.msfr_nsrcs = ncsrcs; 1738 error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); 1739 1740 return (error); 1741 } 1742 1743 /* 1744 * Return the IP multicast options in response to user getsockopt(). 1745 */ 1746 int 1747 ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) 1748 { 1749 struct ip6_moptions *im6o; 1750 int error; 1751 u_int optval; 1752 1753 INP_WLOCK(inp); 1754 im6o = inp->in6p_moptions; 1755 /* 1756 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 1757 * or is a divert socket, reject it. 1758 */ 1759 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 1760 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 1761 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { 1762 INP_WUNLOCK(inp); 1763 return (EOPNOTSUPP); 1764 } 1765 1766 error = 0; 1767 switch (sopt->sopt_name) { 1768 case IPV6_MULTICAST_IF: 1769 if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { 1770 optval = 0; 1771 } else { 1772 optval = im6o->im6o_multicast_ifp->if_index; 1773 } 1774 INP_WUNLOCK(inp); 1775 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1776 break; 1777 1778 case IPV6_MULTICAST_HOPS: 1779 if (im6o == NULL) 1780 optval = V_ip6_defmcasthlim; 1781 else 1782 optval = im6o->im6o_multicast_hlim; 1783 INP_WUNLOCK(inp); 1784 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1785 break; 1786 1787 case IPV6_MULTICAST_LOOP: 1788 if (im6o == NULL) 1789 optval = in6_mcast_loop; /* XXX VIMAGE */ 1790 else 1791 optval = im6o->im6o_multicast_loop; 1792 INP_WUNLOCK(inp); 1793 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1794 break; 1795 1796 case IPV6_MSFILTER: 1797 if (im6o == NULL) { 1798 error = EADDRNOTAVAIL; 1799 INP_WUNLOCK(inp); 1800 } else { 1801 error = in6p_get_source_filters(inp, sopt); 1802 } 1803 break; 1804 1805 default: 1806 INP_WUNLOCK(inp); 1807 error = ENOPROTOOPT; 1808 break; 1809 } 1810 1811 INP_UNLOCK_ASSERT(inp); 1812 1813 return (error); 1814 } 1815 1816 /* 1817 * Look up the ifnet to use for a multicast group membership, 1818 * given the address of an IPv6 group. 1819 * 1820 * This routine exists to support legacy IPv6 multicast applications. 1821 * 1822 * Use the socket's current FIB number for any required FIB lookup. Look up the 1823 * group address in the unicast FIB, and use its ifp; usually, this points to 1824 * the default next-hop. If the FIB lookup fails, return NULL. 1825 * 1826 * FUTURE: Support multiple forwarding tables for IPv6. 1827 * 1828 * Returns NULL if no ifp could be found. 
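 *
 * A legacy application reaches this path when it leaves the interface
 * choice to the kernel, e.g. (illustrative userland sketch only):
 *
 *	struct ipv6_mreq mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	inet_pton(AF_INET6, "ff15::1234", &mreq.ipv6mr_multiaddr);
 *	mreq.ipv6mr_interface = 0;		(let the kernel pick)
 *	setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq));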
1829 */ 1830 static struct ifnet * 1831 in6p_lookup_mcast_ifp(const struct inpcb *inp, const struct sockaddr_in6 *gsin6) 1832 { 1833 struct nhop_object *nh; 1834 struct in6_addr dst; 1835 uint32_t scopeid; 1836 uint32_t fibnum; 1837 1838 KASSERT(gsin6->sin6_family == AF_INET6, 1839 ("%s: not AF_INET6 group", __func__)); 1840 1841 in6_splitscope(&gsin6->sin6_addr, &dst, &scopeid); 1842 fibnum = inp->inp_inc.inc_fibnum; 1843 nh = fib6_lookup(fibnum, &dst, scopeid, 0, 0); 1844 1845 return (nh ? nh->nh_ifp : NULL); 1846 } 1847 1848 /* 1849 * Join an IPv6 multicast group, possibly with a source. 1850 * 1851 * FIXME: The KAME use of the unspecified address (::) 1852 * to join *all* multicast groups is currently unsupported. 1853 */ 1854 static int 1855 in6p_join_group(struct inpcb *inp, struct sockopt *sopt) 1856 { 1857 struct in6_multi_head inmh; 1858 struct group_source_req gsr; 1859 sockunion_t *gsa, *ssa; 1860 struct ifnet *ifp; 1861 struct in6_mfilter *imf; 1862 struct ip6_moptions *imo; 1863 struct in6_multi *inm; 1864 struct in6_msource *lims; 1865 int error, is_new; 1866 1867 SLIST_INIT(&inmh); 1868 ifp = NULL; 1869 lims = NULL; 1870 error = 0; 1871 1872 memset(&gsr, 0, sizeof(struct group_source_req)); 1873 gsa = (sockunion_t *)&gsr.gsr_group; 1874 gsa->ss.ss_family = AF_UNSPEC; 1875 ssa = (sockunion_t *)&gsr.gsr_source; 1876 ssa->ss.ss_family = AF_UNSPEC; 1877 1878 /* 1879 * Chew everything into struct group_source_req. 1880 * Overwrite the port field if present, as the sockaddr 1881 * being copied in may be matched with a binary comparison. 1882 * Ignore passed-in scope ID. 1883 */ 1884 switch (sopt->sopt_name) { 1885 case IPV6_JOIN_GROUP: { 1886 struct ipv6_mreq mreq; 1887 1888 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 1889 sizeof(struct ipv6_mreq)); 1890 if (error) 1891 return (error); 1892 1893 gsa->sin6.sin6_family = AF_INET6; 1894 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 1895 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 1896 1897 if (mreq.ipv6mr_interface == 0) { 1898 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 1899 } else { 1900 if (V_if_index < mreq.ipv6mr_interface) 1901 return (EADDRNOTAVAIL); 1902 ifp = ifnet_byindex(mreq.ipv6mr_interface); 1903 } 1904 CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p", 1905 __func__, mreq.ipv6mr_interface, ifp); 1906 } break; 1907 1908 case MCAST_JOIN_GROUP: 1909 case MCAST_JOIN_SOURCE_GROUP: 1910 if (sopt->sopt_name == MCAST_JOIN_GROUP) { 1911 error = sooptcopyin(sopt, &gsr, 1912 sizeof(struct group_req), 1913 sizeof(struct group_req)); 1914 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1915 error = sooptcopyin(sopt, &gsr, 1916 sizeof(struct group_source_req), 1917 sizeof(struct group_source_req)); 1918 } 1919 if (error) 1920 return (error); 1921 1922 if (gsa->sin6.sin6_family != AF_INET6 || 1923 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1924 return (EINVAL); 1925 1926 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1927 if (ssa->sin6.sin6_family != AF_INET6 || 1928 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1929 return (EINVAL); 1930 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 1931 return (EINVAL); 1932 /* 1933 * TODO: Validate embedded scope ID in source 1934 * list entry against passed-in ifp, if and only 1935 * if source list filter entry is iface or node local. 
1936 */ 1937 in6_clearscope(&ssa->sin6.sin6_addr); 1938 ssa->sin6.sin6_port = 0; 1939 ssa->sin6.sin6_scope_id = 0; 1940 } 1941 1942 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1943 return (EADDRNOTAVAIL); 1944 ifp = ifnet_byindex(gsr.gsr_interface); 1945 break; 1946 1947 default: 1948 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1949 __func__, sopt->sopt_name); 1950 return (EOPNOTSUPP); 1951 break; 1952 } 1953 1954 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1955 return (EINVAL); 1956 1957 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 1958 return (EADDRNOTAVAIL); 1959 1960 gsa->sin6.sin6_port = 0; 1961 gsa->sin6.sin6_scope_id = 0; 1962 1963 /* 1964 * Always set the scope zone ID on memberships created from userland. 1965 * Use the passed-in ifp to do this. 1966 * XXX The in6_setscope() return value is meaningless. 1967 * XXX SCOPE6_LOCK() is taken by in6_setscope(). 1968 */ 1969 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1970 1971 IN6_MULTI_LOCK(); 1972 1973 /* 1974 * Find the membership in the membership list. 1975 */ 1976 imo = in6p_findmoptions(inp); 1977 imf = im6o_match_group(imo, ifp, &gsa->sa); 1978 if (imf == NULL) { 1979 is_new = 1; 1980 inm = NULL; 1981 1982 if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) { 1983 error = ENOMEM; 1984 goto out_in6p_locked; 1985 } 1986 } else { 1987 is_new = 0; 1988 inm = imf->im6f_in6m; 1989 1990 if (ssa->ss.ss_family != AF_UNSPEC) { 1991 /* 1992 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 1993 * is an error. On an existing inclusive membership, 1994 * it just adds the source to the filter list. 1995 */ 1996 if (imf->im6f_st[1] != MCAST_INCLUDE) { 1997 error = EINVAL; 1998 goto out_in6p_locked; 1999 } 2000 /* 2001 * Throw out duplicates. 2002 * 2003 * XXX FIXME: This makes a naive assumption that 2004 * even if entries exist for *ssa in this imf, 2005 * they will be rejected as dupes, even if they 2006 * are not valid in the current mode (in-mode). 2007 * 2008 * in6_msource is transactioned just as for anything 2009 * else in SSM -- but note naive use of in6m_graft() 2010 * below for allocating new filter entries. 2011 * 2012 * This is only an issue if someone mixes the 2013 * full-state SSM API with the delta-based API, 2014 * which is discouraged in the relevant RFCs. 2015 */ 2016 lims = im6o_match_source(imf, &ssa->sa); 2017 if (lims != NULL /*&& 2018 lims->im6sl_st[1] == MCAST_INCLUDE*/) { 2019 error = EADDRNOTAVAIL; 2020 goto out_in6p_locked; 2021 } 2022 } else { 2023 /* 2024 * MCAST_JOIN_GROUP alone, on any existing membership, 2025 * is rejected, to stop the same inpcb tying up 2026 * multiple refs to the in_multi. 2027 * On an existing inclusive membership, this is also 2028 * an error; if you want to change filter mode, 2029 * you must use the userland API setsourcefilter(). 2030 * XXX We don't reject this for imf in UNDEFINED 2031 * state at t1, because allocation of a filter 2032 * is atomic with allocation of a membership. 2033 */ 2034 error = EINVAL; 2035 goto out_in6p_locked; 2036 } 2037 } 2038 2039 /* 2040 * Begin state merge transaction at socket layer. 2041 */ 2042 INP_WLOCK_ASSERT(inp); 2043 2044 /* 2045 * Graft new source into filter list for this inpcb's 2046 * membership of the group. The in6_multi may not have 2047 * been allocated yet if this is a new membership, however, 2048 * the in_mfilter slot will be allocated and must be initialized. 2049 * 2050 * Note: Grafting of exclusive mode filters doesn't happen 2051 * in this path. 
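 *
 * As a worked example of the t0/t1 transaction used here: a brand-new
 * MCAST_JOIN_SOURCE_GROUP membership is allocated below with
 * (t0 = UNDEFINED, t1 = INCLUDE), im6f_graft() then records the source
 * with an INCLUDE state at t1, and a successful join lets im6f_commit()
 * roll the pending t1 state forward into t0. On failure the pending state
 * is either rolled back (existing membership) or the whole filter is
 * freed outright (new membership).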
2052 * XXX: Should check for non-NULL lims (node exists but may 2053 * not be in-mode) for interop with full-state API. 2054 */ 2055 if (ssa->ss.ss_family != AF_UNSPEC) { 2056 /* Membership starts in IN mode */ 2057 if (is_new) { 2058 CTR1(KTR_MLD, "%s: new join w/source", __func__); 2059 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE); 2060 if (imf == NULL) { 2061 error = ENOMEM; 2062 goto out_in6p_locked; 2063 } 2064 } else { 2065 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 2066 } 2067 lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); 2068 if (lims == NULL) { 2069 CTR1(KTR_MLD, "%s: merge imf state failed", 2070 __func__); 2071 error = ENOMEM; 2072 goto out_in6p_locked; 2073 } 2074 } else { 2075 /* No address specified; Membership starts in EX mode */ 2076 if (is_new) { 2077 CTR1(KTR_MLD, "%s: new join w/o source", __func__); 2078 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE); 2079 if (imf == NULL) { 2080 error = ENOMEM; 2081 goto out_in6p_locked; 2082 } 2083 } 2084 } 2085 2086 /* 2087 * Begin state merge transaction at MLD layer. 2088 */ 2089 if (is_new) { 2090 in_pcbref(inp); 2091 INP_WUNLOCK(inp); 2092 2093 error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf, 2094 &imf->im6f_in6m, 0); 2095 2096 INP_WLOCK(inp); 2097 if (in_pcbrele_wlocked(inp)) { 2098 error = ENXIO; 2099 goto out_in6p_unlocked; 2100 } 2101 if (error) { 2102 goto out_in6p_locked; 2103 } 2104 /* 2105 * NOTE: Refcount from in6_joingroup_locked() 2106 * is protecting membership. 2107 */ 2108 ip6_mfilter_insert(&imo->im6o_head, imf); 2109 } else { 2110 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2111 IN6_MULTI_LIST_LOCK(); 2112 error = in6m_merge(inm, imf); 2113 if (error) { 2114 CTR1(KTR_MLD, "%s: failed to merge inm state", 2115 __func__); 2116 IN6_MULTI_LIST_UNLOCK(); 2117 im6f_rollback(imf); 2118 im6f_reap(imf); 2119 goto out_in6p_locked; 2120 } 2121 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2122 error = mld_change_state(inm, 0); 2123 IN6_MULTI_LIST_UNLOCK(); 2124 2125 if (error) { 2126 CTR1(KTR_MLD, "%s: failed mld downcall", 2127 __func__); 2128 im6f_rollback(imf); 2129 im6f_reap(imf); 2130 goto out_in6p_locked; 2131 } 2132 } 2133 2134 im6f_commit(imf); 2135 imf = NULL; 2136 2137 out_in6p_locked: 2138 INP_WUNLOCK(inp); 2139 out_in6p_unlocked: 2140 IN6_MULTI_UNLOCK(); 2141 2142 if (is_new && imf) { 2143 if (imf->im6f_in6m != NULL) { 2144 struct in6_multi_head inmh; 2145 2146 SLIST_INIT(&inmh); 2147 SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer); 2148 in6m_release_list_deferred(&inmh); 2149 } 2150 ip6_mfilter_free(imf); 2151 } 2152 return (error); 2153 } 2154 2155 /* 2156 * Leave an IPv6 multicast group on an inpcb, possibly with a source. 
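 *
 * Illustrative only (a hedged userland sketch, not kernel code): a full
 * leave of a previously joined group might look like
 *
 *      struct group_req gr;
 *      memset(&gr, 0, sizeof(gr));
 *      gr.gr_interface = ifindex;
 *      memcpy(&gr.gr_group, &sa6, sizeof(struct sockaddr_in6));
 *      setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_GROUP, &gr, sizeof(gr));
 *
 * where ifindex, sa6 and s stand in for the caller's interface index, the
 * sockaddr_in6 used at join time, and the joined socket.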
2157 */ 2158 static int 2159 in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) 2160 { 2161 struct ipv6_mreq mreq; 2162 struct group_source_req gsr; 2163 sockunion_t *gsa, *ssa; 2164 struct ifnet *ifp; 2165 struct in6_mfilter *imf; 2166 struct ip6_moptions *imo; 2167 struct in6_msource *ims; 2168 struct in6_multi *inm; 2169 uint32_t ifindex; 2170 int error; 2171 bool is_final; 2172 #ifdef KTR 2173 char ip6tbuf[INET6_ADDRSTRLEN]; 2174 #endif 2175 2176 ifp = NULL; 2177 ifindex = 0; 2178 error = 0; 2179 is_final = true; 2180 2181 memset(&gsr, 0, sizeof(struct group_source_req)); 2182 gsa = (sockunion_t *)&gsr.gsr_group; 2183 gsa->ss.ss_family = AF_UNSPEC; 2184 ssa = (sockunion_t *)&gsr.gsr_source; 2185 ssa->ss.ss_family = AF_UNSPEC; 2186 2187 /* 2188 * Chew everything passed in up into a struct group_source_req 2189 * as that is easier to process. 2190 * Note: Any embedded scope ID in the multicast group passed 2191 * in by userland is ignored, the interface index is the recommended 2192 * mechanism to specify an interface; see below. 2193 */ 2194 switch (sopt->sopt_name) { 2195 case IPV6_LEAVE_GROUP: 2196 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 2197 sizeof(struct ipv6_mreq)); 2198 if (error) 2199 return (error); 2200 gsa->sin6.sin6_family = AF_INET6; 2201 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 2202 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 2203 gsa->sin6.sin6_port = 0; 2204 gsa->sin6.sin6_scope_id = 0; 2205 ifindex = mreq.ipv6mr_interface; 2206 break; 2207 2208 case MCAST_LEAVE_GROUP: 2209 case MCAST_LEAVE_SOURCE_GROUP: 2210 if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2211 error = sooptcopyin(sopt, &gsr, 2212 sizeof(struct group_req), 2213 sizeof(struct group_req)); 2214 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2215 error = sooptcopyin(sopt, &gsr, 2216 sizeof(struct group_source_req), 2217 sizeof(struct group_source_req)); 2218 } 2219 if (error) 2220 return (error); 2221 2222 if (gsa->sin6.sin6_family != AF_INET6 || 2223 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2224 return (EINVAL); 2225 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2226 if (ssa->sin6.sin6_family != AF_INET6 || 2227 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2228 return (EINVAL); 2229 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 2230 return (EINVAL); 2231 /* 2232 * TODO: Validate embedded scope ID in source 2233 * list entry against passed-in ifp, if and only 2234 * if source list filter entry is iface or node local. 2235 */ 2236 in6_clearscope(&ssa->sin6.sin6_addr); 2237 } 2238 gsa->sin6.sin6_port = 0; 2239 gsa->sin6.sin6_scope_id = 0; 2240 ifindex = gsr.gsr_interface; 2241 break; 2242 2243 default: 2244 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 2245 __func__, sopt->sopt_name); 2246 return (EOPNOTSUPP); 2247 break; 2248 } 2249 2250 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2251 return (EINVAL); 2252 2253 /* 2254 * Validate interface index if provided. If no interface index 2255 * was provided separately, attempt to look the membership up 2256 * from the default scope as a last resort to disambiguate 2257 * the membership we are being asked to leave. 2258 * XXX SCOPE6 lock potentially taken here. 
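 *
 * Note that in the KAME-embedded form produced by sa6_embedscope() below,
 * the zone ID of a link- or interface-local group is carried in the second
 * 16-bit word of the address (s6_addr16[1]); for example ff02::1 in zone 3
 * is held internally as ff02:3::1. That is why this word is consulted when
 * no explicit interface index was given.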
2259 */ 2260 if (ifindex != 0) { 2261 if (V_if_index < ifindex) 2262 return (EADDRNOTAVAIL); 2263 ifp = ifnet_byindex(ifindex); 2264 if (ifp == NULL) 2265 return (EADDRNOTAVAIL); 2266 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2267 } else { 2268 error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); 2269 if (error) 2270 return (EADDRNOTAVAIL); 2271 /* 2272 * Some badly behaved applications don't pass an ifindex 2273 * or a scope ID, which is an API violation. In this case, 2274 * perform a lookup as per a v6 join. 2275 * 2276 * XXX For now, stomp on zone ID for the corner case. 2277 * This is not the 'KAME way', but we need to see the ifp 2278 * directly until such time as this implementation is 2279 * refactored, assuming the scope IDs are the way to go. 2280 */ 2281 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]); 2282 if (ifindex == 0) { 2283 CTR2(KTR_MLD, "%s: warning: no ifindex, looking up " 2284 "ifp for group %s.", __func__, 2285 ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr)); 2286 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 2287 } else { 2288 ifp = ifnet_byindex(ifindex); 2289 } 2290 if (ifp == NULL) 2291 return (EADDRNOTAVAIL); 2292 } 2293 2294 CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp); 2295 KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__)); 2296 2297 IN6_MULTI_LOCK(); 2298 2299 /* 2300 * Find the membership in the membership list. 2301 */ 2302 imo = in6p_findmoptions(inp); 2303 imf = im6o_match_group(imo, ifp, &gsa->sa); 2304 if (imf == NULL) { 2305 error = EADDRNOTAVAIL; 2306 goto out_in6p_locked; 2307 } 2308 inm = imf->im6f_in6m; 2309 2310 if (ssa->ss.ss_family != AF_UNSPEC) 2311 is_final = false; 2312 2313 /* 2314 * Begin state merge transaction at socket layer. 2315 */ 2316 INP_WLOCK_ASSERT(inp); 2317 2318 /* 2319 * If we were instructed only to leave a given source, do so. 2320 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2321 */ 2322 if (is_final) { 2323 ip6_mfilter_remove(&imo->im6o_head, imf); 2324 im6f_leave(imf); 2325 2326 /* 2327 * Give up the multicast address record to which 2328 * the membership points. 2329 */ 2330 (void)in6_leavegroup_locked(inm, imf); 2331 } else { 2332 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 2333 error = EADDRNOTAVAIL; 2334 goto out_in6p_locked; 2335 } 2336 ims = im6o_match_source(imf, &ssa->sa); 2337 if (ims == NULL) { 2338 CTR3(KTR_MLD, "%s: source %p %spresent", __func__, 2339 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 2340 "not "); 2341 error = EADDRNOTAVAIL; 2342 goto out_in6p_locked; 2343 } 2344 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 2345 error = im6f_prune(imf, &ssa->sin6); 2346 if (error) { 2347 CTR1(KTR_MLD, "%s: merge imf state failed", 2348 __func__); 2349 goto out_in6p_locked; 2350 } 2351 } 2352 2353 /* 2354 * Begin state merge transaction at MLD layer. 
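 *
 * Only the source-prune case (!is_final) needs an explicit in6m_merge()
 * and MLD downcall at this point; the final-leave case was already handed
 * to in6_leavegroup_locked() above and needs no further work here before
 * the filter is committed, reaped and freed.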
2355 */ 2356 if (!is_final) { 2357 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2358 IN6_MULTI_LIST_LOCK(); 2359 error = in6m_merge(inm, imf); 2360 if (error) { 2361 CTR1(KTR_MLD, "%s: failed to merge inm state", 2362 __func__); 2363 IN6_MULTI_LIST_UNLOCK(); 2364 im6f_rollback(imf); 2365 im6f_reap(imf); 2366 goto out_in6p_locked; 2367 } 2368 2369 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2370 error = mld_change_state(inm, 0); 2371 IN6_MULTI_LIST_UNLOCK(); 2372 if (error) { 2373 CTR1(KTR_MLD, "%s: failed mld downcall", 2374 __func__); 2375 im6f_rollback(imf); 2376 im6f_reap(imf); 2377 goto out_in6p_locked; 2378 } 2379 } 2380 2381 im6f_commit(imf); 2382 im6f_reap(imf); 2383 2384 out_in6p_locked: 2385 INP_WUNLOCK(inp); 2386 2387 if (is_final && imf) 2388 ip6_mfilter_free(imf); 2389 2390 IN6_MULTI_UNLOCK(); 2391 return (error); 2392 } 2393 2394 /* 2395 * Select the interface for transmitting IPv6 multicast datagrams. 2396 * 2397 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn 2398 * may be passed to this socket option. An address of in6addr_any or an 2399 * interface index of 0 is used to remove a previous selection. 2400 * When no interface is selected, one is chosen for every send. 2401 */ 2402 static int 2403 in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) 2404 { 2405 struct ifnet *ifp; 2406 struct ip6_moptions *imo; 2407 u_int ifindex; 2408 int error; 2409 2410 if (sopt->sopt_valsize != sizeof(u_int)) 2411 return (EINVAL); 2412 2413 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); 2414 if (error) 2415 return (error); 2416 if (V_if_index < ifindex) 2417 return (EINVAL); 2418 if (ifindex == 0) 2419 ifp = NULL; 2420 else { 2421 ifp = ifnet_byindex(ifindex); 2422 if (ifp == NULL) 2423 return (EINVAL); 2424 if ((ifp->if_flags & IFF_MULTICAST) == 0) 2425 return (EADDRNOTAVAIL); 2426 } 2427 imo = in6p_findmoptions(inp); 2428 imo->im6o_multicast_ifp = ifp; 2429 INP_WUNLOCK(inp); 2430 2431 return (0); 2432 } 2433 2434 /* 2435 * Atomically set source filters on a socket for an IPv6 multicast group. 2436 * 2437 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 2438 */ 2439 static int 2440 in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) 2441 { 2442 struct __msfilterreq msfr; 2443 sockunion_t *gsa; 2444 struct ifnet *ifp; 2445 struct in6_mfilter *imf; 2446 struct ip6_moptions *imo; 2447 struct in6_multi *inm; 2448 int error; 2449 2450 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 2451 sizeof(struct __msfilterreq)); 2452 if (error) 2453 return (error); 2454 2455 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 2456 return (ENOBUFS); 2457 2458 if (msfr.msfr_fmode != MCAST_EXCLUDE && 2459 msfr.msfr_fmode != MCAST_INCLUDE) 2460 return (EINVAL); 2461 2462 if (msfr.msfr_group.ss_family != AF_INET6 || 2463 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 2464 return (EINVAL); 2465 2466 gsa = (sockunion_t *)&msfr.msfr_group; 2467 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2468 return (EINVAL); 2469 2470 gsa->sin6.sin6_port = 0; /* ignore port */ 2471 2472 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 2473 return (EADDRNOTAVAIL); 2474 ifp = ifnet_byindex(msfr.msfr_ifindex); 2475 if (ifp == NULL) 2476 return (EADDRNOTAVAIL); 2477 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2478 2479 /* 2480 * Take the INP write lock. 2481 * Check if this socket is a member of this group. 
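 *
 * in6p_findmoptions() below returns with the INP write lock held, which is
 * what the INP_WLOCK_ASSERT() further down relies on. Userland normally
 * arrives here via setsourcefilter(3), which is believed to marshal its
 * arguments into the struct __msfilterreq consumed above, roughly as in
 * this hedged sketch (grp/srcs/ifindex are caller-supplied):
 *
 *      setsourcefilter(s, ifindex, (struct sockaddr *)&grp,
 *          sizeof(struct sockaddr_in6), MCAST_EXCLUDE, nsrcs, srcs);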
2482 */ 2483 imo = in6p_findmoptions(inp); 2484 imf = im6o_match_group(imo, ifp, &gsa->sa); 2485 if (imf == NULL) { 2486 error = EADDRNOTAVAIL; 2487 goto out_in6p_locked; 2488 } 2489 inm = imf->im6f_in6m; 2490 2491 /* 2492 * Begin state merge transaction at socket layer. 2493 */ 2494 INP_WLOCK_ASSERT(inp); 2495 2496 imf->im6f_st[1] = msfr.msfr_fmode; 2497 2498 /* 2499 * Apply any new source filters, if present. 2500 * Make a copy of the user-space source vector so 2501 * that we may copy them with a single copyin. This 2502 * allows us to deal with page faults up-front. 2503 */ 2504 if (msfr.msfr_nsrcs > 0) { 2505 struct in6_msource *lims; 2506 struct sockaddr_in6 *psin; 2507 struct sockaddr_storage *kss, *pkss; 2508 int i; 2509 2510 INP_WUNLOCK(inp); 2511 2512 CTR2(KTR_MLD, "%s: loading %lu source list entries", 2513 __func__, (unsigned long)msfr.msfr_nsrcs); 2514 kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 2515 M_TEMP, M_WAITOK); 2516 error = copyin(msfr.msfr_srcs, kss, 2517 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 2518 if (error) { 2519 free(kss, M_TEMP); 2520 return (error); 2521 } 2522 2523 INP_WLOCK(inp); 2524 2525 /* 2526 * Mark all source filters as UNDEFINED at t1. 2527 * Restore new group filter mode, as im6f_leave() 2528 * will set it to INCLUDE. 2529 */ 2530 im6f_leave(imf); 2531 imf->im6f_st[1] = msfr.msfr_fmode; 2532 2533 /* 2534 * Update socket layer filters at t1, lazy-allocating 2535 * new entries. This saves a bunch of memory at the 2536 * cost of one RB_FIND() per source entry; duplicate 2537 * entries in the msfr_nsrcs vector are ignored. 2538 * If we encounter an error, rollback transaction. 2539 * 2540 * XXX This too could be replaced with a set-symmetric 2541 * difference like loop to avoid walking from root 2542 * every time, as the key space is common. 2543 */ 2544 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2545 psin = (struct sockaddr_in6 *)pkss; 2546 if (psin->sin6_family != AF_INET6) { 2547 error = EAFNOSUPPORT; 2548 break; 2549 } 2550 if (psin->sin6_len != sizeof(struct sockaddr_in6)) { 2551 error = EINVAL; 2552 break; 2553 } 2554 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { 2555 error = EINVAL; 2556 break; 2557 } 2558 /* 2559 * TODO: Validate embedded scope ID in source 2560 * list entry against passed-in ifp, if and only 2561 * if source list filter entry is iface or node local. 2562 */ 2563 in6_clearscope(&psin->sin6_addr); 2564 error = im6f_get_source(imf, psin, &lims); 2565 if (error) 2566 break; 2567 lims->im6sl_st[1] = imf->im6f_st[1]; 2568 } 2569 free(kss, M_TEMP); 2570 } 2571 2572 if (error) 2573 goto out_im6f_rollback; 2574 2575 INP_WLOCK_ASSERT(inp); 2576 IN6_MULTI_LIST_LOCK(); 2577 2578 /* 2579 * Begin state merge transaction at MLD layer. 2580 */ 2581 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2582 error = in6m_merge(inm, imf); 2583 if (error) 2584 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 2585 else { 2586 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2587 error = mld_change_state(inm, 0); 2588 if (error) 2589 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 2590 } 2591 2592 IN6_MULTI_LIST_UNLOCK(); 2593 2594 out_im6f_rollback: 2595 if (error) 2596 im6f_rollback(imf); 2597 else 2598 im6f_commit(imf); 2599 2600 im6f_reap(imf); 2601 2602 out_in6p_locked: 2603 INP_WUNLOCK(inp); 2604 return (error); 2605 } 2606 2607 /* 2608 * Set the IP multicast options in response to user setsockopt(). 
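 *
 * Illustrative only (a hedged userland sketch, not kernel code): a caller
 * adjusting the outgoing multicast hop limit would do
 *
 *      int hlim = 32;          (a value of -1 restores the kernel default)
 *      setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &hlim, sizeof(hlim));
 *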
2609 * 2610 * Many of the socket options handled in this function duplicate the 2611 * functionality of socket options in the regular unicast API. However, 2612 * it is not possible to merge the duplicate code, because the idempotence 2613 * of the IPv6 multicast part of the BSD Sockets API must be preserved; 2614 * the effects of these options must be treated as separate and distinct. 2615 * 2616 * SMPng: XXX: Unlocked read of inp_socket believed OK. 2617 */ 2618 int 2619 ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2620 { 2621 struct ip6_moptions *im6o; 2622 int error; 2623 2624 error = 0; 2625 2626 /* 2627 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 2628 * or is a divert socket, reject it. 2629 */ 2630 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 2631 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2632 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) 2633 return (EOPNOTSUPP); 2634 2635 switch (sopt->sopt_name) { 2636 case IPV6_MULTICAST_IF: 2637 error = in6p_set_multicast_if(inp, sopt); 2638 break; 2639 2640 case IPV6_MULTICAST_HOPS: { 2641 int hlim; 2642 2643 if (sopt->sopt_valsize != sizeof(int)) { 2644 error = EINVAL; 2645 break; 2646 } 2647 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); 2648 if (error) 2649 break; 2650 if (hlim < -1 || hlim > 255) { 2651 error = EINVAL; 2652 break; 2653 } else if (hlim == -1) { 2654 hlim = V_ip6_defmcasthlim; 2655 } 2656 im6o = in6p_findmoptions(inp); 2657 im6o->im6o_multicast_hlim = hlim; 2658 INP_WUNLOCK(inp); 2659 break; 2660 } 2661 2662 case IPV6_MULTICAST_LOOP: { 2663 u_int loop; 2664 2665 /* 2666 * Set the loopback flag for outgoing multicast packets. 2667 * Must be zero or one. 2668 */ 2669 if (sopt->sopt_valsize != sizeof(u_int)) { 2670 error = EINVAL; 2671 break; 2672 } 2673 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); 2674 if (error) 2675 break; 2676 if (loop > 1) { 2677 error = EINVAL; 2678 break; 2679 } 2680 im6o = in6p_findmoptions(inp); 2681 im6o->im6o_multicast_loop = loop; 2682 INP_WUNLOCK(inp); 2683 break; 2684 } 2685 2686 case IPV6_JOIN_GROUP: 2687 case MCAST_JOIN_GROUP: 2688 case MCAST_JOIN_SOURCE_GROUP: 2689 error = in6p_join_group(inp, sopt); 2690 break; 2691 2692 case IPV6_LEAVE_GROUP: 2693 case MCAST_LEAVE_GROUP: 2694 case MCAST_LEAVE_SOURCE_GROUP: 2695 error = in6p_leave_group(inp, sopt); 2696 break; 2697 2698 case MCAST_BLOCK_SOURCE: 2699 case MCAST_UNBLOCK_SOURCE: 2700 error = in6p_block_unblock_source(inp, sopt); 2701 break; 2702 2703 case IPV6_MSFILTER: 2704 error = in6p_set_source_filters(inp, sopt); 2705 break; 2706 2707 default: 2708 error = EOPNOTSUPP; 2709 break; 2710 } 2711 2712 INP_UNLOCK_ASSERT(inp); 2713 2714 return (error); 2715 } 2716 2717 /* 2718 * Expose MLD's multicast filter mode and source list(s) to userland, 2719 * keyed by (ifindex, group). 2720 * The filter mode is written out as a uint32_t, followed by 2721 * 0..n of struct in6_addr. 2722 * For use by ifmcstat(8). 2723 * SMPng: NOTE: unlocked read of ifindex space. 
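 *
 * The last five MIB name components are the interface index followed by
 * the four 32-bit words of the group address. A consumer such as
 * ifmcstat(8) might therefore build a request roughly as follows (hedged
 * sketch; the OID prefix of this node is assumed to have been resolved
 * into mib[0..prefixlen-1] already):
 *
 *      mib[prefixlen + 0] = ifindex;
 *      memcpy(&mib[prefixlen + 1], &group, sizeof(struct in6_addr));
 *      sysctl(mib, prefixlen + 5, buf, &len, NULL, 0);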
2724 */ 2725 static int 2726 sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) 2727 { 2728 struct in6_addr mcaddr; 2729 struct in6_addr src; 2730 struct epoch_tracker et; 2731 struct ifnet *ifp; 2732 struct ifmultiaddr *ifma; 2733 struct in6_multi *inm; 2734 struct ip6_msource *ims; 2735 int *name; 2736 int retval; 2737 u_int namelen; 2738 uint32_t fmode, ifindex; 2739 #ifdef KTR 2740 char ip6tbuf[INET6_ADDRSTRLEN]; 2741 #endif 2742 2743 name = (int *)arg1; 2744 namelen = arg2; 2745 2746 if (req->newptr != NULL) 2747 return (EPERM); 2748 2749 /* int: ifindex + 4 * 32 bits of IPv6 address */ 2750 if (namelen != 5) 2751 return (EINVAL); 2752 2753 ifindex = name[0]; 2754 if (ifindex <= 0 || ifindex > V_if_index) { 2755 CTR2(KTR_MLD, "%s: ifindex %u out of range", 2756 __func__, ifindex); 2757 return (ENOENT); 2758 } 2759 2760 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); 2761 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { 2762 CTR2(KTR_MLD, "%s: group %s is not multicast", 2763 __func__, ip6_sprintf(ip6tbuf, &mcaddr)); 2764 return (EINVAL); 2765 } 2766 2767 NET_EPOCH_ENTER(et); 2768 ifp = ifnet_byindex(ifindex); 2769 if (ifp == NULL) { 2770 NET_EPOCH_EXIT(et); 2771 CTR2(KTR_MLD, "%s: no ifp for ifindex %u", 2772 __func__, ifindex); 2773 return (ENOENT); 2774 } 2775 /* 2776 * Internal MLD lookups require that scope/zone ID is set. 2777 */ 2778 (void)in6_setscope(&mcaddr, ifp, NULL); 2779 2780 retval = sysctl_wire_old_buffer(req, 2781 sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); 2782 if (retval) { 2783 NET_EPOCH_EXIT(et); 2784 return (retval); 2785 } 2786 2787 IN6_MULTI_LOCK(); 2788 IN6_MULTI_LIST_LOCK(); 2789 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2790 inm = in6m_ifmultiaddr_get_inm(ifma); 2791 if (inm == NULL) 2792 continue; 2793 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) 2794 continue; 2795 fmode = inm->in6m_st[1].iss_fmode; 2796 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); 2797 if (retval != 0) 2798 break; 2799 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 2800 CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); 2801 /* 2802 * Only copy-out sources which are in-mode. 2803 */ 2804 if (fmode != im6s_get_mode(inm, ims, 1)) { 2805 CTR1(KTR_MLD, "%s: skip non-in-mode", 2806 __func__); 2807 continue; 2808 } 2809 src = ims->im6s_addr; 2810 retval = SYSCTL_OUT(req, &src, 2811 sizeof(struct in6_addr)); 2812 if (retval != 0) 2813 break; 2814 } 2815 } 2816 IN6_MULTI_LIST_UNLOCK(); 2817 IN6_MULTI_UNLOCK(); 2818 NET_EPOCH_EXIT(et); 2819 2820 return (retval); 2821 } 2822 2823 #ifdef KTR 2824 2825 static const char *in6m_modestrs[] = { "un", "in", "ex" }; 2826 2827 static const char * 2828 in6m_mode_str(const int mode) 2829 { 2830 2831 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) 2832 return (in6m_modestrs[mode]); 2833 return ("??"); 2834 } 2835 2836 static const char *in6m_statestrs[] = { 2837 "not-member", 2838 "silent", 2839 "idle", 2840 "lazy", 2841 "sleeping", 2842 "awakening", 2843 "query-pending", 2844 "sg-query-pending", 2845 "leaving" 2846 }; 2847 2848 static const char * 2849 in6m_state_str(const int state) 2850 { 2851 2852 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) 2853 return (in6m_statestrs[state]); 2854 return ("??"); 2855 } 2856 2857 /* 2858 * Dump an in6_multi structure to the console. 
2859 */ 2860 void 2861 in6m_print(const struct in6_multi *inm) 2862 { 2863 int t; 2864 char ip6tbuf[INET6_ADDRSTRLEN]; 2865 2866 if ((ktr_mask & KTR_MLD) == 0) 2867 return; 2868 2869 printf("%s: --- begin in6m %p ---\n", __func__, inm); 2870 printf("addr %s ifp %p(%s) ifma %p\n", 2871 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2872 inm->in6m_ifp, 2873 if_name(inm->in6m_ifp), 2874 inm->in6m_ifma); 2875 printf("timer %u state %s refcount %u scq.len %u\n", 2876 inm->in6m_timer, 2877 in6m_state_str(inm->in6m_state), 2878 inm->in6m_refcount, 2879 mbufq_len(&inm->in6m_scq)); 2880 printf("mli %p nsrc %lu sctimer %u scrv %u\n", 2881 inm->in6m_mli, 2882 inm->in6m_nsrc, 2883 inm->in6m_sctimer, 2884 inm->in6m_scrv); 2885 for (t = 0; t < 2; t++) { 2886 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, 2887 in6m_mode_str(inm->in6m_st[t].iss_fmode), 2888 inm->in6m_st[t].iss_asm, 2889 inm->in6m_st[t].iss_ex, 2890 inm->in6m_st[t].iss_in, 2891 inm->in6m_st[t].iss_rec); 2892 } 2893 printf("%s: --- end in6m %p ---\n", __func__, inm); 2894 } 2895 2896 #else /* !KTR */ 2897 2898 void 2899 in6m_print(const struct in6_multi *inm) 2900 { 2901 2902 } 2903 2904 #endif /* KTR */ 2905