1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2009 Bruce Simpson. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of the author may not be used to endorse or promote 16 * products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /* 33 * IPv6 multicast socket, group, and socket option processing module. 34 * Normative references: RFC 2292, RFC 3492, RFC 3542, RFC 3678, RFC 3810. 35 */ 36 37 #include <sys/cdefs.h> 38 #include "opt_inet6.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/ktr.h> 44 #include <sys/malloc.h> 45 #include <sys/mbuf.h> 46 #include <sys/protosw.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sysctl.h> 50 #include <sys/priv.h> 51 #include <sys/taskqueue.h> 52 #include <sys/tree.h> 53 54 #include <net/if.h> 55 #include <net/if_var.h> 56 #include <net/if_dl.h> 57 #include <net/if_private.h> 58 #include <net/route.h> 59 #include <net/route/nhop.h> 60 #include <net/vnet.h> 61 62 #include <netinet/in.h> 63 #include <netinet/udp.h> 64 #include <netinet/in_var.h> 65 #include <netinet/ip_var.h> 66 #include <netinet/udp_var.h> 67 #include <netinet6/in6_fib.h> 68 #include <netinet6/in6_var.h> 69 #include <netinet/ip6.h> 70 #include <netinet/icmp6.h> 71 #include <netinet6/ip6_var.h> 72 #include <netinet/in_pcb.h> 73 #include <netinet/tcp_var.h> 74 #include <netinet6/nd6.h> 75 #include <netinet6/mld6_var.h> 76 #include <netinet6/scope6_var.h> 77 78 #ifndef KTR_MLD 79 #define KTR_MLD KTR_INET6 80 #endif 81 82 #ifndef __SOCKUNION_DECLARED 83 union sockunion { 84 struct sockaddr_storage ss; 85 struct sockaddr sa; 86 struct sockaddr_dl sdl; 87 struct sockaddr_in6 sin6; 88 }; 89 typedef union sockunion sockunion_t; 90 #define __SOCKUNION_DECLARED 91 #endif /* __SOCKUNION_DECLARED */ 92 93 static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter", 94 "IPv6 multicast PCB-layer source filter"); 95 MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group"); 96 static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options"); 97 static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource", 98 "IPv6 multicast MLD-layer source filter"); 99 100 RB_GENERATE(ip6_msource_tree, 
ip6_msource, im6s_link, ip6_msource_cmp); 101 102 /* 103 * Locking: 104 * - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK, 105 * IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK. 106 * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however 107 * it can be taken by code in net/if.c also. 108 * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK. 109 * 110 * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly 111 * any need for in6_multi itself to be virtualized -- it is bound to an ifp 112 * anyway no matter what happens. 113 */ 114 struct mtx in6_multi_list_mtx; 115 MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF); 116 117 struct mtx in6_multi_free_mtx; 118 MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF); 119 120 struct sx in6_multi_sx; 121 SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx"); 122 123 static void im6f_commit(struct in6_mfilter *); 124 static int im6f_get_source(struct in6_mfilter *imf, 125 const struct sockaddr_in6 *psin, 126 struct in6_msource **); 127 static struct in6_msource * 128 im6f_graft(struct in6_mfilter *, const uint8_t, 129 const struct sockaddr_in6 *); 130 static void im6f_leave(struct in6_mfilter *); 131 static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); 132 static void im6f_purge(struct in6_mfilter *); 133 static void im6f_rollback(struct in6_mfilter *); 134 static void im6f_reap(struct in6_mfilter *); 135 static struct in6_mfilter * 136 im6o_match_group(const struct ip6_moptions *, 137 const struct ifnet *, const struct sockaddr *); 138 static struct in6_msource * 139 im6o_match_source(struct in6_mfilter *, const struct sockaddr *); 140 static void im6s_merge(struct ip6_msource *ims, 141 const struct in6_msource *lims, const int rollback); 142 static int in6_getmulti(struct ifnet *, const struct in6_addr *, 143 struct in6_multi **); 144 static int in6_joingroup_locked(struct ifnet *, const struct in6_addr *, 145 struct in6_mfilter *, struct in6_multi **, int); 146 static int in6m_get_source(struct in6_multi *inm, 147 const struct in6_addr *addr, const int noalloc, 148 struct ip6_msource **pims); 149 #ifdef KTR 150 static int in6m_is_ifp_detached(const struct in6_multi *); 151 #endif 152 static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); 153 static void in6m_purge(struct in6_multi *); 154 static void in6m_reap(struct in6_multi *); 155 static struct ip6_moptions * 156 in6p_findmoptions(struct inpcb *); 157 static int in6p_get_source_filters(struct inpcb *, struct sockopt *); 158 static int in6p_join_group(struct inpcb *, struct sockopt *); 159 static int in6p_leave_group(struct inpcb *, struct sockopt *); 160 static struct ifnet * 161 in6p_lookup_mcast_ifp(const struct inpcb *, 162 const struct sockaddr_in6 *); 163 static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); 164 static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); 165 static int in6p_set_source_filters(struct inpcb *, struct sockopt *); 166 static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS); 167 168 SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. 
 */

static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "IPv6 multicast");

static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
    CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0,
    "Max source filters per group");

static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
    CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0,
    "Max source filters per socket");

/* TODO Virtualize this switch. */
int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP;
SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN,
    &in6_mcast_loop, 0, "Loopback multicast datagrams by default");

static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters,
    "Per-interface stack-wide source filters");

#ifdef KTR
/*
 * Inline function which wraps assertions for a valid ifp.
 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
 * is detached.
 */
static int __inline
in6m_is_ifp_detached(const struct in6_multi *inm)
{
	struct ifnet *ifp;

	KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
	ifp = inm->in6m_ifma->ifma_ifp;
	if (ifp != NULL) {
		/*
		 * Sanity check that network-layer notion of ifp is the
		 * same as that of link-layer.
		 */
		KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
	}

	return (ifp == NULL);
}
#endif

/*
 * Initialize an in6_mfilter structure to a known state at t0, t1
 * with an empty source filter list.
 */
static __inline void
im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
{
	memset(imf, 0, sizeof(struct in6_mfilter));
	RB_INIT(&imf->im6f_sources);
	imf->im6f_st[0] = st0;
	imf->im6f_st[1] = st1;
}

struct in6_mfilter *
ip6_mfilter_alloc(const int mflags, const int st0, const int st1)
{
	struct in6_mfilter *imf;

	imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags);

	if (imf != NULL)
		im6f_init(imf, st0, st1);

	return (imf);
}

void
ip6_mfilter_free(struct in6_mfilter *imf)
{

	im6f_purge(imf);
	free(imf, M_IN6MFILTER);
}

/*
 * Find an IPv6 multicast group entry for this ip6_moptions instance
 * which matches the specified group, and optionally an interface.
 * Return the matching in6_mfilter, or NULL if no match was found.
 */
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr *group)
{
	const struct sockaddr_in6 *gsin6;
	struct in6_mfilter *imf;
	struct in6_multi *inm;

	gsin6 = (const struct sockaddr_in6 *)group;

	IP6_MFILTER_FOREACH(imf, &imo->im6o_head) {
		inm = imf->im6f_in6m;
		if (inm == NULL)
			continue;
		if ((ifp == NULL || (inm->in6m_ifp == ifp)) &&
		    IN6_ARE_ADDR_EQUAL(&inm->in6m_addr,
		    &gsin6->sin6_addr)) {
			break;
		}
	}
	return (imf);
}

/*
 * Find an IPv6 multicast source entry in the given filter set (imf)
 * for this socket which matches the specified source address.
 *
 * XXX TODO: The scope ID, if present in src, is stripped before
 * any comparison. We SHOULD enforce scope/zone checks where the source
 * filter entry has a link scope.
 *
 * NOTE: This does not check if the entry is in-mode, merely if
 * it exists, which may not be the desired behaviour.
 */
static struct in6_msource *
im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src)
{
	struct ip6_msource find;
	struct ip6_msource *ims;
	const sockunion_t *psa;

	KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__));

	psa = (const sockunion_t *)src;
	find.im6s_addr = psa->sin6.sin6_addr;
	in6_clearscope(&find.im6s_addr);	/* XXX */
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);

	return ((struct in6_msource *)ims);
}

/*
 * Perform filtering for multicast datagrams on a socket by group and source.
 *
 * Returns 0 if a datagram should be allowed through, or various error codes
 * if the socket was not a member of the group, or the source was muted, etc.
 */
int
im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr *group, const struct sockaddr *src)
{
	struct in6_mfilter *imf;
	struct in6_msource *ims;
	int mode;

	KASSERT(ifp != NULL, ("%s: null ifp", __func__));

	imf = im6o_match_group(imo, ifp, group);
	if (imf == NULL)
		return (MCAST_NOTGMEMBER);

	/*
	 * Check if the source was included in an (S,G) join.
	 * Allow reception on exclusive memberships by default,
	 * reject reception on inclusive memberships by default.
	 * Exclude source only if an in-mode exclude filter exists.
	 * Include source only if an in-mode include filter exists.
	 * NOTE: We are comparing group state here at MLD t1 (now)
	 * with socket-layer t0 (since last downcall).
	 */
	mode = imf->im6f_st[1];
	ims = im6o_match_source(imf, src);

	if ((ims == NULL && mode == MCAST_INCLUDE) ||
	    (ims != NULL && ims->im6sl_st[0] != mode))
		return (MCAST_NOTSMEMBER);

	return (MCAST_PASS);
}

/*
 * Look up an in6_multi record for an IPv6 multicast address
 * on the interface ifp.
 * If no record is found, return NULL.
 *
 * SMPng: The IN6_MULTI_LOCK must be held, and the caller must be in
 * the network epoch.
 */
struct in6_multi *
in6m_lookup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr)
{
	struct ifmultiaddr *ifma;
	struct in6_multi *inm;

	NET_EPOCH_ASSERT();

	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		inm = in6m_ifmultiaddr_get_inm(ifma);
		if (inm == NULL)
			continue;
		if (IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, mcaddr))
			return (inm);
	}
	return (NULL);
}

/*
 * Find and return a reference to an in6_multi record for (ifp, group),
 * and bump its reference count.
 * If one does not exist, try to allocate it, and update link-layer multicast
 * filters on ifp to listen for group.
 * Assumes the IN6_MULTI lock is held across the call.
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
in6_getmulti(struct ifnet *ifp, const struct in6_addr *group,
    struct in6_multi **pinm)
{
	struct epoch_tracker et;
	struct sockaddr_in6 gsin6;
	struct ifmultiaddr *ifma;
	struct in6_multi *inm;
	int error;

	error = 0;

	/*
	 * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK;
	 * if_addmulti() takes this mutex itself, so we must drop and
	 * re-acquire around the call.
396 */ 397 IN6_MULTI_LOCK_ASSERT(); 398 IN6_MULTI_LIST_LOCK(); 399 IF_ADDR_WLOCK(ifp); 400 NET_EPOCH_ENTER(et); 401 /* 402 * Does ifp support IPv6 multicasts? 403 */ 404 if (ifp->if_afdata[AF_INET6] == NULL) 405 error = ENODEV; 406 else 407 inm = in6m_lookup_locked(ifp, group); 408 NET_EPOCH_EXIT(et); 409 410 if (error != 0) 411 goto out_locked; 412 413 if (inm != NULL) { 414 /* 415 * If we already joined this group, just bump the 416 * refcount and return it. 417 */ 418 KASSERT(inm->in6m_refcount >= 1, 419 ("%s: bad refcount %d", __func__, inm->in6m_refcount)); 420 in6m_acquire_locked(inm); 421 *pinm = inm; 422 goto out_locked; 423 } 424 425 memset(&gsin6, 0, sizeof(gsin6)); 426 gsin6.sin6_family = AF_INET6; 427 gsin6.sin6_len = sizeof(struct sockaddr_in6); 428 gsin6.sin6_addr = *group; 429 430 /* 431 * Check if a link-layer group is already associated 432 * with this network-layer group on the given ifnet. 433 */ 434 IN6_MULTI_LIST_UNLOCK(); 435 IF_ADDR_WUNLOCK(ifp); 436 error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); 437 if (error != 0) 438 return (error); 439 IN6_MULTI_LIST_LOCK(); 440 IF_ADDR_WLOCK(ifp); 441 442 /* 443 * If something other than netinet6 is occupying the link-layer 444 * group, print a meaningful error message and back out of 445 * the allocation. 446 * Otherwise, bump the refcount on the existing network-layer 447 * group association and return it. 448 */ 449 if (ifma->ifma_protospec != NULL) { 450 inm = (struct in6_multi *)ifma->ifma_protospec; 451 #ifdef INVARIANTS 452 KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 453 __func__)); 454 KASSERT(ifma->ifma_addr->sa_family == AF_INET6, 455 ("%s: ifma not AF_INET6", __func__)); 456 KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 457 if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp || 458 !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)) 459 panic("%s: ifma %p is inconsistent with %p (%p)", 460 __func__, ifma, inm, group); 461 #endif 462 in6m_acquire_locked(inm); 463 *pinm = inm; 464 goto out_locked; 465 } 466 467 IF_ADDR_WLOCK_ASSERT(ifp); 468 469 /* 470 * A new in6_multi record is needed; allocate and initialize it. 471 * We DO NOT perform an MLD join as the in6_ layer may need to 472 * push an initial source list down to MLD to support SSM. 473 * 474 * The initial source filter state is INCLUDE, {} as per the RFC. 475 * Pending state-changes per group are subject to a bounds check. 476 */ 477 inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO); 478 if (inm == NULL) { 479 IN6_MULTI_LIST_UNLOCK(); 480 IF_ADDR_WUNLOCK(ifp); 481 if_delmulti_ifma(ifma); 482 return (ENOMEM); 483 } 484 inm->in6m_addr = *group; 485 inm->in6m_ifp = ifp; 486 inm->in6m_mli = MLD_IFINFO(ifp); 487 inm->in6m_ifma = ifma; 488 inm->in6m_refcount = 1; 489 inm->in6m_state = MLD_NOT_MEMBER; 490 mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); 491 492 inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; 493 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 494 RB_INIT(&inm->in6m_srcs); 495 496 ifma->ifma_protospec = inm; 497 *pinm = inm; 498 499 out_locked: 500 IN6_MULTI_LIST_UNLOCK(); 501 IF_ADDR_WUNLOCK(ifp); 502 return (error); 503 } 504 505 /* 506 * Drop a reference to an in6_multi record. 507 * 508 * If the refcount drops to 0, free the in6_multi record and 509 * delete the underlying link-layer membership. 
510 */ 511 static void 512 in6m_release(struct in6_multi *inm) 513 { 514 struct ifmultiaddr *ifma; 515 struct ifnet *ifp; 516 517 CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount); 518 519 MPASS(inm->in6m_refcount == 0); 520 CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm); 521 522 ifma = inm->in6m_ifma; 523 ifp = inm->in6m_ifp; 524 MPASS(ifma->ifma_llifma == NULL); 525 526 /* XXX this access is not covered by IF_ADDR_LOCK */ 527 CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma); 528 KASSERT(ifma->ifma_protospec == NULL, 529 ("%s: ifma_protospec != NULL", __func__)); 530 if (ifp == NULL) 531 ifp = ifma->ifma_ifp; 532 533 if (ifp != NULL) { 534 CURVNET_SET(ifp->if_vnet); 535 in6m_purge(inm); 536 free(inm, M_IP6MADDR); 537 if_delmulti_ifma_flags(ifma, 1); 538 CURVNET_RESTORE(); 539 if_rele(ifp); 540 } else { 541 in6m_purge(inm); 542 free(inm, M_IP6MADDR); 543 if_delmulti_ifma_flags(ifma, 1); 544 } 545 } 546 547 /* 548 * Interface detach can happen in a taskqueue thread context, so we must use a 549 * dedicated thread to avoid deadlocks when draining in6m_release tasks. 550 */ 551 TASKQUEUE_DEFINE_THREAD(in6m_free); 552 static struct in6_multi_head in6m_free_list = SLIST_HEAD_INITIALIZER(); 553 static void in6m_release_task(void *arg __unused, int pending __unused); 554 static struct task in6m_free_task = TASK_INITIALIZER(0, in6m_release_task, NULL); 555 556 void 557 in6m_release_list_deferred(struct in6_multi_head *inmh) 558 { 559 if (SLIST_EMPTY(inmh)) 560 return; 561 mtx_lock(&in6_multi_free_mtx); 562 SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele); 563 mtx_unlock(&in6_multi_free_mtx); 564 taskqueue_enqueue(taskqueue_in6m_free, &in6m_free_task); 565 } 566 567 void 568 in6m_release_wait(void *arg __unused) 569 { 570 571 /* 572 * Make sure all pending multicast addresses are freed before 573 * the VNET or network device is destroyed: 574 */ 575 taskqueue_drain_all(taskqueue_in6m_free); 576 } 577 #ifdef VIMAGE 578 /* XXX-BZ FIXME, see D24914. 
*/ 579 VNET_SYSUNINIT(in6m_release_wait, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, in6m_release_wait, NULL); 580 #endif 581 582 void 583 in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm) 584 { 585 struct ifnet *ifp; 586 struct ifaddr *ifa; 587 struct in6_ifaddr *ifa6; 588 struct in6_multi_mship *imm, *imm_tmp; 589 struct ifmultiaddr *ifma, *ll_ifma; 590 591 IN6_MULTI_LIST_LOCK_ASSERT(); 592 593 ifp = inm->in6m_ifp; 594 if (ifp == NULL) 595 return; /* already called */ 596 597 inm->in6m_ifp = NULL; 598 IF_ADDR_WLOCK_ASSERT(ifp); 599 ifma = inm->in6m_ifma; 600 if (ifma == NULL) 601 return; 602 603 if_ref(ifp); 604 if (ifma->ifma_flags & IFMA_F_ENQUEUED) { 605 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link); 606 ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 607 } 608 MCDPRINTF("removed ifma: %p from %s\n", ifma, ifp->if_xname); 609 if ((ll_ifma = ifma->ifma_llifma) != NULL) { 610 MPASS(ifma != ll_ifma); 611 ifma->ifma_llifma = NULL; 612 MPASS(ll_ifma->ifma_llifma == NULL); 613 MPASS(ll_ifma->ifma_ifp == ifp); 614 if (--ll_ifma->ifma_refcount == 0) { 615 if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) { 616 CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link); 617 ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED; 618 } 619 MCDPRINTF("removed ll_ifma: %p from %s\n", ll_ifma, ifp->if_xname); 620 if_freemulti(ll_ifma); 621 } 622 } 623 CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 624 if (ifa->ifa_addr->sa_family != AF_INET6) 625 continue; 626 ifa6 = (void *)ifa; 627 LIST_FOREACH_SAFE(imm, &ifa6->ia6_memberships, 628 i6mm_chain, imm_tmp) { 629 if (inm == imm->i6mm_maddr) { 630 LIST_REMOVE(imm, i6mm_chain); 631 free(imm, M_IP6MADDR); 632 in6m_rele_locked(inmh, inm); 633 } 634 } 635 } 636 } 637 638 static void 639 in6m_release_task(void *arg __unused, int pending __unused) 640 { 641 struct in6_multi_head in6m_free_tmp; 642 struct in6_multi *inm, *tinm; 643 644 SLIST_INIT(&in6m_free_tmp); 645 mtx_lock(&in6_multi_free_mtx); 646 SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele); 647 mtx_unlock(&in6_multi_free_mtx); 648 IN6_MULTI_LOCK(); 649 SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) { 650 SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele); 651 in6m_release(inm); 652 } 653 IN6_MULTI_UNLOCK(); 654 } 655 656 /* 657 * Clear recorded source entries for a group. 658 * Used by the MLD code. Caller must hold the IN6_MULTI lock. 659 * FIXME: Should reap. 660 */ 661 void 662 in6m_clear_recorded(struct in6_multi *inm) 663 { 664 struct ip6_msource *ims; 665 666 IN6_MULTI_LIST_LOCK_ASSERT(); 667 668 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 669 if (ims->im6s_stp) { 670 ims->im6s_stp = 0; 671 --inm->in6m_st[1].iss_rec; 672 } 673 } 674 KASSERT(inm->in6m_st[1].iss_rec == 0, 675 ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec)); 676 } 677 678 /* 679 * Record a source as pending for a Source-Group MLDv2 query. 680 * This lives here as it modifies the shared tree. 681 * 682 * inm is the group descriptor. 683 * naddr is the address of the source to record in network-byte order. 684 * 685 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will 686 * lazy-allocate a source node in response to an SG query. 687 * Otherwise, no allocation is performed. This saves some memory 688 * with the trade-off that the source will not be reported to the 689 * router if joined in the window between the query response and 690 * the group actually being joined on the local host. 
691 * 692 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed. 693 * This turns off the allocation of a recorded source entry if 694 * the group has not been joined. 695 * 696 * Return 0 if the source didn't exist or was already marked as recorded. 697 * Return 1 if the source was marked as recorded by this function. 698 * Return <0 if any error occurred (negated errno code). 699 */ 700 int 701 in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) 702 { 703 struct ip6_msource find; 704 struct ip6_msource *ims, *nims; 705 706 IN6_MULTI_LIST_LOCK_ASSERT(); 707 708 find.im6s_addr = *addr; 709 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 710 if (ims && ims->im6s_stp) 711 return (0); 712 if (ims == NULL) { 713 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 714 return (-ENOSPC); 715 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 716 M_NOWAIT | M_ZERO); 717 if (nims == NULL) 718 return (-ENOMEM); 719 nims->im6s_addr = find.im6s_addr; 720 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 721 ++inm->in6m_nsrc; 722 ims = nims; 723 } 724 725 /* 726 * Mark the source as recorded and update the recorded 727 * source count. 728 */ 729 ++ims->im6s_stp; 730 ++inm->in6m_st[1].iss_rec; 731 732 return (1); 733 } 734 735 /* 736 * Return a pointer to an in6_msource owned by an in6_mfilter, 737 * given its source address. 738 * Lazy-allocate if needed. If this is a new entry its filter state is 739 * undefined at t0. 740 * 741 * imf is the filter set being modified. 742 * addr is the source address. 743 * 744 * SMPng: May be called with locks held; malloc must not block. 745 */ 746 static int 747 im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, 748 struct in6_msource **plims) 749 { 750 struct ip6_msource find; 751 struct ip6_msource *ims, *nims; 752 struct in6_msource *lims; 753 int error; 754 755 error = 0; 756 ims = NULL; 757 lims = NULL; 758 759 find.im6s_addr = psin->sin6_addr; 760 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 761 lims = (struct in6_msource *)ims; 762 if (lims == NULL) { 763 if (imf->im6f_nsrc == in6_mcast_maxsocksrc) 764 return (ENOSPC); 765 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 766 M_NOWAIT | M_ZERO); 767 if (nims == NULL) 768 return (ENOMEM); 769 lims = (struct in6_msource *)nims; 770 lims->im6s_addr = find.im6s_addr; 771 lims->im6sl_st[0] = MCAST_UNDEFINED; 772 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 773 ++imf->im6f_nsrc; 774 } 775 776 *plims = lims; 777 778 return (error); 779 } 780 781 /* 782 * Graft a source entry into an existing socket-layer filter set, 783 * maintaining any required invariants and checking allocations. 784 * 785 * The source is marked as being in the new filter mode at t1. 786 * 787 * Return the pointer to the new node, otherwise return NULL. 
788 */ 789 static struct in6_msource * 790 im6f_graft(struct in6_mfilter *imf, const uint8_t st1, 791 const struct sockaddr_in6 *psin) 792 { 793 struct ip6_msource *nims; 794 struct in6_msource *lims; 795 796 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 797 M_NOWAIT | M_ZERO); 798 if (nims == NULL) 799 return (NULL); 800 lims = (struct in6_msource *)nims; 801 lims->im6s_addr = psin->sin6_addr; 802 lims->im6sl_st[0] = MCAST_UNDEFINED; 803 lims->im6sl_st[1] = st1; 804 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 805 ++imf->im6f_nsrc; 806 807 return (lims); 808 } 809 810 /* 811 * Prune a source entry from an existing socket-layer filter set, 812 * maintaining any required invariants and checking allocations. 813 * 814 * The source is marked as being left at t1, it is not freed. 815 * 816 * Return 0 if no error occurred, otherwise return an errno value. 817 */ 818 static int 819 im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) 820 { 821 struct ip6_msource find; 822 struct ip6_msource *ims; 823 struct in6_msource *lims; 824 825 find.im6s_addr = psin->sin6_addr; 826 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 827 if (ims == NULL) 828 return (ENOENT); 829 lims = (struct in6_msource *)ims; 830 lims->im6sl_st[1] = MCAST_UNDEFINED; 831 return (0); 832 } 833 834 /* 835 * Revert socket-layer filter set deltas at t1 to t0 state. 836 */ 837 static void 838 im6f_rollback(struct in6_mfilter *imf) 839 { 840 struct ip6_msource *ims, *tims; 841 struct in6_msource *lims; 842 843 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 844 lims = (struct in6_msource *)ims; 845 if (lims->im6sl_st[0] == lims->im6sl_st[1]) { 846 /* no change at t1 */ 847 continue; 848 } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) { 849 /* revert change to existing source at t1 */ 850 lims->im6sl_st[1] = lims->im6sl_st[0]; 851 } else { 852 /* revert source added t1 */ 853 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 854 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 855 free(ims, M_IN6MFILTER); 856 imf->im6f_nsrc--; 857 } 858 } 859 imf->im6f_st[1] = imf->im6f_st[0]; 860 } 861 862 /* 863 * Mark socket-layer filter set as INCLUDE {} at t1. 864 */ 865 static void 866 im6f_leave(struct in6_mfilter *imf) 867 { 868 struct ip6_msource *ims; 869 struct in6_msource *lims; 870 871 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 872 lims = (struct in6_msource *)ims; 873 lims->im6sl_st[1] = MCAST_UNDEFINED; 874 } 875 imf->im6f_st[1] = MCAST_INCLUDE; 876 } 877 878 /* 879 * Mark socket-layer filter set deltas as committed. 880 */ 881 static void 882 im6f_commit(struct in6_mfilter *imf) 883 { 884 struct ip6_msource *ims; 885 struct in6_msource *lims; 886 887 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 888 lims = (struct in6_msource *)ims; 889 lims->im6sl_st[0] = lims->im6sl_st[1]; 890 } 891 imf->im6f_st[0] = imf->im6f_st[1]; 892 } 893 894 /* 895 * Reap unreferenced sources from socket-layer filter set. 
896 */ 897 static void 898 im6f_reap(struct in6_mfilter *imf) 899 { 900 struct ip6_msource *ims, *tims; 901 struct in6_msource *lims; 902 903 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 904 lims = (struct in6_msource *)ims; 905 if ((lims->im6sl_st[0] == MCAST_UNDEFINED) && 906 (lims->im6sl_st[1] == MCAST_UNDEFINED)) { 907 CTR2(KTR_MLD, "%s: free lims %p", __func__, ims); 908 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 909 free(ims, M_IN6MFILTER); 910 imf->im6f_nsrc--; 911 } 912 } 913 } 914 915 /* 916 * Purge socket-layer filter set. 917 */ 918 static void 919 im6f_purge(struct in6_mfilter *imf) 920 { 921 struct ip6_msource *ims, *tims; 922 923 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 924 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 925 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 926 free(ims, M_IN6MFILTER); 927 imf->im6f_nsrc--; 928 } 929 imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED; 930 KASSERT(RB_EMPTY(&imf->im6f_sources), 931 ("%s: im6f_sources not empty", __func__)); 932 } 933 934 /* 935 * Look up a source filter entry for a multicast group. 936 * 937 * inm is the group descriptor to work with. 938 * addr is the IPv6 address to look up. 939 * noalloc may be non-zero to suppress allocation of sources. 940 * *pims will be set to the address of the retrieved or allocated source. 941 * 942 * SMPng: NOTE: may be called with locks held. 943 * Return 0 if successful, otherwise return a non-zero error code. 944 */ 945 static int 946 in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, 947 const int noalloc, struct ip6_msource **pims) 948 { 949 struct ip6_msource find; 950 struct ip6_msource *ims, *nims; 951 #ifdef KTR 952 char ip6tbuf[INET6_ADDRSTRLEN]; 953 #endif 954 955 find.im6s_addr = *addr; 956 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 957 if (ims == NULL && !noalloc) { 958 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 959 return (ENOSPC); 960 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 961 M_NOWAIT | M_ZERO); 962 if (nims == NULL) 963 return (ENOMEM); 964 nims->im6s_addr = *addr; 965 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 966 ++inm->in6m_nsrc; 967 ims = nims; 968 CTR3(KTR_MLD, "%s: allocated %s as %p", __func__, 969 ip6_sprintf(ip6tbuf, addr), ims); 970 } 971 972 *pims = ims; 973 return (0); 974 } 975 976 /* 977 * Merge socket-layer source into MLD-layer source. 978 * If rollback is non-zero, perform the inverse of the merge. 979 */ 980 static void 981 im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, 982 const int rollback) 983 { 984 int n = rollback ? -1 : 1; 985 #ifdef KTR 986 char ip6tbuf[INET6_ADDRSTRLEN]; 987 988 ip6_sprintf(ip6tbuf, &lims->im6s_addr); 989 #endif 990 991 if (lims->im6sl_st[0] == MCAST_EXCLUDE) { 992 CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf); 993 ims->im6s_st[1].ex -= n; 994 } else if (lims->im6sl_st[0] == MCAST_INCLUDE) { 995 CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf); 996 ims->im6s_st[1].in -= n; 997 } 998 999 if (lims->im6sl_st[1] == MCAST_EXCLUDE) { 1000 CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf); 1001 ims->im6s_st[1].ex += n; 1002 } else if (lims->im6sl_st[1] == MCAST_INCLUDE) { 1003 CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf); 1004 ims->im6s_st[1].in += n; 1005 } 1006 } 1007 1008 /* 1009 * Atomically update the global in6_multi state, when a membership's 1010 * filter list is being updated in any way. 
1011 * 1012 * imf is the per-inpcb-membership group filter pointer. 1013 * A fake imf may be passed for in-kernel consumers. 1014 * 1015 * XXX This is a candidate for a set-symmetric-difference style loop 1016 * which would eliminate the repeated lookup from root of ims nodes, 1017 * as they share the same key space. 1018 * 1019 * If any error occurred this function will back out of refcounts 1020 * and return a non-zero value. 1021 */ 1022 static int 1023 in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1024 { 1025 struct ip6_msource *ims, *nims; 1026 struct in6_msource *lims; 1027 int schanged, error; 1028 int nsrc0, nsrc1; 1029 1030 schanged = 0; 1031 error = 0; 1032 nsrc1 = nsrc0 = 0; 1033 IN6_MULTI_LIST_LOCK_ASSERT(); 1034 1035 /* 1036 * Update the source filters first, as this may fail. 1037 * Maintain count of in-mode filters at t0, t1. These are 1038 * used to work out if we transition into ASM mode or not. 1039 * Maintain a count of source filters whose state was 1040 * actually modified by this operation. 1041 */ 1042 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1043 lims = (struct in6_msource *)ims; 1044 if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; 1045 if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; 1046 if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; 1047 error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); 1048 ++schanged; 1049 if (error) 1050 break; 1051 im6s_merge(nims, lims, 0); 1052 } 1053 if (error) { 1054 struct ip6_msource *bims; 1055 1056 RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { 1057 lims = (struct in6_msource *)ims; 1058 if (lims->im6sl_st[0] == lims->im6sl_st[1]) 1059 continue; 1060 (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims); 1061 if (bims == NULL) 1062 continue; 1063 im6s_merge(bims, lims, 1); 1064 } 1065 goto out_reap; 1066 } 1067 1068 CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1", 1069 __func__, nsrc0, nsrc1); 1070 1071 /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ 1072 if (imf->im6f_st[0] == imf->im6f_st[1] && 1073 imf->im6f_st[1] == MCAST_INCLUDE) { 1074 if (nsrc1 == 0) { 1075 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1076 --inm->in6m_st[1].iss_in; 1077 } 1078 } 1079 1080 /* Handle filter mode transition on socket. */ 1081 if (imf->im6f_st[0] != imf->im6f_st[1]) { 1082 CTR3(KTR_MLD, "%s: imf transition %d to %d", 1083 __func__, imf->im6f_st[0], imf->im6f_st[1]); 1084 1085 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 1086 CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__); 1087 --inm->in6m_st[1].iss_ex; 1088 } else if (imf->im6f_st[0] == MCAST_INCLUDE) { 1089 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 1090 --inm->in6m_st[1].iss_in; 1091 } 1092 1093 if (imf->im6f_st[1] == MCAST_EXCLUDE) { 1094 CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__); 1095 inm->in6m_st[1].iss_ex++; 1096 } else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) { 1097 CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__); 1098 inm->in6m_st[1].iss_in++; 1099 } 1100 } 1101 1102 /* 1103 * Track inm filter state in terms of listener counts. 1104 * If there are any exclusive listeners, stack-wide 1105 * membership is exclusive. 1106 * Otherwise, if only inclusive listeners, stack-wide is inclusive. 1107 * If no listeners remain, state is undefined at t1, 1108 * and the MLD lifecycle for this group should finish. 
1109 */ 1110 if (inm->in6m_st[1].iss_ex > 0) { 1111 CTR1(KTR_MLD, "%s: transition to EX", __func__); 1112 inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE; 1113 } else if (inm->in6m_st[1].iss_in > 0) { 1114 CTR1(KTR_MLD, "%s: transition to IN", __func__); 1115 inm->in6m_st[1].iss_fmode = MCAST_INCLUDE; 1116 } else { 1117 CTR1(KTR_MLD, "%s: transition to UNDEF", __func__); 1118 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 1119 } 1120 1121 /* Decrement ASM listener count on transition out of ASM mode. */ 1122 if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { 1123 if ((imf->im6f_st[1] != MCAST_EXCLUDE) || 1124 (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) { 1125 CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__); 1126 --inm->in6m_st[1].iss_asm; 1127 } 1128 } 1129 1130 /* Increment ASM listener count on transition to ASM mode. */ 1131 if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { 1132 CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__); 1133 inm->in6m_st[1].iss_asm++; 1134 } 1135 1136 CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm); 1137 in6m_print(inm); 1138 1139 out_reap: 1140 if (schanged > 0) { 1141 CTR1(KTR_MLD, "%s: sources changed; reaping", __func__); 1142 in6m_reap(inm); 1143 } 1144 return (error); 1145 } 1146 1147 /* 1148 * Mark an in6_multi's filter set deltas as committed. 1149 * Called by MLD after a state change has been enqueued. 1150 */ 1151 void 1152 in6m_commit(struct in6_multi *inm) 1153 { 1154 struct ip6_msource *ims; 1155 1156 CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm); 1157 CTR1(KTR_MLD, "%s: pre commit:", __func__); 1158 in6m_print(inm); 1159 1160 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 1161 ims->im6s_st[0] = ims->im6s_st[1]; 1162 } 1163 inm->in6m_st[0] = inm->in6m_st[1]; 1164 } 1165 1166 /* 1167 * Reap unreferenced nodes from an in6_multi's filter set. 1168 */ 1169 static void 1170 in6m_reap(struct in6_multi *inm) 1171 { 1172 struct ip6_msource *ims, *tims; 1173 1174 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1175 if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 || 1176 ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 || 1177 ims->im6s_stp != 0) 1178 continue; 1179 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1180 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1181 free(ims, M_IP6MSOURCE); 1182 inm->in6m_nsrc--; 1183 } 1184 } 1185 1186 /* 1187 * Purge all source nodes from an in6_multi's filter set. 1188 */ 1189 static void 1190 in6m_purge(struct in6_multi *inm) 1191 { 1192 struct ip6_msource *ims, *tims; 1193 1194 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1195 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1196 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1197 free(ims, M_IP6MSOURCE); 1198 inm->in6m_nsrc--; 1199 } 1200 /* Free state-change requests that might be queued. */ 1201 mbufq_drain(&inm->in6m_scq); 1202 } 1203 1204 /* 1205 * Join a multicast address w/o sources. 1206 * KAME compatibility entry point. 1207 * 1208 * SMPng: Assume no mc locks held by caller. 1209 */ 1210 int 1211 in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr, 1212 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1213 const int delay) 1214 { 1215 int error; 1216 1217 IN6_MULTI_LOCK(); 1218 error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay); 1219 IN6_MULTI_UNLOCK(); 1220 return (error); 1221 } 1222 1223 /* 1224 * Join a multicast group; real entry point. 1225 * 1226 * Only preserves atomicity at inm level. 
1227 * NOTE: imf argument cannot be const due to sys/tree.h limitations. 1228 * 1229 * If the MLD downcall fails, the group is not joined, and an error 1230 * code is returned. 1231 */ 1232 static int 1233 in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr, 1234 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1235 const int delay) 1236 { 1237 struct in6_multi_head inmh; 1238 struct in6_mfilter timf; 1239 struct in6_multi *inm; 1240 struct ifmultiaddr *ifma; 1241 int error; 1242 #ifdef KTR 1243 char ip6tbuf[INET6_ADDRSTRLEN]; 1244 #endif 1245 1246 /* 1247 * Sanity: Check scope zone ID was set for ifp, if and 1248 * only if group is scoped to an interface. 1249 */ 1250 KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr), 1251 ("%s: not a multicast address", __func__)); 1252 if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) || 1253 IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) { 1254 KASSERT(mcaddr->s6_addr16[1] != 0, 1255 ("%s: scope zone ID not set", __func__)); 1256 } 1257 1258 IN6_MULTI_LOCK_ASSERT(); 1259 IN6_MULTI_LIST_UNLOCK_ASSERT(); 1260 1261 CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__, 1262 ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp)); 1263 1264 error = 0; 1265 inm = NULL; 1266 1267 /* 1268 * If no imf was specified (i.e. kernel consumer), 1269 * fake one up and assume it is an ASM join. 1270 */ 1271 if (imf == NULL) { 1272 im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); 1273 imf = &timf; 1274 } 1275 error = in6_getmulti(ifp, mcaddr, &inm); 1276 if (error) { 1277 CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__); 1278 return (error); 1279 } 1280 1281 IN6_MULTI_LIST_LOCK(); 1282 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1283 error = in6m_merge(inm, imf); 1284 if (error) { 1285 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1286 goto out_in6m_release; 1287 } 1288 1289 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1290 error = mld_change_state(inm, delay); 1291 if (error) { 1292 CTR1(KTR_MLD, "%s: failed to update source", __func__); 1293 goto out_in6m_release; 1294 } 1295 1296 out_in6m_release: 1297 SLIST_INIT(&inmh); 1298 if (error) { 1299 struct epoch_tracker et; 1300 1301 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1302 IF_ADDR_WLOCK(ifp); 1303 NET_EPOCH_ENTER(et); 1304 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1305 if (ifma->ifma_protospec == inm) { 1306 ifma->ifma_protospec = NULL; 1307 break; 1308 } 1309 } 1310 in6m_disconnect_locked(&inmh, inm); 1311 in6m_rele_locked(&inmh, inm); 1312 NET_EPOCH_EXIT(et); 1313 IF_ADDR_WUNLOCK(ifp); 1314 } else { 1315 *pinm = inm; 1316 } 1317 IN6_MULTI_LIST_UNLOCK(); 1318 in6m_release_list_deferred(&inmh); 1319 return (error); 1320 } 1321 1322 /* 1323 * Leave a multicast group; unlocked entry point. 1324 */ 1325 int 1326 in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1327 { 1328 int error; 1329 1330 IN6_MULTI_LOCK(); 1331 error = in6_leavegroup_locked(inm, imf); 1332 IN6_MULTI_UNLOCK(); 1333 return (error); 1334 } 1335 1336 /* 1337 * Leave a multicast group; real entry point. 1338 * All source filters will be expunged. 1339 * 1340 * Only preserves atomicity at inm level. 1341 * 1342 * Holding the write lock for the INP which contains imf 1343 * is highly advisable. We can't assert for it as imf does not 1344 * contain a back-pointer to the owning inp. 1345 * 1346 * Note: This is not the same as in6m_release(*) as this function also 1347 * makes a state change downcall into MLD. 
1348 */ 1349 int 1350 in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1351 { 1352 struct in6_multi_head inmh; 1353 struct in6_mfilter timf; 1354 struct ifnet *ifp; 1355 int error; 1356 #ifdef KTR 1357 char ip6tbuf[INET6_ADDRSTRLEN]; 1358 #endif 1359 1360 error = 0; 1361 1362 IN6_MULTI_LOCK_ASSERT(); 1363 1364 CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__, 1365 inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 1366 (in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)), 1367 imf); 1368 1369 /* 1370 * If no imf was specified (i.e. kernel consumer), 1371 * fake one up and assume it is an ASM join. 1372 */ 1373 if (imf == NULL) { 1374 im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); 1375 imf = &timf; 1376 } 1377 1378 /* 1379 * Begin state merge transaction at MLD layer. 1380 * 1381 * As this particular invocation should not cause any memory 1382 * to be allocated, and there is no opportunity to roll back 1383 * the transaction, it MUST NOT fail. 1384 */ 1385 1386 ifp = inm->in6m_ifp; 1387 IN6_MULTI_LIST_LOCK(); 1388 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1389 error = in6m_merge(inm, imf); 1390 KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1391 1392 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1393 error = 0; 1394 if (ifp) 1395 error = mld_change_state(inm, 0); 1396 if (error) 1397 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1398 1399 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1400 if (ifp) 1401 IF_ADDR_WLOCK(ifp); 1402 1403 SLIST_INIT(&inmh); 1404 if (inm->in6m_refcount == 1) 1405 in6m_disconnect_locked(&inmh, inm); 1406 in6m_rele_locked(&inmh, inm); 1407 if (ifp) 1408 IF_ADDR_WUNLOCK(ifp); 1409 IN6_MULTI_LIST_UNLOCK(); 1410 in6m_release_list_deferred(&inmh); 1411 return (error); 1412 } 1413 1414 /* 1415 * Block or unblock an ASM multicast source on an inpcb. 1416 * This implements the delta-based API described in RFC 3678. 1417 * 1418 * The delta-based API applies only to exclusive-mode memberships. 1419 * An MLD downcall will be performed. 1420 * 1421 * SMPng: NOTE: Must take Giant as a join may create a new ifma. 1422 * 1423 * Return 0 if successful, otherwise return an appropriate error code. 1424 */ 1425 static int 1426 in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1427 { 1428 struct group_source_req gsr; 1429 struct epoch_tracker et; 1430 sockunion_t *gsa, *ssa; 1431 struct ifnet *ifp; 1432 struct in6_mfilter *imf; 1433 struct ip6_moptions *imo; 1434 struct in6_msource *ims; 1435 struct in6_multi *inm; 1436 uint16_t fmode; 1437 int error, doblock; 1438 #ifdef KTR 1439 char ip6tbuf[INET6_ADDRSTRLEN]; 1440 #endif 1441 1442 ifp = NULL; 1443 error = 0; 1444 doblock = 0; 1445 1446 memset(&gsr, 0, sizeof(struct group_source_req)); 1447 gsa = (sockunion_t *)&gsr.gsr_group; 1448 ssa = (sockunion_t *)&gsr.gsr_source; 1449 1450 switch (sopt->sopt_name) { 1451 case MCAST_BLOCK_SOURCE: 1452 case MCAST_UNBLOCK_SOURCE: 1453 error = sooptcopyin(sopt, &gsr, 1454 sizeof(struct group_source_req), 1455 sizeof(struct group_source_req)); 1456 if (error) 1457 return (error); 1458 1459 if (gsa->sin6.sin6_family != AF_INET6 || 1460 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1461 return (EINVAL); 1462 1463 if (ssa->sin6.sin6_family != AF_INET6 || 1464 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1465 return (EINVAL); 1466 1467 /* 1468 * XXXGL: this function should use ifnet_byindex_ref, or 1469 * expand the epoch section all the way to where we put 1470 * the reference. 
1471 */ 1472 NET_EPOCH_ENTER(et); 1473 ifp = ifnet_byindex(gsr.gsr_interface); 1474 NET_EPOCH_EXIT(et); 1475 if (ifp == NULL) 1476 return (EADDRNOTAVAIL); 1477 1478 if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1479 doblock = 1; 1480 break; 1481 1482 default: 1483 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1484 __func__, sopt->sopt_name); 1485 return (EOPNOTSUPP); 1486 break; 1487 } 1488 1489 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1490 return (EINVAL); 1491 1492 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1493 1494 /* 1495 * Check if we are actually a member of this group. 1496 */ 1497 imo = in6p_findmoptions(inp); 1498 imf = im6o_match_group(imo, ifp, &gsa->sa); 1499 if (imf == NULL) { 1500 error = EADDRNOTAVAIL; 1501 goto out_in6p_locked; 1502 } 1503 inm = imf->im6f_in6m; 1504 1505 /* 1506 * Attempting to use the delta-based API on an 1507 * non exclusive-mode membership is an error. 1508 */ 1509 fmode = imf->im6f_st[0]; 1510 if (fmode != MCAST_EXCLUDE) { 1511 error = EINVAL; 1512 goto out_in6p_locked; 1513 } 1514 1515 /* 1516 * Deal with error cases up-front: 1517 * Asked to block, but already blocked; or 1518 * Asked to unblock, but nothing to unblock. 1519 * If adding a new block entry, allocate it. 1520 */ 1521 ims = im6o_match_source(imf, &ssa->sa); 1522 if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1523 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 1524 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 1525 doblock ? "" : "not "); 1526 error = EADDRNOTAVAIL; 1527 goto out_in6p_locked; 1528 } 1529 1530 INP_WLOCK_ASSERT(inp); 1531 1532 /* 1533 * Begin state merge transaction at socket layer. 1534 */ 1535 if (doblock) { 1536 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 1537 ims = im6f_graft(imf, fmode, &ssa->sin6); 1538 if (ims == NULL) 1539 error = ENOMEM; 1540 } else { 1541 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 1542 error = im6f_prune(imf, &ssa->sin6); 1543 } 1544 1545 if (error) { 1546 CTR1(KTR_MLD, "%s: merge imf state failed", __func__); 1547 goto out_im6f_rollback; 1548 } 1549 1550 /* 1551 * Begin state merge transaction at MLD layer. 1552 */ 1553 IN6_MULTI_LIST_LOCK(); 1554 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1555 error = in6m_merge(inm, imf); 1556 if (error) 1557 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1558 else { 1559 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1560 error = mld_change_state(inm, 0); 1561 if (error) 1562 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1563 } 1564 1565 IN6_MULTI_LIST_UNLOCK(); 1566 1567 out_im6f_rollback: 1568 if (error) 1569 im6f_rollback(imf); 1570 else 1571 im6f_commit(imf); 1572 1573 im6f_reap(imf); 1574 1575 out_in6p_locked: 1576 INP_WUNLOCK(inp); 1577 return (error); 1578 } 1579 1580 /* 1581 * Given an inpcb, return its multicast options structure pointer. Accepts 1582 * an unlocked inpcb pointer, but will return it locked. May sleep. 1583 * 1584 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 1585 * SMPng: NOTE: Returns with the INP write lock held. 
1586 */ 1587 static struct ip6_moptions * 1588 in6p_findmoptions(struct inpcb *inp) 1589 { 1590 struct ip6_moptions *imo; 1591 1592 INP_WLOCK(inp); 1593 if (inp->in6p_moptions != NULL) 1594 return (inp->in6p_moptions); 1595 1596 INP_WUNLOCK(inp); 1597 1598 imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK); 1599 1600 imo->im6o_multicast_ifp = NULL; 1601 imo->im6o_multicast_hlim = V_ip6_defmcasthlim; 1602 imo->im6o_multicast_loop = in6_mcast_loop; 1603 STAILQ_INIT(&imo->im6o_head); 1604 1605 INP_WLOCK(inp); 1606 if (inp->in6p_moptions != NULL) { 1607 free(imo, M_IP6MOPTS); 1608 return (inp->in6p_moptions); 1609 } 1610 inp->in6p_moptions = imo; 1611 return (imo); 1612 } 1613 1614 /* 1615 * Discard the IPv6 multicast options (and source filters). 1616 * 1617 * SMPng: NOTE: assumes INP write lock is held. 1618 * 1619 * XXX can all be safely deferred to epoch_call 1620 * 1621 */ 1622 1623 static void 1624 inp_gcmoptions(struct ip6_moptions *imo) 1625 { 1626 struct in6_mfilter *imf; 1627 struct in6_multi *inm; 1628 struct ifnet *ifp; 1629 1630 while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) { 1631 ip6_mfilter_remove(&imo->im6o_head, imf); 1632 1633 im6f_leave(imf); 1634 if ((inm = imf->im6f_in6m) != NULL) { 1635 if ((ifp = inm->in6m_ifp) != NULL) { 1636 CURVNET_SET(ifp->if_vnet); 1637 (void)in6_leavegroup(inm, imf); 1638 CURVNET_RESTORE(); 1639 } else { 1640 (void)in6_leavegroup(inm, imf); 1641 } 1642 } 1643 ip6_mfilter_free(imf); 1644 } 1645 free(imo, M_IP6MOPTS); 1646 } 1647 1648 void 1649 ip6_freemoptions(struct ip6_moptions *imo) 1650 { 1651 if (imo == NULL) 1652 return; 1653 inp_gcmoptions(imo); 1654 } 1655 1656 /* 1657 * Atomically get source filters on a socket for an IPv6 multicast group. 1658 * Called with INP lock held; returns with lock released. 1659 */ 1660 static int 1661 in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) 1662 { 1663 struct epoch_tracker et; 1664 struct __msfilterreq msfr; 1665 sockunion_t *gsa; 1666 struct ifnet *ifp; 1667 struct ip6_moptions *imo; 1668 struct in6_mfilter *imf; 1669 struct ip6_msource *ims; 1670 struct in6_msource *lims; 1671 struct sockaddr_in6 *psin; 1672 struct sockaddr_storage *ptss; 1673 struct sockaddr_storage *tss; 1674 int error; 1675 size_t nsrcs, ncsrcs; 1676 1677 INP_WLOCK_ASSERT(inp); 1678 1679 imo = inp->in6p_moptions; 1680 KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__)); 1681 1682 INP_WUNLOCK(inp); 1683 1684 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 1685 sizeof(struct __msfilterreq)); 1686 if (error) 1687 return (error); 1688 1689 if (msfr.msfr_group.ss_family != AF_INET6 || 1690 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 1691 return (EINVAL); 1692 1693 gsa = (sockunion_t *)&msfr.msfr_group; 1694 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1695 return (EINVAL); 1696 1697 /* 1698 * XXXGL: this function should use ifnet_byindex_ref, or expand the 1699 * epoch section all the way to where the interface is referenced. 1700 */ 1701 NET_EPOCH_ENTER(et); 1702 ifp = ifnet_byindex(msfr.msfr_ifindex); 1703 NET_EPOCH_EXIT(et); 1704 if (ifp == NULL) 1705 return (EADDRNOTAVAIL); 1706 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1707 1708 INP_WLOCK(inp); 1709 1710 /* 1711 * Lookup group on the socket. 1712 */ 1713 imf = im6o_match_group(imo, ifp, &gsa->sa); 1714 if (imf == NULL) { 1715 INP_WUNLOCK(inp); 1716 return (EADDRNOTAVAIL); 1717 } 1718 1719 /* 1720 * Ignore memberships which are in limbo. 
1721 */ 1722 if (imf->im6f_st[1] == MCAST_UNDEFINED) { 1723 INP_WUNLOCK(inp); 1724 return (EAGAIN); 1725 } 1726 msfr.msfr_fmode = imf->im6f_st[1]; 1727 1728 /* 1729 * If the user specified a buffer, copy out the source filter 1730 * entries to userland gracefully. 1731 * We only copy out the number of entries which userland 1732 * has asked for, but we always tell userland how big the 1733 * buffer really needs to be. 1734 */ 1735 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 1736 msfr.msfr_nsrcs = in6_mcast_maxsocksrc; 1737 tss = NULL; 1738 if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { 1739 tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 1740 M_TEMP, M_NOWAIT | M_ZERO); 1741 if (tss == NULL) { 1742 INP_WUNLOCK(inp); 1743 return (ENOBUFS); 1744 } 1745 } 1746 1747 /* 1748 * Count number of sources in-mode at t0. 1749 * If buffer space exists and remains, copy out source entries. 1750 */ 1751 nsrcs = msfr.msfr_nsrcs; 1752 ncsrcs = 0; 1753 ptss = tss; 1754 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1755 lims = (struct in6_msource *)ims; 1756 if (lims->im6sl_st[0] == MCAST_UNDEFINED || 1757 lims->im6sl_st[0] != imf->im6f_st[0]) 1758 continue; 1759 ++ncsrcs; 1760 if (tss != NULL && nsrcs > 0) { 1761 psin = (struct sockaddr_in6 *)ptss; 1762 psin->sin6_family = AF_INET6; 1763 psin->sin6_len = sizeof(struct sockaddr_in6); 1764 psin->sin6_addr = lims->im6s_addr; 1765 psin->sin6_port = 0; 1766 --nsrcs; 1767 ++ptss; 1768 } 1769 } 1770 1771 INP_WUNLOCK(inp); 1772 1773 if (tss != NULL) { 1774 error = copyout(tss, msfr.msfr_srcs, 1775 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 1776 free(tss, M_TEMP); 1777 if (error) 1778 return (error); 1779 } 1780 1781 msfr.msfr_nsrcs = ncsrcs; 1782 error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); 1783 1784 return (error); 1785 } 1786 1787 /* 1788 * Return the IP multicast options in response to user getsockopt(). 1789 */ 1790 int 1791 ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) 1792 { 1793 struct ip6_moptions *im6o; 1794 int error; 1795 u_int optval; 1796 1797 INP_WLOCK(inp); 1798 im6o = inp->in6p_moptions; 1799 /* If socket is neither of type SOCK_RAW or SOCK_DGRAM, reject it. 
*/ 1800 if (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 1801 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM) { 1802 INP_WUNLOCK(inp); 1803 return (EOPNOTSUPP); 1804 } 1805 1806 error = 0; 1807 switch (sopt->sopt_name) { 1808 case IPV6_MULTICAST_IF: 1809 if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { 1810 optval = 0; 1811 } else { 1812 optval = im6o->im6o_multicast_ifp->if_index; 1813 } 1814 INP_WUNLOCK(inp); 1815 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1816 break; 1817 1818 case IPV6_MULTICAST_HOPS: 1819 if (im6o == NULL) 1820 optval = V_ip6_defmcasthlim; 1821 else 1822 optval = im6o->im6o_multicast_hlim; 1823 INP_WUNLOCK(inp); 1824 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1825 break; 1826 1827 case IPV6_MULTICAST_LOOP: 1828 if (im6o == NULL) 1829 optval = in6_mcast_loop; /* XXX VIMAGE */ 1830 else 1831 optval = im6o->im6o_multicast_loop; 1832 INP_WUNLOCK(inp); 1833 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1834 break; 1835 1836 case IPV6_MSFILTER: 1837 if (im6o == NULL) { 1838 error = EADDRNOTAVAIL; 1839 INP_WUNLOCK(inp); 1840 } else { 1841 error = in6p_get_source_filters(inp, sopt); 1842 } 1843 break; 1844 1845 default: 1846 INP_WUNLOCK(inp); 1847 error = ENOPROTOOPT; 1848 break; 1849 } 1850 1851 INP_UNLOCK_ASSERT(inp); 1852 1853 return (error); 1854 } 1855 1856 /* 1857 * Look up the ifnet to use for a multicast group membership, 1858 * given the address of an IPv6 group. 1859 * 1860 * This routine exists to support legacy IPv6 multicast applications. 1861 * 1862 * Use the socket's current FIB number for any required FIB lookup. Look up the 1863 * group address in the unicast FIB, and use its ifp; usually, this points to 1864 * the default next-hop. If the FIB lookup fails, return NULL. 1865 * 1866 * FUTURE: Support multiple forwarding tables for IPv6. 1867 * 1868 * Returns NULL if no ifp could be found. 1869 */ 1870 static struct ifnet * 1871 in6p_lookup_mcast_ifp(const struct inpcb *inp, const struct sockaddr_in6 *gsin6) 1872 { 1873 struct nhop_object *nh; 1874 struct in6_addr dst; 1875 uint32_t scopeid; 1876 uint32_t fibnum; 1877 1878 KASSERT(gsin6->sin6_family == AF_INET6, 1879 ("%s: not AF_INET6 group", __func__)); 1880 1881 in6_splitscope(&gsin6->sin6_addr, &dst, &scopeid); 1882 fibnum = inp->inp_inc.inc_fibnum; 1883 nh = fib6_lookup(fibnum, &dst, scopeid, 0, 0); 1884 1885 return (nh ? nh->nh_ifp : NULL); 1886 } 1887 1888 /* 1889 * Join an IPv6 multicast group, possibly with a source. 1890 * 1891 * FIXME: The KAME use of the unspecified address (::) 1892 * to join *all* multicast groups is currently unsupported. 1893 * 1894 * XXXGL: this function multiple times uses ifnet_byindex() without 1895 * proper protection - staying in epoch, or putting reference on ifnet. 1896 */ 1897 static int 1898 in6p_join_group(struct inpcb *inp, struct sockopt *sopt) 1899 { 1900 struct in6_multi_head inmh; 1901 struct group_source_req gsr; 1902 struct epoch_tracker et; 1903 sockunion_t *gsa, *ssa; 1904 struct ifnet *ifp; 1905 struct in6_mfilter *imf; 1906 struct ip6_moptions *imo; 1907 struct in6_multi *inm; 1908 struct in6_msource *lims; 1909 int error, is_new; 1910 1911 SLIST_INIT(&inmh); 1912 ifp = NULL; 1913 lims = NULL; 1914 error = 0; 1915 1916 memset(&gsr, 0, sizeof(struct group_source_req)); 1917 gsa = (sockunion_t *)&gsr.gsr_group; 1918 gsa->ss.ss_family = AF_UNSPEC; 1919 ssa = (sockunion_t *)&gsr.gsr_source; 1920 ssa->ss.ss_family = AF_UNSPEC; 1921 1922 /* 1923 * Chew everything into struct group_source_req. 
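 *
 * For illustration only (not part of this module), a minimal userland
 * SSM join that reaches this handler might look like the following;
 * the socket `s', interface index `ifindex' and addresses are
 * hypothetical:
 *
 *	struct group_source_req gsr;
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	memset(&gsr, 0, sizeof(gsr));
 *	gsr.gsr_interface = ifindex;
 *	grp->sin6_family = AF_INET6;
 *	grp->sin6_len = sizeof(*grp);
 *	inet_pton(AF_INET6, "ff3e::1234", &grp->sin6_addr);
 *	src->sin6_family = AF_INET6;
 *	src->sin6_len = sizeof(*src);
 *	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *	setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP, &gsr,
 *	    sizeof(gsr));
 *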
1924 * Overwrite the port field if present, as the sockaddr 1925 * being copied in may be matched with a binary comparison. 1926 * Ignore passed-in scope ID. 1927 */ 1928 switch (sopt->sopt_name) { 1929 case IPV6_JOIN_GROUP: { 1930 struct ipv6_mreq mreq; 1931 1932 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 1933 sizeof(struct ipv6_mreq)); 1934 if (error) 1935 return (error); 1936 1937 gsa->sin6.sin6_family = AF_INET6; 1938 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 1939 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 1940 1941 if (mreq.ipv6mr_interface == 0) { 1942 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 1943 } else { 1944 NET_EPOCH_ENTER(et); 1945 ifp = ifnet_byindex(mreq.ipv6mr_interface); 1946 NET_EPOCH_EXIT(et); 1947 if (ifp == NULL) 1948 return (EADDRNOTAVAIL); 1949 } 1950 CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p", 1951 __func__, mreq.ipv6mr_interface, ifp); 1952 } break; 1953 1954 case MCAST_JOIN_GROUP: 1955 case MCAST_JOIN_SOURCE_GROUP: 1956 if (sopt->sopt_name == MCAST_JOIN_GROUP) { 1957 error = sooptcopyin(sopt, &gsr, 1958 sizeof(struct group_req), 1959 sizeof(struct group_req)); 1960 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1961 error = sooptcopyin(sopt, &gsr, 1962 sizeof(struct group_source_req), 1963 sizeof(struct group_source_req)); 1964 } 1965 if (error) 1966 return (error); 1967 1968 if (gsa->sin6.sin6_family != AF_INET6 || 1969 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1970 return (EINVAL); 1971 1972 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1973 if (ssa->sin6.sin6_family != AF_INET6 || 1974 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1975 return (EINVAL); 1976 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 1977 return (EINVAL); 1978 /* 1979 * TODO: Validate embedded scope ID in source 1980 * list entry against passed-in ifp, if and only 1981 * if source list filter entry is iface or node local. 1982 */ 1983 in6_clearscope(&ssa->sin6.sin6_addr); 1984 ssa->sin6.sin6_port = 0; 1985 ssa->sin6.sin6_scope_id = 0; 1986 } 1987 NET_EPOCH_ENTER(et); 1988 ifp = ifnet_byindex(gsr.gsr_interface); 1989 NET_EPOCH_EXIT(et); 1990 if (ifp == NULL) 1991 return (EADDRNOTAVAIL); 1992 break; 1993 1994 default: 1995 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1996 __func__, sopt->sopt_name); 1997 return (EOPNOTSUPP); 1998 break; 1999 } 2000 2001 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2002 return (EINVAL); 2003 2004 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 2005 return (EADDRNOTAVAIL); 2006 2007 gsa->sin6.sin6_port = 0; 2008 gsa->sin6.sin6_scope_id = 0; 2009 2010 /* 2011 * Always set the scope zone ID on memberships created from userland. 2012 * Use the passed-in ifp to do this. 2013 * XXX The in6_setscope() return value is meaningless. 2014 * XXX SCOPE6_LOCK() is taken by in6_setscope(). 2015 */ 2016 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2017 2018 IN6_MULTI_LOCK(); 2019 2020 /* 2021 * Find the membership in the membership list. 2022 */ 2023 imo = in6p_findmoptions(inp); 2024 imf = im6o_match_group(imo, ifp, &gsa->sa); 2025 if (imf == NULL) { 2026 is_new = 1; 2027 inm = NULL; 2028 2029 if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) { 2030 error = ENOMEM; 2031 goto out_in6p_locked; 2032 } 2033 } else { 2034 is_new = 0; 2035 inm = imf->im6f_in6m; 2036 2037 if (ssa->ss.ss_family != AF_UNSPEC) { 2038 /* 2039 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 2040 * is an error. On an existing inclusive membership, 2041 * it just adds the source to the filter list. 
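 *
 * (Illustration only: an application typically adds another source
 * to an existing inclusive membership with a further
 * MCAST_JOIN_SOURCE_GROUP request on socket "s"; all addresses and
 * the interface name below are arbitrary examples.)
 *
 *	struct group_source_req gsr;
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	memset(&gsr, 0, sizeof(gsr));
 *	gsr.gsr_interface = if_nametoindex("em0");
 *	grp->sin6_family = src->sin6_family = AF_INET6;
 *	grp->sin6_len = src->sin6_len = sizeof(struct sockaddr_in6);
 *	inet_pton(AF_INET6, "ff35::8000:1", &grp->sin6_addr);
 *	inet_pton(AF_INET6, "2001:db8::2", &src->sin6_addr);
 *	setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *	    &gsr, sizeof(gsr));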
2042 */ 2043 if (imf->im6f_st[1] != MCAST_INCLUDE) { 2044 error = EINVAL; 2045 goto out_in6p_locked; 2046 } 2047 /* 2048 * Throw out duplicates. 2049 * 2050 * XXX FIXME: This makes a naive assumption that 2051 * even if entries exist for *ssa in this imf, 2052 * they will be rejected as dupes, even if they 2053 * are not valid in the current mode (in-mode). 2054 * 2055 * in6_msource is transactioned just as for anything 2056 * else in SSM -- but note naive use of in6m_graft() 2057 * below for allocating new filter entries. 2058 * 2059 * This is only an issue if someone mixes the 2060 * full-state SSM API with the delta-based API, 2061 * which is discouraged in the relevant RFCs. 2062 */ 2063 lims = im6o_match_source(imf, &ssa->sa); 2064 if (lims != NULL /*&& 2065 lims->im6sl_st[1] == MCAST_INCLUDE*/) { 2066 error = EADDRNOTAVAIL; 2067 goto out_in6p_locked; 2068 } 2069 } else { 2070 /* 2071 * MCAST_JOIN_GROUP alone, on any existing membership, 2072 * is rejected, to stop the same inpcb tying up 2073 * multiple refs to the in_multi. 2074 * On an existing inclusive membership, this is also 2075 * an error; if you want to change filter mode, 2076 * you must use the userland API setsourcefilter(). 2077 * XXX We don't reject this for imf in UNDEFINED 2078 * state at t1, because allocation of a filter 2079 * is atomic with allocation of a membership. 2080 */ 2081 error = EADDRINUSE; 2082 goto out_in6p_locked; 2083 } 2084 } 2085 2086 /* 2087 * Begin state merge transaction at socket layer. 2088 */ 2089 INP_WLOCK_ASSERT(inp); 2090 2091 /* 2092 * Graft new source into filter list for this inpcb's 2093 * membership of the group. The in6_multi may not have 2094 * been allocated yet if this is a new membership, however, 2095 * the in_mfilter slot will be allocated and must be initialized. 2096 * 2097 * Note: Grafting of exclusive mode filters doesn't happen 2098 * in this path. 2099 * XXX: Should check for non-NULL lims (node exists but may 2100 * not be in-mode) for interop with full-state API. 2101 */ 2102 if (ssa->ss.ss_family != AF_UNSPEC) { 2103 /* Membership starts in IN mode */ 2104 if (is_new) { 2105 CTR1(KTR_MLD, "%s: new join w/source", __func__); 2106 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE); 2107 if (imf == NULL) { 2108 error = ENOMEM; 2109 goto out_in6p_locked; 2110 } 2111 } else { 2112 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 2113 } 2114 lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); 2115 if (lims == NULL) { 2116 CTR1(KTR_MLD, "%s: merge imf state failed", 2117 __func__); 2118 error = ENOMEM; 2119 goto out_in6p_locked; 2120 } 2121 } else { 2122 /* No address specified; Membership starts in EX mode */ 2123 if (is_new) { 2124 CTR1(KTR_MLD, "%s: new join w/o source", __func__); 2125 imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE); 2126 if (imf == NULL) { 2127 error = ENOMEM; 2128 goto out_in6p_locked; 2129 } 2130 } 2131 } 2132 2133 /* 2134 * Begin state merge transaction at MLD layer. 2135 */ 2136 if (is_new) { 2137 in_pcbref(inp); 2138 INP_WUNLOCK(inp); 2139 2140 error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf, 2141 &imf->im6f_in6m, 0); 2142 2143 INP_WLOCK(inp); 2144 if (in_pcbrele_wlocked(inp)) { 2145 error = ENXIO; 2146 goto out_in6p_unlocked; 2147 } 2148 if (error) { 2149 goto out_in6p_locked; 2150 } 2151 /* 2152 * NOTE: Refcount from in6_joingroup_locked() 2153 * is protecting membership. 
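 * If in6_joingroup_locked() failed, or the inpcb was freed while the
 * INP lock was dropped, the gotos above skip this insertion with
 * "imf" still unlinked; the cleanup after the out_in6p_unlocked
 * label then releases any half-constructed in6_multi via
 * in6m_release_list_deferred() and frees the filter.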
2154 */ 2155 ip6_mfilter_insert(&imo->im6o_head, imf); 2156 } else { 2157 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2158 IN6_MULTI_LIST_LOCK(); 2159 error = in6m_merge(inm, imf); 2160 if (error) { 2161 CTR1(KTR_MLD, "%s: failed to merge inm state", 2162 __func__); 2163 IN6_MULTI_LIST_UNLOCK(); 2164 im6f_rollback(imf); 2165 im6f_reap(imf); 2166 goto out_in6p_locked; 2167 } 2168 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2169 error = mld_change_state(inm, 0); 2170 IN6_MULTI_LIST_UNLOCK(); 2171 2172 if (error) { 2173 CTR1(KTR_MLD, "%s: failed mld downcall", 2174 __func__); 2175 im6f_rollback(imf); 2176 im6f_reap(imf); 2177 goto out_in6p_locked; 2178 } 2179 } 2180 2181 im6f_commit(imf); 2182 imf = NULL; 2183 2184 out_in6p_locked: 2185 INP_WUNLOCK(inp); 2186 out_in6p_unlocked: 2187 IN6_MULTI_UNLOCK(); 2188 2189 if (is_new && imf) { 2190 if (imf->im6f_in6m != NULL) { 2191 struct in6_multi_head inmh; 2192 2193 SLIST_INIT(&inmh); 2194 SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer); 2195 in6m_release_list_deferred(&inmh); 2196 } 2197 ip6_mfilter_free(imf); 2198 } 2199 return (error); 2200 } 2201 2202 /* 2203 * Leave an IPv6 multicast group on an inpcb, possibly with a source. 2204 */ 2205 static int 2206 in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) 2207 { 2208 struct ipv6_mreq mreq; 2209 struct group_source_req gsr; 2210 struct epoch_tracker et; 2211 sockunion_t *gsa, *ssa; 2212 struct ifnet *ifp; 2213 struct in6_mfilter *imf; 2214 struct ip6_moptions *imo; 2215 struct in6_msource *ims; 2216 struct in6_multi *inm; 2217 uint32_t ifindex; 2218 int error; 2219 bool is_final; 2220 #ifdef KTR 2221 char ip6tbuf[INET6_ADDRSTRLEN]; 2222 #endif 2223 2224 ifp = NULL; 2225 ifindex = 0; 2226 error = 0; 2227 is_final = true; 2228 2229 memset(&gsr, 0, sizeof(struct group_source_req)); 2230 gsa = (sockunion_t *)&gsr.gsr_group; 2231 gsa->ss.ss_family = AF_UNSPEC; 2232 ssa = (sockunion_t *)&gsr.gsr_source; 2233 ssa->ss.ss_family = AF_UNSPEC; 2234 2235 /* 2236 * Chew everything passed in up into a struct group_source_req 2237 * as that is easier to process. 2238 * Note: Any embedded scope ID in the multicast group passed 2239 * in by userland is ignored, the interface index is the recommended 2240 * mechanism to specify an interface; see below. 
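 *
 * (Illustration only: a full leave of a group joined by interface
 * index typically looks like the following, on socket "s"; the group
 * address and interface name are arbitrary examples.)
 *
 *	struct group_req gr;
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gr.gr_group;
 *
 *	memset(&gr, 0, sizeof(gr));
 *	gr.gr_interface = if_nametoindex("em0");
 *	grp->sin6_family = AF_INET6;
 *	grp->sin6_len = sizeof(struct sockaddr_in6);
 *	inet_pton(AF_INET6, "ff15::101", &grp->sin6_addr);
 *	setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_GROUP, &gr, sizeof(gr));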
2241 */ 2242 switch (sopt->sopt_name) { 2243 case IPV6_LEAVE_GROUP: 2244 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 2245 sizeof(struct ipv6_mreq)); 2246 if (error) 2247 return (error); 2248 gsa->sin6.sin6_family = AF_INET6; 2249 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 2250 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 2251 gsa->sin6.sin6_port = 0; 2252 gsa->sin6.sin6_scope_id = 0; 2253 ifindex = mreq.ipv6mr_interface; 2254 break; 2255 2256 case MCAST_LEAVE_GROUP: 2257 case MCAST_LEAVE_SOURCE_GROUP: 2258 if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2259 error = sooptcopyin(sopt, &gsr, 2260 sizeof(struct group_req), 2261 sizeof(struct group_req)); 2262 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2263 error = sooptcopyin(sopt, &gsr, 2264 sizeof(struct group_source_req), 2265 sizeof(struct group_source_req)); 2266 } 2267 if (error) 2268 return (error); 2269 2270 if (gsa->sin6.sin6_family != AF_INET6 || 2271 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2272 return (EINVAL); 2273 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2274 if (ssa->sin6.sin6_family != AF_INET6 || 2275 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2276 return (EINVAL); 2277 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 2278 return (EINVAL); 2279 /* 2280 * TODO: Validate embedded scope ID in source 2281 * list entry against passed-in ifp, if and only 2282 * if source list filter entry is iface or node local. 2283 */ 2284 in6_clearscope(&ssa->sin6.sin6_addr); 2285 } 2286 gsa->sin6.sin6_port = 0; 2287 gsa->sin6.sin6_scope_id = 0; 2288 ifindex = gsr.gsr_interface; 2289 break; 2290 2291 default: 2292 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 2293 __func__, sopt->sopt_name); 2294 return (EOPNOTSUPP); 2295 break; 2296 } 2297 2298 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2299 return (EINVAL); 2300 2301 /* 2302 * Validate interface index if provided. If no interface index 2303 * was provided separately, attempt to look the membership up 2304 * from the default scope as a last resort to disambiguate 2305 * the membership we are being asked to leave. 2306 * XXX SCOPE6 lock potentially taken here. 2307 */ 2308 if (ifindex != 0) { 2309 NET_EPOCH_ENTER(et); 2310 ifp = ifnet_byindex(ifindex); 2311 NET_EPOCH_EXIT(et); /* XXXGL: unsafe ifp */ 2312 if (ifp == NULL) 2313 return (EADDRNOTAVAIL); 2314 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2315 } else { 2316 error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); 2317 if (error) 2318 return (EADDRNOTAVAIL); 2319 /* 2320 * Some badly behaved applications don't pass an ifindex 2321 * or a scope ID, which is an API violation. In this case, 2322 * perform a lookup as per a v6 join. 2323 * 2324 * XXX For now, stomp on zone ID for the corner case. 2325 * This is not the 'KAME way', but we need to see the ifp 2326 * directly until such time as this implementation is 2327 * refactored, assuming the scope IDs are the way to go. 
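 *
 * (Illustration only: after sa6_embedscope(), a link-scoped group
 * such as ff02::1 with zone ID 3 is carried internally as ff02:3::1,
 * which is why the interface index can be recovered from
 * s6_addr16[1] immediately below.)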
2328 */ 2329 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]); 2330 if (ifindex == 0) { 2331 CTR2(KTR_MLD, "%s: warning: no ifindex, looking up " 2332 "ifp for group %s.", __func__, 2333 ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr)); 2334 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 2335 } else { 2336 NET_EPOCH_ENTER(et); 2337 ifp = ifnet_byindex(ifindex); 2338 NET_EPOCH_EXIT(et); /* XXXGL: unsafe ifp */ 2339 } 2340 if (ifp == NULL) 2341 return (EADDRNOTAVAIL); 2342 } 2343 2344 CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp); 2345 KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__)); 2346 2347 IN6_MULTI_LOCK(); 2348 2349 /* 2350 * Find the membership in the membership list. 2351 */ 2352 imo = in6p_findmoptions(inp); 2353 imf = im6o_match_group(imo, ifp, &gsa->sa); 2354 if (imf == NULL) { 2355 error = EADDRNOTAVAIL; 2356 goto out_in6p_locked; 2357 } 2358 inm = imf->im6f_in6m; 2359 2360 if (ssa->ss.ss_family != AF_UNSPEC) 2361 is_final = false; 2362 2363 /* 2364 * Begin state merge transaction at socket layer. 2365 */ 2366 INP_WLOCK_ASSERT(inp); 2367 2368 /* 2369 * If we were instructed only to leave a given source, do so. 2370 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2371 */ 2372 if (is_final) { 2373 ip6_mfilter_remove(&imo->im6o_head, imf); 2374 im6f_leave(imf); 2375 2376 /* 2377 * Give up the multicast address record to which 2378 * the membership points. 2379 */ 2380 (void)in6_leavegroup_locked(inm, imf); 2381 } else { 2382 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 2383 error = EADDRNOTAVAIL; 2384 goto out_in6p_locked; 2385 } 2386 ims = im6o_match_source(imf, &ssa->sa); 2387 if (ims == NULL) { 2388 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 2389 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 2390 "not "); 2391 error = EADDRNOTAVAIL; 2392 goto out_in6p_locked; 2393 } 2394 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 2395 error = im6f_prune(imf, &ssa->sin6); 2396 if (error) { 2397 CTR1(KTR_MLD, "%s: merge imf state failed", 2398 __func__); 2399 goto out_in6p_locked; 2400 } 2401 } 2402 2403 /* 2404 * Begin state merge transaction at MLD layer. 2405 */ 2406 if (!is_final) { 2407 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2408 IN6_MULTI_LIST_LOCK(); 2409 error = in6m_merge(inm, imf); 2410 if (error) { 2411 CTR1(KTR_MLD, "%s: failed to merge inm state", 2412 __func__); 2413 IN6_MULTI_LIST_UNLOCK(); 2414 im6f_rollback(imf); 2415 im6f_reap(imf); 2416 goto out_in6p_locked; 2417 } 2418 2419 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2420 error = mld_change_state(inm, 0); 2421 IN6_MULTI_LIST_UNLOCK(); 2422 if (error) { 2423 CTR1(KTR_MLD, "%s: failed mld downcall", 2424 __func__); 2425 im6f_rollback(imf); 2426 im6f_reap(imf); 2427 goto out_in6p_locked; 2428 } 2429 } 2430 2431 im6f_commit(imf); 2432 im6f_reap(imf); 2433 2434 out_in6p_locked: 2435 INP_WUNLOCK(inp); 2436 2437 if (is_final && imf) 2438 ip6_mfilter_free(imf); 2439 2440 IN6_MULTI_UNLOCK(); 2441 return (error); 2442 } 2443 2444 /* 2445 * Select the interface for transmitting IPv6 multicast datagrams. 2446 * 2447 * The only argument accepted by this socket option is an interface 2448 * index (u_int). An interface index of 0 is used to remove a previous 2449 * selection. 2450 * When no interface is selected, one is chosen for every send.
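 *
 * (Illustration only: the option is typically set on socket "s" as
 * follows; the interface name is an arbitrary example.)
 *
 *	u_int ifindex = if_nametoindex("em0");
 *
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_IF,
 *	    &ifindex, sizeof(ifindex));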
2451 */ 2452 static int 2453 in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) 2454 { 2455 struct epoch_tracker et; 2456 struct ifnet *ifp; 2457 struct ip6_moptions *imo; 2458 u_int ifindex; 2459 int error; 2460 2461 if (sopt->sopt_valsize != sizeof(u_int)) 2462 return (EINVAL); 2463 2464 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); 2465 if (error) 2466 return (error); 2467 NET_EPOCH_ENTER(et); 2468 if (ifindex == 0) 2469 ifp = NULL; 2470 else { 2471 ifp = ifnet_byindex(ifindex); 2472 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { 2473 NET_EPOCH_EXIT(et); 2474 return (EADDRNOTAVAIL); 2475 } 2476 } 2477 NET_EPOCH_EXIT(et); /* XXXGL: unsafe ifp */ 2478 imo = in6p_findmoptions(inp); 2479 imo->im6o_multicast_ifp = ifp; 2480 INP_WUNLOCK(inp); 2481 2482 return (0); 2483 } 2484 2485 /* 2486 * Atomically set source filters on a socket for an IPv6 multicast group. 2487 * 2488 * XXXGL: unsafely exits epoch with ifnet pointer 2489 */ 2490 static int 2491 in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) 2492 { 2493 struct __msfilterreq msfr; 2494 struct epoch_tracker et; 2495 sockunion_t *gsa; 2496 struct ifnet *ifp; 2497 struct in6_mfilter *imf; 2498 struct ip6_moptions *imo; 2499 struct in6_multi *inm; 2500 int error; 2501 2502 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 2503 sizeof(struct __msfilterreq)); 2504 if (error) 2505 return (error); 2506 2507 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 2508 return (ENOBUFS); 2509 2510 if (msfr.msfr_fmode != MCAST_EXCLUDE && 2511 msfr.msfr_fmode != MCAST_INCLUDE) 2512 return (EINVAL); 2513 2514 if (msfr.msfr_group.ss_family != AF_INET6 || 2515 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 2516 return (EINVAL); 2517 2518 gsa = (sockunion_t *)&msfr.msfr_group; 2519 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2520 return (EINVAL); 2521 2522 gsa->sin6.sin6_port = 0; /* ignore port */ 2523 2524 NET_EPOCH_ENTER(et); 2525 ifp = ifnet_byindex(msfr.msfr_ifindex); 2526 NET_EPOCH_EXIT(et); 2527 if (ifp == NULL) 2528 return (EADDRNOTAVAIL); 2529 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2530 2531 /* 2532 * Take the INP write lock. 2533 * Check if this socket is a member of this group. 2534 */ 2535 imo = in6p_findmoptions(inp); 2536 imf = im6o_match_group(imo, ifp, &gsa->sa); 2537 if (imf == NULL) { 2538 error = EADDRNOTAVAIL; 2539 goto out_in6p_locked; 2540 } 2541 inm = imf->im6f_in6m; 2542 2543 /* 2544 * Begin state merge transaction at socket layer. 2545 */ 2546 INP_WLOCK_ASSERT(inp); 2547 2548 imf->im6f_st[1] = msfr.msfr_fmode; 2549 2550 /* 2551 * Apply any new source filters, if present. 2552 * Make a copy of the user-space source vector so 2553 * that we may copy them with a single copyin. This 2554 * allows us to deal with page faults up-front. 2555 */ 2556 if (msfr.msfr_nsrcs > 0) { 2557 struct in6_msource *lims; 2558 struct sockaddr_in6 *psin; 2559 struct sockaddr_storage *kss, *pkss; 2560 int i; 2561 2562 INP_WUNLOCK(inp); 2563 2564 CTR2(KTR_MLD, "%s: loading %lu source list entries", 2565 __func__, (unsigned long)msfr.msfr_nsrcs); 2566 kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 2567 M_TEMP, M_WAITOK); 2568 error = copyin(msfr.msfr_srcs, kss, 2569 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 2570 if (error) { 2571 free(kss, M_TEMP); 2572 return (error); 2573 } 2574 2575 INP_WLOCK(inp); 2576 2577 /* 2578 * Mark all source filters as UNDEFINED at t1. 
2579 * Restore new group filter mode, as im6f_leave() 2580 * will set it to INCLUDE. 2581 */ 2582 im6f_leave(imf); 2583 imf->im6f_st[1] = msfr.msfr_fmode; 2584 2585 /* 2586 * Update socket layer filters at t1, lazy-allocating 2587 * new entries. This saves a bunch of memory at the 2588 * cost of one RB_FIND() per source entry; duplicate 2589 * entries in the msfr_nsrcs vector are ignored. 2590 * If we encounter an error, rollback transaction. 2591 * 2592 * XXX This too could be replaced with a set-symmetric 2593 * difference like loop to avoid walking from root 2594 * every time, as the key space is common. 2595 */ 2596 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2597 psin = (struct sockaddr_in6 *)pkss; 2598 if (psin->sin6_family != AF_INET6) { 2599 error = EAFNOSUPPORT; 2600 break; 2601 } 2602 if (psin->sin6_len != sizeof(struct sockaddr_in6)) { 2603 error = EINVAL; 2604 break; 2605 } 2606 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { 2607 error = EINVAL; 2608 break; 2609 } 2610 /* 2611 * TODO: Validate embedded scope ID in source 2612 * list entry against passed-in ifp, if and only 2613 * if source list filter entry is iface or node local. 2614 */ 2615 in6_clearscope(&psin->sin6_addr); 2616 error = im6f_get_source(imf, psin, &lims); 2617 if (error) 2618 break; 2619 lims->im6sl_st[1] = imf->im6f_st[1]; 2620 } 2621 free(kss, M_TEMP); 2622 } 2623 2624 if (error) 2625 goto out_im6f_rollback; 2626 2627 INP_WLOCK_ASSERT(inp); 2628 IN6_MULTI_LIST_LOCK(); 2629 2630 /* 2631 * Begin state merge transaction at MLD layer. 2632 */ 2633 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2634 error = in6m_merge(inm, imf); 2635 if (error) 2636 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 2637 else { 2638 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2639 error = mld_change_state(inm, 0); 2640 if (error) 2641 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 2642 } 2643 2644 IN6_MULTI_LIST_UNLOCK(); 2645 2646 out_im6f_rollback: 2647 if (error) 2648 im6f_rollback(imf); 2649 else 2650 im6f_commit(imf); 2651 2652 im6f_reap(imf); 2653 2654 out_in6p_locked: 2655 INP_WUNLOCK(inp); 2656 return (error); 2657 } 2658 2659 /* 2660 * Set the IP multicast options in response to user setsockopt(). 2661 * 2662 * Many of the socket options handled in this function duplicate the 2663 * functionality of socket options in the regular unicast API. However, 2664 * it is not possible to merge the duplicate code, because the idempotence 2665 * of the IPv6 multicast part of the BSD Sockets API must be preserved; 2666 * the effects of these options must be treated as separate and distinct. 2667 * 2668 * SMPng: XXX: Unlocked read of inp_socket believed OK. 2669 */ 2670 int 2671 ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2672 { 2673 struct ip6_moptions *im6o; 2674 int error; 2675 2676 error = 0; 2677 2678 /* If socket is neither of type SOCK_RAW or SOCK_DGRAM, reject it. 
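 *
 * (Illustration only, assuming a userland AF_INET6/SOCK_DGRAM socket
 * "s": a typical caller of one of the options handled below; a hop
 * limit of -1 restores the kernel default.)
 *
 *	int hlim = 32;
 *
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
 *	    &hlim, sizeof(hlim));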
*/ 2679 if (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2680 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM) 2681 return (EOPNOTSUPP); 2682 2683 switch (sopt->sopt_name) { 2684 case IPV6_MULTICAST_IF: 2685 error = in6p_set_multicast_if(inp, sopt); 2686 break; 2687 2688 case IPV6_MULTICAST_HOPS: { 2689 int hlim; 2690 2691 if (sopt->sopt_valsize != sizeof(int)) { 2692 error = EINVAL; 2693 break; 2694 } 2695 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); 2696 if (error) 2697 break; 2698 if (hlim < -1 || hlim > 255) { 2699 error = EINVAL; 2700 break; 2701 } else if (hlim == -1) { 2702 hlim = V_ip6_defmcasthlim; 2703 } 2704 im6o = in6p_findmoptions(inp); 2705 im6o->im6o_multicast_hlim = hlim; 2706 INP_WUNLOCK(inp); 2707 break; 2708 } 2709 2710 case IPV6_MULTICAST_LOOP: { 2711 u_int loop; 2712 2713 /* 2714 * Set the loopback flag for outgoing multicast packets. 2715 * Must be zero or one. 2716 */ 2717 if (sopt->sopt_valsize != sizeof(u_int)) { 2718 error = EINVAL; 2719 break; 2720 } 2721 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); 2722 if (error) 2723 break; 2724 if (loop > 1) { 2725 error = EINVAL; 2726 break; 2727 } 2728 im6o = in6p_findmoptions(inp); 2729 im6o->im6o_multicast_loop = loop; 2730 INP_WUNLOCK(inp); 2731 break; 2732 } 2733 2734 case IPV6_JOIN_GROUP: 2735 case MCAST_JOIN_GROUP: 2736 case MCAST_JOIN_SOURCE_GROUP: 2737 error = in6p_join_group(inp, sopt); 2738 break; 2739 2740 case IPV6_LEAVE_GROUP: 2741 case MCAST_LEAVE_GROUP: 2742 case MCAST_LEAVE_SOURCE_GROUP: 2743 error = in6p_leave_group(inp, sopt); 2744 break; 2745 2746 case MCAST_BLOCK_SOURCE: 2747 case MCAST_UNBLOCK_SOURCE: 2748 error = in6p_block_unblock_source(inp, sopt); 2749 break; 2750 2751 case IPV6_MSFILTER: 2752 error = in6p_set_source_filters(inp, sopt); 2753 break; 2754 2755 default: 2756 error = EOPNOTSUPP; 2757 break; 2758 } 2759 2760 INP_UNLOCK_ASSERT(inp); 2761 2762 return (error); 2763 } 2764 2765 /* 2766 * Expose MLD's multicast filter mode and source list(s) to userland, 2767 * keyed by (ifindex, group). 2768 * The filter mode is written out as a uint32_t, followed by 2769 * 0..n of struct in6_addr. 2770 * For use by ifmcstat(8). 2771 * SMPng: NOTE: unlocked read of ifindex space. 2772 */ 2773 static int 2774 sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) 2775 { 2776 struct in6_addr mcaddr; 2777 struct in6_addr src; 2778 struct epoch_tracker et; 2779 struct ifnet *ifp; 2780 struct ifmultiaddr *ifma; 2781 struct in6_multi *inm; 2782 struct ip6_msource *ims; 2783 int *name; 2784 int retval; 2785 u_int namelen; 2786 uint32_t fmode, ifindex; 2787 #ifdef KTR 2788 char ip6tbuf[INET6_ADDRSTRLEN]; 2789 #endif 2790 2791 name = (int *)arg1; 2792 namelen = arg2; 2793 2794 if (req->newptr != NULL) 2795 return (EPERM); 2796 2797 /* int: ifindex + 4 * 32 bits of IPv6 address */ 2798 if (namelen != 5) 2799 return (EINVAL); 2800 2801 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); 2802 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { 2803 CTR2(KTR_MLD, "%s: group %s is not multicast", 2804 __func__, ip6_sprintf(ip6tbuf, &mcaddr)); 2805 return (EINVAL); 2806 } 2807 2808 ifindex = name[0]; 2809 NET_EPOCH_ENTER(et); 2810 ifp = ifnet_byindex(ifindex); 2811 if (ifp == NULL) { 2812 NET_EPOCH_EXIT(et); 2813 CTR2(KTR_MLD, "%s: no ifp for ifindex %u", 2814 __func__, ifindex); 2815 return (ENOENT); 2816 } 2817 /* 2818 * Internal MLD lookups require that scope/zone ID is set. 
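 * in6_setscope() below rewrites the group with the zone of "ifp"
 * embedded (e.g. ff02::1 on ifindex 2 becomes ff02:2::1), so that
 * the IN6_ARE_ADDR_EQUAL() comparison further down matches the
 * embedded form stored in the kernel's group records.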
2819 */ 2820 (void)in6_setscope(&mcaddr, ifp, NULL); 2821 2822 retval = sysctl_wire_old_buffer(req, 2823 sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); 2824 if (retval) { 2825 NET_EPOCH_EXIT(et); 2826 return (retval); 2827 } 2828 2829 IN6_MULTI_LOCK(); 2830 IN6_MULTI_LIST_LOCK(); 2831 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2832 inm = in6m_ifmultiaddr_get_inm(ifma); 2833 if (inm == NULL) 2834 continue; 2835 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) 2836 continue; 2837 fmode = inm->in6m_st[1].iss_fmode; 2838 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); 2839 if (retval != 0) 2840 break; 2841 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 2842 CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); 2843 /* 2844 * Only copy-out sources which are in-mode. 2845 */ 2846 if (fmode != im6s_get_mode(inm, ims, 1)) { 2847 CTR1(KTR_MLD, "%s: skip non-in-mode", 2848 __func__); 2849 continue; 2850 } 2851 src = ims->im6s_addr; 2852 retval = SYSCTL_OUT(req, &src, 2853 sizeof(struct in6_addr)); 2854 if (retval != 0) 2855 break; 2856 } 2857 } 2858 IN6_MULTI_LIST_UNLOCK(); 2859 IN6_MULTI_UNLOCK(); 2860 NET_EPOCH_EXIT(et); 2861 2862 return (retval); 2863 } 2864 2865 #ifdef KTR 2866 2867 static const char *in6m_modestrs[] = { "un", "in", "ex" }; 2868 2869 static const char * 2870 in6m_mode_str(const int mode) 2871 { 2872 2873 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) 2874 return (in6m_modestrs[mode]); 2875 return ("??"); 2876 } 2877 2878 static const char *in6m_statestrs[] = { 2879 "not-member", 2880 "silent", 2881 "reporting", 2882 "idle", 2883 "lazy", 2884 "sleeping", 2885 "awakening", 2886 "query-pending", 2887 "sg-query-pending", 2888 "leaving" 2889 }; 2890 _Static_assert(nitems(in6m_statestrs) == 2891 MLD_LEAVING_MEMBER - MLD_NOT_MEMBER + 1, "Missing MLD group state"); 2892 2893 static const char * 2894 in6m_state_str(const int state) 2895 { 2896 2897 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) 2898 return (in6m_statestrs[state]); 2899 return ("??"); 2900 } 2901 2902 /* 2903 * Dump an in6_multi structure to the console. 2904 */ 2905 void 2906 in6m_print(const struct in6_multi *inm) 2907 { 2908 int t; 2909 char ip6tbuf[INET6_ADDRSTRLEN]; 2910 2911 if ((ktr_mask & KTR_MLD) == 0) 2912 return; 2913 2914 printf("%s: --- begin in6m %p ---\n", __func__, inm); 2915 printf("addr %s ifp %p(%s) ifma %p\n", 2916 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2917 inm->in6m_ifp, 2918 if_name(inm->in6m_ifp), 2919 inm->in6m_ifma); 2920 printf("timer %u state %s refcount %u scq.len %u\n", 2921 inm->in6m_timer, 2922 in6m_state_str(inm->in6m_state), 2923 inm->in6m_refcount, 2924 mbufq_len(&inm->in6m_scq)); 2925 printf("mli %p nsrc %lu sctimer %u scrv %u\n", 2926 inm->in6m_mli, 2927 inm->in6m_nsrc, 2928 inm->in6m_sctimer, 2929 inm->in6m_scrv); 2930 for (t = 0; t < 2; t++) { 2931 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, 2932 in6m_mode_str(inm->in6m_st[t].iss_fmode), 2933 inm->in6m_st[t].iss_asm, 2934 inm->in6m_st[t].iss_ex, 2935 inm->in6m_st[t].iss_in, 2936 inm->in6m_st[t].iss_rec); 2937 } 2938 printf("%s: --- end in6m %p ---\n", __func__, inm); 2939 } 2940 2941 #else /* !KTR */ 2942 2943 void 2944 in6m_print(const struct in6_multi *inm) 2945 { 2946 2947 } 2948 2949 #endif /* KTR */ 2950
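/*
 * (Illustration only, not compiled here: a userland consumer of the
 * per-group filter sysctl exported by sysctl_ip6_mcast_filters(), in
 * the style of ifmcstat(8).  The node name
 * "net.inet6.ip6.mcast.filters" is assumed.  The OID is extended
 * with the interface index and the four 32-bit words of the group
 * address; the reply is a uint32_t filter mode followed by zero or
 * more struct in6_addr source entries.  The interface name and group
 * are arbitrary examples.)
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME;
 *	struct in6_addr grp;
 *	char buf[8192];
 *	size_t len = sizeof(buf);
 *
 *	sysctlnametomib("net.inet6.ip6.mcast.filters", mib, &miblen);
 *	inet_pton(AF_INET6, "ff02::1", &grp);
 *	mib[miblen] = if_nametoindex("em0");
 *	memcpy(&mib[miblen + 1], &grp, sizeof(grp));
 *	if (sysctl(mib, miblen + 5, buf, &len, NULL, 0) == 0 &&
 *	    len >= sizeof(uint32_t))
 *		printf("filter mode %u, %zu source(s)\n",
 *		    *(uint32_t *)buf,
 *		    (len - sizeof(uint32_t)) / sizeof(struct in6_addr));
 */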