/*
 * Copyright (c) 2009 Bruce Simpson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IPv6 multicast socket, group, and socket option processing module.
 * Normative references: RFC 2292, RFC 3493, RFC 3542, RFC 3678, RFC 3810.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/ktr.h>
#include <sys/tree.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet6/nd6.h>
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>

#ifndef KTR_MLD
#define KTR_MLD KTR_INET6
#endif

#ifndef __SOCKUNION_DECLARED
union sockunion {
	struct sockaddr_storage	ss;
	struct sockaddr		sa;
	struct sockaddr_dl	sdl;
	struct sockaddr_in6	sin6;
};
typedef union sockunion sockunion_t;
#define __SOCKUNION_DECLARED
#endif /* __SOCKUNION_DECLARED */

static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
    "IPv6 multicast PCB-layer source filter");
static MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
    "IPv6 multicast MLD-layer source filter");

RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);

/*
 * Locking:
 * - Lock order is: Giant, INP_WLOCK, IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
 * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier; however,
 *   it may also be taken by code in net/if.c.
 * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK.
 *
 * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly
 * any need for in6_multi itself to be virtualized -- it is bound to an ifp
 * anyway no matter what happens.
 */
struct mtx in6_multi_mtx;
MTX_SYSINIT(in6_multi_mtx, &in6_multi_mtx, "in6_multi_mtx", MTX_DEF);

static void	im6f_commit(struct in6_mfilter *);
static int	im6f_get_source(struct in6_mfilter *imf,
		    const struct sockaddr_in6 *psin,
		    struct in6_msource **);
static struct in6_msource *
		im6f_graft(struct in6_mfilter *, const uint8_t,
		    const struct sockaddr_in6 *);
static void	im6f_leave(struct in6_mfilter *);
static int	im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
static void	im6f_purge(struct in6_mfilter *);
static void	im6f_rollback(struct in6_mfilter *);
static void	im6f_reap(struct in6_mfilter *);
static int	im6o_grow(struct ip6_moptions *);
static size_t	im6o_match_group(const struct ip6_moptions *,
		    const struct ifnet *, const struct sockaddr *);
static struct in6_msource *
		im6o_match_source(const struct ip6_moptions *, const size_t,
		    const struct sockaddr *);
static void	im6s_merge(struct ip6_msource *ims,
		    const struct in6_msource *lims, const int rollback);
static int	in6_mc_get(struct ifnet *, const struct in6_addr *,
		    struct in6_multi **);
static int	in6m_get_source(struct in6_multi *inm,
		    const struct in6_addr *addr, const int noalloc,
		    struct ip6_msource **pims);
#ifdef KTR
static int	in6m_is_ifp_detached(const struct in6_multi *);
#endif
static int	in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *);
static void	in6m_purge(struct in6_multi *);
static void	in6m_reap(struct in6_multi *);
static struct ip6_moptions *
		in6p_findmoptions(struct inpcb *);
static int	in6p_get_source_filters(struct inpcb *, struct sockopt *);
static int	in6p_join_group(struct inpcb *, struct sockopt *);
static int	in6p_leave_group(struct inpcb *, struct sockopt *);
static struct ifnet *
		in6p_lookup_mcast_ifp(const struct inpcb *,
		    const struct sockaddr_in6 *);
static int	in6p_block_unblock_source(struct inpcb *, struct sockopt *);
static int	in6p_set_multicast_if(struct inpcb *, struct sockopt *);
static int	in6p_set_source_filters(struct inpcb *, struct sockopt *);
static int	sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_net_inet6_ip6);	/* XXX Not in any common header. */

static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW, 0,
    "IPv6 multicast");

static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
    CTLFLAG_RW | CTLFLAG_TUN, &in6_mcast_maxgrpsrc, 0,
    "Max source filters per group");
TUNABLE_ULONG("net.inet6.ip6.mcast.maxgrpsrc", &in6_mcast_maxgrpsrc);

static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
    CTLFLAG_RW | CTLFLAG_TUN, &in6_mcast_maxsocksrc, 0,
    "Max source filters per socket");
TUNABLE_ULONG("net.inet6.ip6.mcast.maxsocksrc", &in6_mcast_maxsocksrc);

/* TODO Virtualize this switch.
*/ 172 int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP; 173 SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_TUN, 174 &in6_mcast_loop, 0, "Loopback multicast datagrams by default"); 175 TUNABLE_INT("net.inet6.ip6.mcast.loop", &in6_mcast_loop); 176 177 static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters, 178 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters, 179 "Per-interface stack-wide source filters"); 180 181 #ifdef KTR 182 /* 183 * Inline function which wraps assertions for a valid ifp. 184 * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp 185 * is detached. 186 */ 187 static int __inline 188 in6m_is_ifp_detached(const struct in6_multi *inm) 189 { 190 struct ifnet *ifp; 191 192 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); 193 ifp = inm->in6m_ifma->ifma_ifp; 194 if (ifp != NULL) { 195 /* 196 * Sanity check that network-layer notion of ifp is the 197 * same as that of link-layer. 198 */ 199 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); 200 } 201 202 return (ifp == NULL); 203 } 204 #endif 205 206 /* 207 * Initialize an in6_mfilter structure to a known state at t0, t1 208 * with an empty source filter list. 209 */ 210 static __inline void 211 im6f_init(struct in6_mfilter *imf, const int st0, const int st1) 212 { 213 memset(imf, 0, sizeof(struct in6_mfilter)); 214 RB_INIT(&imf->im6f_sources); 215 imf->im6f_st[0] = st0; 216 imf->im6f_st[1] = st1; 217 } 218 219 /* 220 * Resize the ip6_moptions vector to the next power-of-two minus 1. 221 * May be called with locks held; do not sleep. 222 */ 223 static int 224 im6o_grow(struct ip6_moptions *imo) 225 { 226 struct in6_multi **nmships; 227 struct in6_multi **omships; 228 struct in6_mfilter *nmfilters; 229 struct in6_mfilter *omfilters; 230 size_t idx; 231 size_t newmax; 232 size_t oldmax; 233 234 nmships = NULL; 235 nmfilters = NULL; 236 omships = imo->im6o_membership; 237 omfilters = imo->im6o_mfilters; 238 oldmax = imo->im6o_max_memberships; 239 newmax = ((oldmax + 1) * 2) - 1; 240 241 if (newmax <= IPV6_MAX_MEMBERSHIPS) { 242 nmships = (struct in6_multi **)realloc(omships, 243 sizeof(struct in6_multi *) * newmax, M_IP6MOPTS, M_NOWAIT); 244 nmfilters = (struct in6_mfilter *)realloc(omfilters, 245 sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER, 246 M_NOWAIT); 247 if (nmships != NULL && nmfilters != NULL) { 248 /* Initialize newly allocated source filter heads. */ 249 for (idx = oldmax; idx < newmax; idx++) { 250 im6f_init(&nmfilters[idx], MCAST_UNDEFINED, 251 MCAST_EXCLUDE); 252 } 253 imo->im6o_max_memberships = newmax; 254 imo->im6o_membership = nmships; 255 imo->im6o_mfilters = nmfilters; 256 } 257 } 258 259 if (nmships == NULL || nmfilters == NULL) { 260 if (nmships != NULL) 261 free(nmships, M_IP6MOPTS); 262 if (nmfilters != NULL) 263 free(nmfilters, M_IN6MFILTER); 264 return (ETOOMANYREFS); 265 } 266 267 return (0); 268 } 269 270 /* 271 * Find an IPv6 multicast group entry for this ip6_moptions instance 272 * which matches the specified group, and optionally an interface. 273 * Return its index into the array, or -1 if not found. 274 */ 275 static size_t 276 im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp, 277 const struct sockaddr *group) 278 { 279 const struct sockaddr_in6 *gsin6; 280 struct in6_multi **pinm; 281 int idx; 282 int nmships; 283 284 gsin6 = (const struct sockaddr_in6 *)group; 285 286 /* The im6o_membership array may be lazy allocated. 
*/ 287 if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0) 288 return (-1); 289 290 nmships = imo->im6o_num_memberships; 291 pinm = &imo->im6o_membership[0]; 292 for (idx = 0; idx < nmships; idx++, pinm++) { 293 if (*pinm == NULL) 294 continue; 295 if ((ifp == NULL || ((*pinm)->in6m_ifp == ifp)) && 296 IN6_ARE_ADDR_EQUAL(&(*pinm)->in6m_addr, 297 &gsin6->sin6_addr)) { 298 break; 299 } 300 } 301 if (idx >= nmships) 302 idx = -1; 303 304 return (idx); 305 } 306 307 /* 308 * Find an IPv6 multicast source entry for this imo which matches 309 * the given group index for this socket, and source address. 310 * 311 * XXX TODO: The scope ID, if present in src, is stripped before 312 * any comparison. We SHOULD enforce scope/zone checks where the source 313 * filter entry has a link scope. 314 * 315 * NOTE: This does not check if the entry is in-mode, merely if 316 * it exists, which may not be the desired behaviour. 317 */ 318 static struct in6_msource * 319 im6o_match_source(const struct ip6_moptions *imo, const size_t gidx, 320 const struct sockaddr *src) 321 { 322 struct ip6_msource find; 323 struct in6_mfilter *imf; 324 struct ip6_msource *ims; 325 const sockunion_t *psa; 326 327 KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__)); 328 KASSERT(gidx != -1 && gidx < imo->im6o_num_memberships, 329 ("%s: invalid index %d\n", __func__, (int)gidx)); 330 331 /* The im6o_mfilters array may be lazy allocated. */ 332 if (imo->im6o_mfilters == NULL) 333 return (NULL); 334 imf = &imo->im6o_mfilters[gidx]; 335 336 psa = (const sockunion_t *)src; 337 find.im6s_addr = psa->sin6.sin6_addr; 338 in6_clearscope(&find.im6s_addr); /* XXX */ 339 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 340 341 return ((struct in6_msource *)ims); 342 } 343 344 /* 345 * Perform filtering for multicast datagrams on a socket by group and source. 346 * 347 * Returns 0 if a datagram should be allowed through, or various error codes 348 * if the socket was not a member of the group, or the source was muted, etc. 349 */ 350 int 351 im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, 352 const struct sockaddr *group, const struct sockaddr *src) 353 { 354 size_t gidx; 355 struct in6_msource *ims; 356 int mode; 357 358 KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 359 360 gidx = im6o_match_group(imo, ifp, group); 361 if (gidx == -1) 362 return (MCAST_NOTGMEMBER); 363 364 /* 365 * Check if the source was included in an (S,G) join. 366 * Allow reception on exclusive memberships by default, 367 * reject reception on inclusive memberships by default. 368 * Exclude source only if an in-mode exclude filter exists. 369 * Include source only if an in-mode include filter exists. 370 * NOTE: We are comparing group state here at MLD t1 (now) 371 * with socket-layer t0 (since last downcall). 372 */ 373 mode = imo->im6o_mfilters[gidx].im6f_st[1]; 374 ims = im6o_match_source(imo, gidx, src); 375 376 if ((ims == NULL && mode == MCAST_INCLUDE) || 377 (ims != NULL && ims->im6sl_st[0] != mode)) 378 return (MCAST_NOTSMEMBER); 379 380 return (MCAST_PASS); 381 } 382 383 /* 384 * Find and return a reference to an in6_multi record for (ifp, group), 385 * and bump its reference count. 386 * If one does not exist, try to allocate it, and update link-layer multicast 387 * filters on ifp to listen for group. 388 * Assumes the IN6_MULTI lock is held across the call. 389 * Return 0 if successful, otherwise return an appropriate error code. 
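 * The caller is expected to drop the returned reference with
 * in6m_release_locked() when it is no longer required.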
390 */ 391 static int 392 in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, 393 struct in6_multi **pinm) 394 { 395 struct sockaddr_in6 gsin6; 396 struct ifmultiaddr *ifma; 397 struct in6_multi *inm; 398 int error; 399 400 error = 0; 401 402 /* 403 * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK; 404 * if_addmulti() takes this mutex itself, so we must drop and 405 * re-acquire around the call. 406 */ 407 IN6_MULTI_LOCK_ASSERT(); 408 IF_ADDR_WLOCK(ifp); 409 410 inm = in6m_lookup_locked(ifp, group); 411 if (inm != NULL) { 412 /* 413 * If we already joined this group, just bump the 414 * refcount and return it. 415 */ 416 KASSERT(inm->in6m_refcount >= 1, 417 ("%s: bad refcount %d", __func__, inm->in6m_refcount)); 418 ++inm->in6m_refcount; 419 *pinm = inm; 420 goto out_locked; 421 } 422 423 memset(&gsin6, 0, sizeof(gsin6)); 424 gsin6.sin6_family = AF_INET6; 425 gsin6.sin6_len = sizeof(struct sockaddr_in6); 426 gsin6.sin6_addr = *group; 427 428 /* 429 * Check if a link-layer group is already associated 430 * with this network-layer group on the given ifnet. 431 */ 432 IF_ADDR_WUNLOCK(ifp); 433 error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); 434 if (error != 0) 435 return (error); 436 IF_ADDR_WLOCK(ifp); 437 438 /* 439 * If something other than netinet6 is occupying the link-layer 440 * group, print a meaningful error message and back out of 441 * the allocation. 442 * Otherwise, bump the refcount on the existing network-layer 443 * group association and return it. 444 */ 445 if (ifma->ifma_protospec != NULL) { 446 inm = (struct in6_multi *)ifma->ifma_protospec; 447 #ifdef INVARIANTS 448 KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 449 __func__)); 450 KASSERT(ifma->ifma_addr->sa_family == AF_INET6, 451 ("%s: ifma not AF_INET6", __func__)); 452 KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 453 if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp || 454 !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)) 455 panic("%s: ifma %p is inconsistent with %p (%p)", 456 __func__, ifma, inm, group); 457 #endif 458 ++inm->in6m_refcount; 459 *pinm = inm; 460 goto out_locked; 461 } 462 463 IF_ADDR_WLOCK_ASSERT(ifp); 464 465 /* 466 * A new in6_multi record is needed; allocate and initialize it. 467 * We DO NOT perform an MLD join as the in6_ layer may need to 468 * push an initial source list down to MLD to support SSM. 469 * 470 * The initial source filter state is INCLUDE, {} as per the RFC. 471 * Pending state-changes per group are subject to a bounds check. 472 */ 473 inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO); 474 if (inm == NULL) { 475 if_delmulti_ifma(ifma); 476 error = ENOMEM; 477 goto out_locked; 478 } 479 inm->in6m_addr = *group; 480 inm->in6m_ifp = ifp; 481 inm->in6m_mli = MLD_IFINFO(ifp); 482 inm->in6m_ifma = ifma; 483 inm->in6m_refcount = 1; 484 inm->in6m_state = MLD_NOT_MEMBER; 485 IFQ_SET_MAXLEN(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); 486 487 inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; 488 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 489 RB_INIT(&inm->in6m_srcs); 490 491 ifma->ifma_protospec = inm; 492 *pinm = inm; 493 494 out_locked: 495 IF_ADDR_WUNLOCK(ifp); 496 return (error); 497 } 498 499 /* 500 * Drop a reference to an in6_multi record. 501 * 502 * If the refcount drops to 0, free the in6_multi record and 503 * delete the underlying link-layer membership. 
504 */ 505 void 506 in6m_release_locked(struct in6_multi *inm) 507 { 508 struct ifmultiaddr *ifma; 509 510 IN6_MULTI_LOCK_ASSERT(); 511 512 CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount); 513 514 if (--inm->in6m_refcount > 0) { 515 CTR2(KTR_MLD, "%s: refcount is now %d", __func__, 516 inm->in6m_refcount); 517 return; 518 } 519 520 CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm); 521 522 ifma = inm->in6m_ifma; 523 524 /* XXX this access is not covered by IF_ADDR_LOCK */ 525 CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma); 526 KASSERT(ifma->ifma_protospec == inm, 527 ("%s: ifma_protospec != inm", __func__)); 528 ifma->ifma_protospec = NULL; 529 530 in6m_purge(inm); 531 532 free(inm, M_IP6MADDR); 533 534 if_delmulti_ifma(ifma); 535 } 536 537 /* 538 * Clear recorded source entries for a group. 539 * Used by the MLD code. Caller must hold the IN6_MULTI lock. 540 * FIXME: Should reap. 541 */ 542 void 543 in6m_clear_recorded(struct in6_multi *inm) 544 { 545 struct ip6_msource *ims; 546 547 IN6_MULTI_LOCK_ASSERT(); 548 549 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 550 if (ims->im6s_stp) { 551 ims->im6s_stp = 0; 552 --inm->in6m_st[1].iss_rec; 553 } 554 } 555 KASSERT(inm->in6m_st[1].iss_rec == 0, 556 ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec)); 557 } 558 559 /* 560 * Record a source as pending for a Source-Group MLDv2 query. 561 * This lives here as it modifies the shared tree. 562 * 563 * inm is the group descriptor. 564 * naddr is the address of the source to record in network-byte order. 565 * 566 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will 567 * lazy-allocate a source node in response to an SG query. 568 * Otherwise, no allocation is performed. This saves some memory 569 * with the trade-off that the source will not be reported to the 570 * router if joined in the window between the query response and 571 * the group actually being joined on the local host. 572 * 573 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed. 574 * This turns off the allocation of a recorded source entry if 575 * the group has not been joined. 576 * 577 * Return 0 if the source didn't exist or was already marked as recorded. 578 * Return 1 if the source was marked as recorded by this function. 579 * Return <0 if any error occured (negated errno code). 580 */ 581 int 582 in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) 583 { 584 struct ip6_msource find; 585 struct ip6_msource *ims, *nims; 586 587 IN6_MULTI_LOCK_ASSERT(); 588 589 find.im6s_addr = *addr; 590 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 591 if (ims && ims->im6s_stp) 592 return (0); 593 if (ims == NULL) { 594 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 595 return (-ENOSPC); 596 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 597 M_NOWAIT | M_ZERO); 598 if (nims == NULL) 599 return (-ENOMEM); 600 nims->im6s_addr = find.im6s_addr; 601 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 602 ++inm->in6m_nsrc; 603 ims = nims; 604 } 605 606 /* 607 * Mark the source as recorded and update the recorded 608 * source count. 609 */ 610 ++ims->im6s_stp; 611 ++inm->in6m_st[1].iss_rec; 612 613 return (1); 614 } 615 616 /* 617 * Return a pointer to an in6_msource owned by an in6_mfilter, 618 * given its source address. 619 * Lazy-allocate if needed. If this is a new entry its filter state is 620 * undefined at t0. 621 * 622 * imf is the filter set being modified. 623 * addr is the source address. 
624 * 625 * SMPng: May be called with locks held; malloc must not block. 626 */ 627 static int 628 im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, 629 struct in6_msource **plims) 630 { 631 struct ip6_msource find; 632 struct ip6_msource *ims, *nims; 633 struct in6_msource *lims; 634 int error; 635 636 error = 0; 637 ims = NULL; 638 lims = NULL; 639 640 find.im6s_addr = psin->sin6_addr; 641 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 642 lims = (struct in6_msource *)ims; 643 if (lims == NULL) { 644 if (imf->im6f_nsrc == in6_mcast_maxsocksrc) 645 return (ENOSPC); 646 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 647 M_NOWAIT | M_ZERO); 648 if (nims == NULL) 649 return (ENOMEM); 650 lims = (struct in6_msource *)nims; 651 lims->im6s_addr = find.im6s_addr; 652 lims->im6sl_st[0] = MCAST_UNDEFINED; 653 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 654 ++imf->im6f_nsrc; 655 } 656 657 *plims = lims; 658 659 return (error); 660 } 661 662 /* 663 * Graft a source entry into an existing socket-layer filter set, 664 * maintaining any required invariants and checking allocations. 665 * 666 * The source is marked as being in the new filter mode at t1. 667 * 668 * Return the pointer to the new node, otherwise return NULL. 669 */ 670 static struct in6_msource * 671 im6f_graft(struct in6_mfilter *imf, const uint8_t st1, 672 const struct sockaddr_in6 *psin) 673 { 674 struct ip6_msource *nims; 675 struct in6_msource *lims; 676 677 nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, 678 M_NOWAIT | M_ZERO); 679 if (nims == NULL) 680 return (NULL); 681 lims = (struct in6_msource *)nims; 682 lims->im6s_addr = psin->sin6_addr; 683 lims->im6sl_st[0] = MCAST_UNDEFINED; 684 lims->im6sl_st[1] = st1; 685 RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); 686 ++imf->im6f_nsrc; 687 688 return (lims); 689 } 690 691 /* 692 * Prune a source entry from an existing socket-layer filter set, 693 * maintaining any required invariants and checking allocations. 694 * 695 * The source is marked as being left at t1, it is not freed. 696 * 697 * Return 0 if no error occurred, otherwise return an errno value. 698 */ 699 static int 700 im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) 701 { 702 struct ip6_msource find; 703 struct ip6_msource *ims; 704 struct in6_msource *lims; 705 706 find.im6s_addr = psin->sin6_addr; 707 ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); 708 if (ims == NULL) 709 return (ENOENT); 710 lims = (struct in6_msource *)ims; 711 lims->im6sl_st[1] = MCAST_UNDEFINED; 712 return (0); 713 } 714 715 /* 716 * Revert socket-layer filter set deltas at t1 to t0 state. 717 */ 718 static void 719 im6f_rollback(struct in6_mfilter *imf) 720 { 721 struct ip6_msource *ims, *tims; 722 struct in6_msource *lims; 723 724 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 725 lims = (struct in6_msource *)ims; 726 if (lims->im6sl_st[0] == lims->im6sl_st[1]) { 727 /* no change at t1 */ 728 continue; 729 } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) { 730 /* revert change to existing source at t1 */ 731 lims->im6sl_st[1] = lims->im6sl_st[0]; 732 } else { 733 /* revert source added t1 */ 734 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 735 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 736 free(ims, M_IN6MFILTER); 737 imf->im6f_nsrc--; 738 } 739 } 740 imf->im6f_st[1] = imf->im6f_st[0]; 741 } 742 743 /* 744 * Mark socket-layer filter set as INCLUDE {} at t1. 
745 */ 746 static void 747 im6f_leave(struct in6_mfilter *imf) 748 { 749 struct ip6_msource *ims; 750 struct in6_msource *lims; 751 752 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 753 lims = (struct in6_msource *)ims; 754 lims->im6sl_st[1] = MCAST_UNDEFINED; 755 } 756 imf->im6f_st[1] = MCAST_INCLUDE; 757 } 758 759 /* 760 * Mark socket-layer filter set deltas as committed. 761 */ 762 static void 763 im6f_commit(struct in6_mfilter *imf) 764 { 765 struct ip6_msource *ims; 766 struct in6_msource *lims; 767 768 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 769 lims = (struct in6_msource *)ims; 770 lims->im6sl_st[0] = lims->im6sl_st[1]; 771 } 772 imf->im6f_st[0] = imf->im6f_st[1]; 773 } 774 775 /* 776 * Reap unreferenced sources from socket-layer filter set. 777 */ 778 static void 779 im6f_reap(struct in6_mfilter *imf) 780 { 781 struct ip6_msource *ims, *tims; 782 struct in6_msource *lims; 783 784 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 785 lims = (struct in6_msource *)ims; 786 if ((lims->im6sl_st[0] == MCAST_UNDEFINED) && 787 (lims->im6sl_st[1] == MCAST_UNDEFINED)) { 788 CTR2(KTR_MLD, "%s: free lims %p", __func__, ims); 789 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 790 free(ims, M_IN6MFILTER); 791 imf->im6f_nsrc--; 792 } 793 } 794 } 795 796 /* 797 * Purge socket-layer filter set. 798 */ 799 static void 800 im6f_purge(struct in6_mfilter *imf) 801 { 802 struct ip6_msource *ims, *tims; 803 804 RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { 805 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 806 RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); 807 free(ims, M_IN6MFILTER); 808 imf->im6f_nsrc--; 809 } 810 imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED; 811 KASSERT(RB_EMPTY(&imf->im6f_sources), 812 ("%s: im6f_sources not empty", __func__)); 813 } 814 815 /* 816 * Look up a source filter entry for a multicast group. 817 * 818 * inm is the group descriptor to work with. 819 * addr is the IPv6 address to look up. 820 * noalloc may be non-zero to suppress allocation of sources. 821 * *pims will be set to the address of the retrieved or allocated source. 822 * 823 * SMPng: NOTE: may be called with locks held. 824 * Return 0 if successful, otherwise return a non-zero error code. 825 */ 826 static int 827 in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, 828 const int noalloc, struct ip6_msource **pims) 829 { 830 struct ip6_msource find; 831 struct ip6_msource *ims, *nims; 832 #ifdef KTR 833 char ip6tbuf[INET6_ADDRSTRLEN]; 834 #endif 835 836 find.im6s_addr = *addr; 837 ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); 838 if (ims == NULL && !noalloc) { 839 if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) 840 return (ENOSPC); 841 nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, 842 M_NOWAIT | M_ZERO); 843 if (nims == NULL) 844 return (ENOMEM); 845 nims->im6s_addr = *addr; 846 RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); 847 ++inm->in6m_nsrc; 848 ims = nims; 849 CTR3(KTR_MLD, "%s: allocated %s as %p", __func__, 850 ip6_sprintf(ip6tbuf, addr), ims); 851 } 852 853 *pims = ims; 854 return (0); 855 } 856 857 /* 858 * Merge socket-layer source into MLD-layer source. 859 * If rollback is non-zero, perform the inverse of the merge. 860 */ 861 static void 862 im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, 863 const int rollback) 864 { 865 int n = rollback ? 
-1 : 1; 866 #ifdef KTR 867 char ip6tbuf[INET6_ADDRSTRLEN]; 868 869 ip6_sprintf(ip6tbuf, &lims->im6s_addr); 870 #endif 871 872 if (lims->im6sl_st[0] == MCAST_EXCLUDE) { 873 CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf); 874 ims->im6s_st[1].ex -= n; 875 } else if (lims->im6sl_st[0] == MCAST_INCLUDE) { 876 CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf); 877 ims->im6s_st[1].in -= n; 878 } 879 880 if (lims->im6sl_st[1] == MCAST_EXCLUDE) { 881 CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf); 882 ims->im6s_st[1].ex += n; 883 } else if (lims->im6sl_st[1] == MCAST_INCLUDE) { 884 CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf); 885 ims->im6s_st[1].in += n; 886 } 887 } 888 889 /* 890 * Atomically update the global in6_multi state, when a membership's 891 * filter list is being updated in any way. 892 * 893 * imf is the per-inpcb-membership group filter pointer. 894 * A fake imf may be passed for in-kernel consumers. 895 * 896 * XXX This is a candidate for a set-symmetric-difference style loop 897 * which would eliminate the repeated lookup from root of ims nodes, 898 * as they share the same key space. 899 * 900 * If any error occurred this function will back out of refcounts 901 * and return a non-zero value. 902 */ 903 static int 904 in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 905 { 906 struct ip6_msource *ims, *nims; 907 struct in6_msource *lims; 908 int schanged, error; 909 int nsrc0, nsrc1; 910 911 schanged = 0; 912 error = 0; 913 nsrc1 = nsrc0 = 0; 914 915 /* 916 * Update the source filters first, as this may fail. 917 * Maintain count of in-mode filters at t0, t1. These are 918 * used to work out if we transition into ASM mode or not. 919 * Maintain a count of source filters whose state was 920 * actually modified by this operation. 921 */ 922 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 923 lims = (struct in6_msource *)ims; 924 if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; 925 if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; 926 if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; 927 error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); 928 ++schanged; 929 if (error) 930 break; 931 im6s_merge(nims, lims, 0); 932 } 933 if (error) { 934 struct ip6_msource *bims; 935 936 RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { 937 lims = (struct in6_msource *)ims; 938 if (lims->im6sl_st[0] == lims->im6sl_st[1]) 939 continue; 940 (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims); 941 if (bims == NULL) 942 continue; 943 im6s_merge(bims, lims, 1); 944 } 945 goto out_reap; 946 } 947 948 CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1", 949 __func__, nsrc0, nsrc1); 950 951 /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ 952 if (imf->im6f_st[0] == imf->im6f_st[1] && 953 imf->im6f_st[1] == MCAST_INCLUDE) { 954 if (nsrc1 == 0) { 955 CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); 956 --inm->in6m_st[1].iss_in; 957 } 958 } 959 960 /* Handle filter mode transition on socket. 
 */
	if (imf->im6f_st[0] != imf->im6f_st[1]) {
		CTR3(KTR_MLD, "%s: imf transition %d to %d",
		    __func__, imf->im6f_st[0], imf->im6f_st[1]);

		if (imf->im6f_st[0] == MCAST_EXCLUDE) {
			CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__);
			--inm->in6m_st[1].iss_ex;
		} else if (imf->im6f_st[0] == MCAST_INCLUDE) {
			CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
			--inm->in6m_st[1].iss_in;
		}

		if (imf->im6f_st[1] == MCAST_EXCLUDE) {
			CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__);
			inm->in6m_st[1].iss_ex++;
		} else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
			CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__);
			inm->in6m_st[1].iss_in++;
		}
	}

	/*
	 * Track inm filter state in terms of listener counts.
	 * If there are any exclusive listeners, stack-wide
	 * membership is exclusive.
	 * Otherwise, if only inclusive listeners, stack-wide is inclusive.
	 * If no listeners remain, state is undefined at t1,
	 * and the MLD lifecycle for this group should finish.
	 */
	if (inm->in6m_st[1].iss_ex > 0) {
		CTR1(KTR_MLD, "%s: transition to EX", __func__);
		inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE;
	} else if (inm->in6m_st[1].iss_in > 0) {
		CTR1(KTR_MLD, "%s: transition to IN", __func__);
		inm->in6m_st[1].iss_fmode = MCAST_INCLUDE;
	} else {
		CTR1(KTR_MLD, "%s: transition to UNDEF", __func__);
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
	}

	/* Decrement ASM listener count on transition out of ASM mode. */
	if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
		if ((imf->im6f_st[1] != MCAST_EXCLUDE) ||
		    (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) {
			CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__);
			--inm->in6m_st[1].iss_asm;
		}
	}

	/* Increment ASM listener count on transition to ASM mode. */
	if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
		CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__);
		inm->in6m_st[1].iss_asm++;
	}

	CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm);
	in6m_print(inm);

out_reap:
	if (schanged > 0) {
		CTR1(KTR_MLD, "%s: sources changed; reaping", __func__);
		in6m_reap(inm);
	}
	return (error);
}

/*
 * Mark an in6_multi's filter set deltas as committed.
 * Called by MLD after a state change has been enqueued.
 */
void
in6m_commit(struct in6_multi *inm)
{
	struct ip6_msource *ims;

	CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm);
	CTR1(KTR_MLD, "%s: pre commit:", __func__);
	in6m_print(inm);

	RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
		ims->im6s_st[0] = ims->im6s_st[1];
	}
	inm->in6m_st[0] = inm->in6m_st[1];
}

/*
 * Reap unreferenced nodes from an in6_multi's filter set.
 */
static void
in6m_reap(struct in6_multi *inm)
{
	struct ip6_msource *ims, *tims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
		if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 ||
		    ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 ||
		    ims->im6s_stp != 0)
			continue;
		CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
		RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
		free(ims, M_IP6MSOURCE);
		inm->in6m_nsrc--;
	}
}

/*
 * Purge all source nodes from an in6_multi's filter set.
1067 */ 1068 static void 1069 in6m_purge(struct in6_multi *inm) 1070 { 1071 struct ip6_msource *ims, *tims; 1072 1073 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { 1074 CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); 1075 RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); 1076 free(ims, M_IP6MSOURCE); 1077 inm->in6m_nsrc--; 1078 } 1079 } 1080 1081 /* 1082 * Join a multicast address w/o sources. 1083 * KAME compatibility entry point. 1084 * 1085 * SMPng: Assume no mc locks held by caller. 1086 */ 1087 struct in6_multi_mship * 1088 in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr, 1089 int *errorp, int delay) 1090 { 1091 struct in6_multi_mship *imm; 1092 int error; 1093 1094 imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT); 1095 if (imm == NULL) { 1096 *errorp = ENOBUFS; 1097 return (NULL); 1098 } 1099 1100 delay = (delay * PR_FASTHZ) / hz; 1101 1102 error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay); 1103 if (error) { 1104 *errorp = error; 1105 free(imm, M_IP6MADDR); 1106 return (NULL); 1107 } 1108 1109 return (imm); 1110 } 1111 1112 /* 1113 * Leave a multicast address w/o sources. 1114 * KAME compatibility entry point. 1115 * 1116 * SMPng: Assume no mc locks held by caller. 1117 */ 1118 int 1119 in6_leavegroup(struct in6_multi_mship *imm) 1120 { 1121 1122 if (imm->i6mm_maddr != NULL) 1123 in6_mc_leave(imm->i6mm_maddr, NULL); 1124 free(imm, M_IP6MADDR); 1125 return 0; 1126 } 1127 1128 /* 1129 * Join a multicast group; unlocked entry point. 1130 * 1131 * SMPng: XXX: in6_mc_join() is called from in6_control() when upper 1132 * locks are not held. Fortunately, ifp is unlikely to have been detached 1133 * at this point, so we assume it's OK to recurse. 1134 */ 1135 int 1136 in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr, 1137 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1138 const int delay) 1139 { 1140 int error; 1141 1142 IN6_MULTI_LOCK(); 1143 error = in6_mc_join_locked(ifp, mcaddr, imf, pinm, delay); 1144 IN6_MULTI_UNLOCK(); 1145 1146 return (error); 1147 } 1148 1149 /* 1150 * Join a multicast group; real entry point. 1151 * 1152 * Only preserves atomicity at inm level. 1153 * NOTE: imf argument cannot be const due to sys/tree.h limitations. 1154 * 1155 * If the MLD downcall fails, the group is not joined, and an error 1156 * code is returned. 1157 */ 1158 int 1159 in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr, 1160 /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, 1161 const int delay) 1162 { 1163 struct in6_mfilter timf; 1164 struct in6_multi *inm; 1165 int error; 1166 #ifdef KTR 1167 char ip6tbuf[INET6_ADDRSTRLEN]; 1168 #endif 1169 1170 #ifdef INVARIANTS 1171 /* 1172 * Sanity: Check scope zone ID was set for ifp, if and 1173 * only if group is scoped to an interface. 1174 */ 1175 KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr), 1176 ("%s: not a multicast address", __func__)); 1177 if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) || 1178 IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) { 1179 KASSERT(mcaddr->s6_addr16[1] != 0, 1180 ("%s: scope zone ID not set", __func__)); 1181 } 1182 #endif 1183 1184 IN6_MULTI_LOCK_ASSERT(); 1185 1186 CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__, 1187 ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp)); 1188 1189 error = 0; 1190 inm = NULL; 1191 1192 /* 1193 * If no imf was specified (i.e. kernel consumer), 1194 * fake one up and assume it is an ASM join. 
1195 */ 1196 if (imf == NULL) { 1197 im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); 1198 imf = &timf; 1199 } 1200 1201 error = in6_mc_get(ifp, mcaddr, &inm); 1202 if (error) { 1203 CTR1(KTR_MLD, "%s: in6_mc_get() failure", __func__); 1204 return (error); 1205 } 1206 1207 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1208 error = in6m_merge(inm, imf); 1209 if (error) { 1210 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1211 goto out_in6m_release; 1212 } 1213 1214 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1215 error = mld_change_state(inm, delay); 1216 if (error) { 1217 CTR1(KTR_MLD, "%s: failed to update source", __func__); 1218 goto out_in6m_release; 1219 } 1220 1221 out_in6m_release: 1222 if (error) { 1223 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1224 in6m_release_locked(inm); 1225 } else { 1226 *pinm = inm; 1227 } 1228 1229 return (error); 1230 } 1231 1232 /* 1233 * Leave a multicast group; unlocked entry point. 1234 */ 1235 int 1236 in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1237 { 1238 struct ifnet *ifp; 1239 int error; 1240 1241 ifp = inm->in6m_ifp; 1242 1243 IN6_MULTI_LOCK(); 1244 error = in6_mc_leave_locked(inm, imf); 1245 IN6_MULTI_UNLOCK(); 1246 1247 return (error); 1248 } 1249 1250 /* 1251 * Leave a multicast group; real entry point. 1252 * All source filters will be expunged. 1253 * 1254 * Only preserves atomicity at inm level. 1255 * 1256 * Holding the write lock for the INP which contains imf 1257 * is highly advisable. We can't assert for it as imf does not 1258 * contain a back-pointer to the owning inp. 1259 * 1260 * Note: This is not the same as in6m_release(*) as this function also 1261 * makes a state change downcall into MLD. 1262 */ 1263 int 1264 in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) 1265 { 1266 struct in6_mfilter timf; 1267 int error; 1268 #ifdef KTR 1269 char ip6tbuf[INET6_ADDRSTRLEN]; 1270 #endif 1271 1272 error = 0; 1273 1274 IN6_MULTI_LOCK_ASSERT(); 1275 1276 CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__, 1277 inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr), 1278 (in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)), 1279 imf); 1280 1281 /* 1282 * If no imf was specified (i.e. kernel consumer), 1283 * fake one up and assume it is an ASM join. 1284 */ 1285 if (imf == NULL) { 1286 im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); 1287 imf = &timf; 1288 } 1289 1290 /* 1291 * Begin state merge transaction at MLD layer. 1292 * 1293 * As this particular invocation should not cause any memory 1294 * to be allocated, and there is no opportunity to roll back 1295 * the transaction, it MUST NOT fail. 1296 */ 1297 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1298 error = in6m_merge(inm, imf); 1299 KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1300 1301 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1302 error = mld_change_state(inm, 0); 1303 if (error) 1304 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1305 1306 CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); 1307 in6m_release_locked(inm); 1308 1309 return (error); 1310 } 1311 1312 /* 1313 * Block or unblock an ASM multicast source on an inpcb. 1314 * This implements the delta-based API described in RFC 3678. 1315 * 1316 * The delta-based API applies only to exclusive-mode memberships. 1317 * An MLD downcall will be performed. 1318 * 1319 * SMPng: NOTE: Must take Giant as a join may create a new ifma. 
1320 * 1321 * Return 0 if successful, otherwise return an appropriate error code. 1322 */ 1323 static int 1324 in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1325 { 1326 struct group_source_req gsr; 1327 sockunion_t *gsa, *ssa; 1328 struct ifnet *ifp; 1329 struct in6_mfilter *imf; 1330 struct ip6_moptions *imo; 1331 struct in6_msource *ims; 1332 struct in6_multi *inm; 1333 size_t idx; 1334 uint16_t fmode; 1335 int error, doblock; 1336 #ifdef KTR 1337 char ip6tbuf[INET6_ADDRSTRLEN]; 1338 #endif 1339 1340 ifp = NULL; 1341 error = 0; 1342 doblock = 0; 1343 1344 memset(&gsr, 0, sizeof(struct group_source_req)); 1345 gsa = (sockunion_t *)&gsr.gsr_group; 1346 ssa = (sockunion_t *)&gsr.gsr_source; 1347 1348 switch (sopt->sopt_name) { 1349 case MCAST_BLOCK_SOURCE: 1350 case MCAST_UNBLOCK_SOURCE: 1351 error = sooptcopyin(sopt, &gsr, 1352 sizeof(struct group_source_req), 1353 sizeof(struct group_source_req)); 1354 if (error) 1355 return (error); 1356 1357 if (gsa->sin6.sin6_family != AF_INET6 || 1358 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1359 return (EINVAL); 1360 1361 if (ssa->sin6.sin6_family != AF_INET6 || 1362 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1363 return (EINVAL); 1364 1365 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1366 return (EADDRNOTAVAIL); 1367 1368 ifp = ifnet_byindex(gsr.gsr_interface); 1369 1370 if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1371 doblock = 1; 1372 break; 1373 1374 default: 1375 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1376 __func__, sopt->sopt_name); 1377 return (EOPNOTSUPP); 1378 break; 1379 } 1380 1381 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1382 return (EINVAL); 1383 1384 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1385 1386 /* 1387 * Check if we are actually a member of this group. 1388 */ 1389 imo = in6p_findmoptions(inp); 1390 idx = im6o_match_group(imo, ifp, &gsa->sa); 1391 if (idx == -1 || imo->im6o_mfilters == NULL) { 1392 error = EADDRNOTAVAIL; 1393 goto out_in6p_locked; 1394 } 1395 1396 KASSERT(imo->im6o_mfilters != NULL, 1397 ("%s: im6o_mfilters not allocated", __func__)); 1398 imf = &imo->im6o_mfilters[idx]; 1399 inm = imo->im6o_membership[idx]; 1400 1401 /* 1402 * Attempting to use the delta-based API on an 1403 * non exclusive-mode membership is an error. 1404 */ 1405 fmode = imf->im6f_st[0]; 1406 if (fmode != MCAST_EXCLUDE) { 1407 error = EINVAL; 1408 goto out_in6p_locked; 1409 } 1410 1411 /* 1412 * Deal with error cases up-front: 1413 * Asked to block, but already blocked; or 1414 * Asked to unblock, but nothing to unblock. 1415 * If adding a new block entry, allocate it. 1416 */ 1417 ims = im6o_match_source(imo, idx, &ssa->sa); 1418 if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1419 CTR3(KTR_MLD, "%s: source %s %spresent", __func__, 1420 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 1421 doblock ? "" : "not "); 1422 error = EADDRNOTAVAIL; 1423 goto out_in6p_locked; 1424 } 1425 1426 INP_WLOCK_ASSERT(inp); 1427 1428 /* 1429 * Begin state merge transaction at socket layer. 1430 */ 1431 if (doblock) { 1432 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 1433 ims = im6f_graft(imf, fmode, &ssa->sin6); 1434 if (ims == NULL) 1435 error = ENOMEM; 1436 } else { 1437 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 1438 error = im6f_prune(imf, &ssa->sin6); 1439 } 1440 1441 if (error) { 1442 CTR1(KTR_MLD, "%s: merge imf state failed", __func__); 1443 goto out_im6f_rollback; 1444 } 1445 1446 /* 1447 * Begin state merge transaction at MLD layer. 
1448 */ 1449 IN6_MULTI_LOCK(); 1450 1451 CTR1(KTR_MLD, "%s: merge inm state", __func__); 1452 error = in6m_merge(inm, imf); 1453 if (error) 1454 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 1455 else { 1456 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 1457 error = mld_change_state(inm, 0); 1458 if (error) 1459 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 1460 } 1461 1462 IN6_MULTI_UNLOCK(); 1463 1464 out_im6f_rollback: 1465 if (error) 1466 im6f_rollback(imf); 1467 else 1468 im6f_commit(imf); 1469 1470 im6f_reap(imf); 1471 1472 out_in6p_locked: 1473 INP_WUNLOCK(inp); 1474 return (error); 1475 } 1476 1477 /* 1478 * Given an inpcb, return its multicast options structure pointer. Accepts 1479 * an unlocked inpcb pointer, but will return it locked. May sleep. 1480 * 1481 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 1482 * SMPng: NOTE: Returns with the INP write lock held. 1483 */ 1484 static struct ip6_moptions * 1485 in6p_findmoptions(struct inpcb *inp) 1486 { 1487 struct ip6_moptions *imo; 1488 struct in6_multi **immp; 1489 struct in6_mfilter *imfp; 1490 size_t idx; 1491 1492 INP_WLOCK(inp); 1493 if (inp->in6p_moptions != NULL) 1494 return (inp->in6p_moptions); 1495 1496 INP_WUNLOCK(inp); 1497 1498 imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK); 1499 immp = malloc(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS, 1500 M_WAITOK | M_ZERO); 1501 imfp = malloc(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS, 1502 M_IN6MFILTER, M_WAITOK); 1503 1504 imo->im6o_multicast_ifp = NULL; 1505 imo->im6o_multicast_hlim = V_ip6_defmcasthlim; 1506 imo->im6o_multicast_loop = in6_mcast_loop; 1507 imo->im6o_num_memberships = 0; 1508 imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS; 1509 imo->im6o_membership = immp; 1510 1511 /* Initialize per-group source filters. */ 1512 for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++) 1513 im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); 1514 imo->im6o_mfilters = imfp; 1515 1516 INP_WLOCK(inp); 1517 if (inp->in6p_moptions != NULL) { 1518 free(imfp, M_IN6MFILTER); 1519 free(immp, M_IP6MOPTS); 1520 free(imo, M_IP6MOPTS); 1521 return (inp->in6p_moptions); 1522 } 1523 inp->in6p_moptions = imo; 1524 return (imo); 1525 } 1526 1527 /* 1528 * Discard the IPv6 multicast options (and source filters). 1529 * 1530 * SMPng: NOTE: assumes INP write lock is held. 1531 */ 1532 void 1533 ip6_freemoptions(struct ip6_moptions *imo) 1534 { 1535 struct in6_mfilter *imf; 1536 size_t idx, nmships; 1537 1538 KASSERT(imo != NULL, ("%s: ip6_moptions is NULL", __func__)); 1539 1540 nmships = imo->im6o_num_memberships; 1541 for (idx = 0; idx < nmships; ++idx) { 1542 imf = imo->im6o_mfilters ? &imo->im6o_mfilters[idx] : NULL; 1543 if (imf) 1544 im6f_leave(imf); 1545 /* XXX this will thrash the lock(s) */ 1546 (void)in6_mc_leave(imo->im6o_membership[idx], imf); 1547 if (imf) 1548 im6f_purge(imf); 1549 } 1550 1551 if (imo->im6o_mfilters) 1552 free(imo->im6o_mfilters, M_IN6MFILTER); 1553 free(imo->im6o_membership, M_IP6MOPTS); 1554 free(imo, M_IP6MOPTS); 1555 } 1556 1557 /* 1558 * Atomically get source filters on a socket for an IPv6 multicast group. 1559 * Called with INP lock held; returns with lock released. 
1560 */ 1561 static int 1562 in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) 1563 { 1564 struct __msfilterreq msfr; 1565 sockunion_t *gsa; 1566 struct ifnet *ifp; 1567 struct ip6_moptions *imo; 1568 struct in6_mfilter *imf; 1569 struct ip6_msource *ims; 1570 struct in6_msource *lims; 1571 struct sockaddr_in6 *psin; 1572 struct sockaddr_storage *ptss; 1573 struct sockaddr_storage *tss; 1574 int error; 1575 size_t idx, nsrcs, ncsrcs; 1576 1577 INP_WLOCK_ASSERT(inp); 1578 1579 imo = inp->in6p_moptions; 1580 KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__)); 1581 1582 INP_WUNLOCK(inp); 1583 1584 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 1585 sizeof(struct __msfilterreq)); 1586 if (error) 1587 return (error); 1588 1589 if (msfr.msfr_group.ss_family != AF_INET6 || 1590 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 1591 return (EINVAL); 1592 1593 gsa = (sockunion_t *)&msfr.msfr_group; 1594 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1595 return (EINVAL); 1596 1597 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 1598 return (EADDRNOTAVAIL); 1599 ifp = ifnet_byindex(msfr.msfr_ifindex); 1600 if (ifp == NULL) 1601 return (EADDRNOTAVAIL); 1602 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1603 1604 INP_WLOCK(inp); 1605 1606 /* 1607 * Lookup group on the socket. 1608 */ 1609 idx = im6o_match_group(imo, ifp, &gsa->sa); 1610 if (idx == -1 || imo->im6o_mfilters == NULL) { 1611 INP_WUNLOCK(inp); 1612 return (EADDRNOTAVAIL); 1613 } 1614 imf = &imo->im6o_mfilters[idx]; 1615 1616 /* 1617 * Ignore memberships which are in limbo. 1618 */ 1619 if (imf->im6f_st[1] == MCAST_UNDEFINED) { 1620 INP_WUNLOCK(inp); 1621 return (EAGAIN); 1622 } 1623 msfr.msfr_fmode = imf->im6f_st[1]; 1624 1625 /* 1626 * If the user specified a buffer, copy out the source filter 1627 * entries to userland gracefully. 1628 * We only copy out the number of entries which userland 1629 * has asked for, but we always tell userland how big the 1630 * buffer really needs to be. 1631 */ 1632 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 1633 msfr.msfr_nsrcs = in6_mcast_maxsocksrc; 1634 tss = NULL; 1635 if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { 1636 tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 1637 M_TEMP, M_NOWAIT | M_ZERO); 1638 if (tss == NULL) { 1639 INP_WUNLOCK(inp); 1640 return (ENOBUFS); 1641 } 1642 } 1643 1644 /* 1645 * Count number of sources in-mode at t0. 1646 * If buffer space exists and remains, copy out source entries. 1647 */ 1648 nsrcs = msfr.msfr_nsrcs; 1649 ncsrcs = 0; 1650 ptss = tss; 1651 RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { 1652 lims = (struct in6_msource *)ims; 1653 if (lims->im6sl_st[0] == MCAST_UNDEFINED || 1654 lims->im6sl_st[0] != imf->im6f_st[0]) 1655 continue; 1656 ++ncsrcs; 1657 if (tss != NULL && nsrcs > 0) { 1658 psin = (struct sockaddr_in6 *)ptss; 1659 psin->sin6_family = AF_INET6; 1660 psin->sin6_len = sizeof(struct sockaddr_in6); 1661 psin->sin6_addr = lims->im6s_addr; 1662 psin->sin6_port = 0; 1663 --nsrcs; 1664 ++ptss; 1665 } 1666 } 1667 1668 INP_WUNLOCK(inp); 1669 1670 if (tss != NULL) { 1671 error = copyout(tss, msfr.msfr_srcs, 1672 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 1673 free(tss, M_TEMP); 1674 if (error) 1675 return (error); 1676 } 1677 1678 msfr.msfr_nsrcs = ncsrcs; 1679 error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); 1680 1681 return (error); 1682 } 1683 1684 /* 1685 * Return the IP multicast options in response to user getsockopt(). 
1686 */ 1687 int 1688 ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) 1689 { 1690 struct ip6_moptions *im6o; 1691 int error; 1692 u_int optval; 1693 1694 INP_WLOCK(inp); 1695 im6o = inp->in6p_moptions; 1696 /* 1697 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 1698 * or is a divert socket, reject it. 1699 */ 1700 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 1701 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 1702 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { 1703 INP_WUNLOCK(inp); 1704 return (EOPNOTSUPP); 1705 } 1706 1707 error = 0; 1708 switch (sopt->sopt_name) { 1709 case IPV6_MULTICAST_IF: 1710 if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { 1711 optval = 0; 1712 } else { 1713 optval = im6o->im6o_multicast_ifp->if_index; 1714 } 1715 INP_WUNLOCK(inp); 1716 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1717 break; 1718 1719 case IPV6_MULTICAST_HOPS: 1720 if (im6o == NULL) 1721 optval = V_ip6_defmcasthlim; 1722 else 1723 optval = im6o->im6o_multicast_hlim; 1724 INP_WUNLOCK(inp); 1725 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1726 break; 1727 1728 case IPV6_MULTICAST_LOOP: 1729 if (im6o == NULL) 1730 optval = in6_mcast_loop; /* XXX VIMAGE */ 1731 else 1732 optval = im6o->im6o_multicast_loop; 1733 INP_WUNLOCK(inp); 1734 error = sooptcopyout(sopt, &optval, sizeof(u_int)); 1735 break; 1736 1737 case IPV6_MSFILTER: 1738 if (im6o == NULL) { 1739 error = EADDRNOTAVAIL; 1740 INP_WUNLOCK(inp); 1741 } else { 1742 error = in6p_get_source_filters(inp, sopt); 1743 } 1744 break; 1745 1746 default: 1747 INP_WUNLOCK(inp); 1748 error = ENOPROTOOPT; 1749 break; 1750 } 1751 1752 INP_UNLOCK_ASSERT(inp); 1753 1754 return (error); 1755 } 1756 1757 /* 1758 * Look up the ifnet to use for a multicast group membership, 1759 * given the address of an IPv6 group. 1760 * 1761 * This routine exists to support legacy IPv6 multicast applications. 1762 * 1763 * If inp is non-NULL, use this socket's current FIB number for any 1764 * required FIB lookup. Look up the group address in the unicast FIB, 1765 * and use its ifp; usually, this points to the default next-hop. 1766 * If the FIB lookup fails, return NULL. 1767 * 1768 * FUTURE: Support multiple forwarding tables for IPv6. 1769 * 1770 * Returns NULL if no ifp could be found. 1771 */ 1772 static struct ifnet * 1773 in6p_lookup_mcast_ifp(const struct inpcb *in6p, 1774 const struct sockaddr_in6 *gsin6) 1775 { 1776 struct route_in6 ro6; 1777 struct ifnet *ifp; 1778 1779 KASSERT(in6p->inp_vflag & INP_IPV6, 1780 ("%s: not INP_IPV6 inpcb", __func__)); 1781 KASSERT(gsin6->sin6_family == AF_INET6, 1782 ("%s: not AF_INET6 group", __func__)); 1783 1784 ifp = NULL; 1785 memset(&ro6, 0, sizeof(struct route_in6)); 1786 memcpy(&ro6.ro_dst, gsin6, sizeof(struct sockaddr_in6)); 1787 rtalloc_ign_fib((struct route *)&ro6, 0, 1788 in6p ? in6p->inp_inc.inc_fibnum : RT_DEFAULT_FIB); 1789 if (ro6.ro_rt != NULL) { 1790 ifp = ro6.ro_rt->rt_ifp; 1791 KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 1792 RTFREE(ro6.ro_rt); 1793 } 1794 1795 return (ifp); 1796 } 1797 1798 /* 1799 * Join an IPv6 multicast group, possibly with a source. 1800 * 1801 * FIXME: The KAME use of the unspecified address (::) 1802 * to join *all* multicast groups is currently unsupported. 
1803 */ 1804 static int 1805 in6p_join_group(struct inpcb *inp, struct sockopt *sopt) 1806 { 1807 struct group_source_req gsr; 1808 sockunion_t *gsa, *ssa; 1809 struct ifnet *ifp; 1810 struct in6_mfilter *imf; 1811 struct ip6_moptions *imo; 1812 struct in6_multi *inm; 1813 struct in6_msource *lims; 1814 size_t idx; 1815 int error, is_new; 1816 1817 ifp = NULL; 1818 imf = NULL; 1819 lims = NULL; 1820 error = 0; 1821 is_new = 0; 1822 1823 memset(&gsr, 0, sizeof(struct group_source_req)); 1824 gsa = (sockunion_t *)&gsr.gsr_group; 1825 gsa->ss.ss_family = AF_UNSPEC; 1826 ssa = (sockunion_t *)&gsr.gsr_source; 1827 ssa->ss.ss_family = AF_UNSPEC; 1828 1829 /* 1830 * Chew everything into struct group_source_req. 1831 * Overwrite the port field if present, as the sockaddr 1832 * being copied in may be matched with a binary comparison. 1833 * Ignore passed-in scope ID. 1834 */ 1835 switch (sopt->sopt_name) { 1836 case IPV6_JOIN_GROUP: { 1837 struct ipv6_mreq mreq; 1838 1839 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 1840 sizeof(struct ipv6_mreq)); 1841 if (error) 1842 return (error); 1843 1844 gsa->sin6.sin6_family = AF_INET6; 1845 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 1846 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 1847 1848 if (mreq.ipv6mr_interface == 0) { 1849 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 1850 } else { 1851 if (V_if_index < mreq.ipv6mr_interface) 1852 return (EADDRNOTAVAIL); 1853 ifp = ifnet_byindex(mreq.ipv6mr_interface); 1854 } 1855 CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p", 1856 __func__, mreq.ipv6mr_interface, ifp); 1857 } break; 1858 1859 case MCAST_JOIN_GROUP: 1860 case MCAST_JOIN_SOURCE_GROUP: 1861 if (sopt->sopt_name == MCAST_JOIN_GROUP) { 1862 error = sooptcopyin(sopt, &gsr, 1863 sizeof(struct group_req), 1864 sizeof(struct group_req)); 1865 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1866 error = sooptcopyin(sopt, &gsr, 1867 sizeof(struct group_source_req), 1868 sizeof(struct group_source_req)); 1869 } 1870 if (error) 1871 return (error); 1872 1873 if (gsa->sin6.sin6_family != AF_INET6 || 1874 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1875 return (EINVAL); 1876 1877 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1878 if (ssa->sin6.sin6_family != AF_INET6 || 1879 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 1880 return (EINVAL); 1881 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 1882 return (EINVAL); 1883 /* 1884 * TODO: Validate embedded scope ID in source 1885 * list entry against passed-in ifp, if and only 1886 * if source list filter entry is iface or node local. 1887 */ 1888 in6_clearscope(&ssa->sin6.sin6_addr); 1889 ssa->sin6.sin6_port = 0; 1890 ssa->sin6.sin6_scope_id = 0; 1891 } 1892 1893 if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1894 return (EADDRNOTAVAIL); 1895 ifp = ifnet_byindex(gsr.gsr_interface); 1896 break; 1897 1898 default: 1899 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 1900 __func__, sopt->sopt_name); 1901 return (EOPNOTSUPP); 1902 break; 1903 } 1904 1905 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 1906 return (EINVAL); 1907 1908 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 1909 return (EADDRNOTAVAIL); 1910 1911 gsa->sin6.sin6_port = 0; 1912 gsa->sin6.sin6_scope_id = 0; 1913 1914 /* 1915 * Always set the scope zone ID on memberships created from userland. 1916 * Use the passed-in ifp to do this. 1917 * XXX The in6_setscope() return value is meaningless. 1918 * XXX SCOPE6_LOCK() is taken by in6_setscope(). 
1919 */ 1920 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 1921 1922 imo = in6p_findmoptions(inp); 1923 idx = im6o_match_group(imo, ifp, &gsa->sa); 1924 if (idx == -1) { 1925 is_new = 1; 1926 } else { 1927 inm = imo->im6o_membership[idx]; 1928 imf = &imo->im6o_mfilters[idx]; 1929 if (ssa->ss.ss_family != AF_UNSPEC) { 1930 /* 1931 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 1932 * is an error. On an existing inclusive membership, 1933 * it just adds the source to the filter list. 1934 */ 1935 if (imf->im6f_st[1] != MCAST_INCLUDE) { 1936 error = EINVAL; 1937 goto out_in6p_locked; 1938 } 1939 /* 1940 * Throw out duplicates. 1941 * 1942 * XXX FIXME: This makes a naive assumption that 1943 * even if entries exist for *ssa in this imf, 1944 * they will be rejected as dupes, even if they 1945 * are not valid in the current mode (in-mode). 1946 * 1947 * in6_msource is transactioned just as for anything 1948 * else in SSM -- but note naive use of in6m_graft() 1949 * below for allocating new filter entries. 1950 * 1951 * This is only an issue if someone mixes the 1952 * full-state SSM API with the delta-based API, 1953 * which is discouraged in the relevant RFCs. 1954 */ 1955 lims = im6o_match_source(imo, idx, &ssa->sa); 1956 if (lims != NULL /*&& 1957 lims->im6sl_st[1] == MCAST_INCLUDE*/) { 1958 error = EADDRNOTAVAIL; 1959 goto out_in6p_locked; 1960 } 1961 } else { 1962 /* 1963 * MCAST_JOIN_GROUP alone, on any existing membership, 1964 * is rejected, to stop the same inpcb tying up 1965 * multiple refs to the in_multi. 1966 * On an existing inclusive membership, this is also 1967 * an error; if you want to change filter mode, 1968 * you must use the userland API setsourcefilter(). 1969 * XXX We don't reject this for imf in UNDEFINED 1970 * state at t1, because allocation of a filter 1971 * is atomic with allocation of a membership. 1972 */ 1973 error = EINVAL; 1974 goto out_in6p_locked; 1975 } 1976 } 1977 1978 /* 1979 * Begin state merge transaction at socket layer. 1980 */ 1981 INP_WLOCK_ASSERT(inp); 1982 1983 if (is_new) { 1984 if (imo->im6o_num_memberships == imo->im6o_max_memberships) { 1985 error = im6o_grow(imo); 1986 if (error) 1987 goto out_in6p_locked; 1988 } 1989 /* 1990 * Allocate the new slot upfront so we can deal with 1991 * grafting the new source filter in same code path 1992 * as for join-source on existing membership. 1993 */ 1994 idx = imo->im6o_num_memberships; 1995 imo->im6o_membership[idx] = NULL; 1996 imo->im6o_num_memberships++; 1997 KASSERT(imo->im6o_mfilters != NULL, 1998 ("%s: im6f_mfilters vector was not allocated", __func__)); 1999 imf = &imo->im6o_mfilters[idx]; 2000 KASSERT(RB_EMPTY(&imf->im6f_sources), 2001 ("%s: im6f_sources not empty", __func__)); 2002 } 2003 2004 /* 2005 * Graft new source into filter list for this inpcb's 2006 * membership of the group. The in6_multi may not have 2007 * been allocated yet if this is a new membership, however, 2008 * the in_mfilter slot will be allocated and must be initialized. 2009 * 2010 * Note: Grafting of exclusive mode filters doesn't happen 2011 * in this path. 2012 * XXX: Should check for non-NULL lims (node exists but may 2013 * not be in-mode) for interop with full-state API. 
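 *
 * As a reminder of the convention used throughout this file: slot 0
 * of im6f_st[]/im6sl_st[] is the committed state (t0) and slot 1 is
 * the pending state (t1).  The join path below follows the usual
 * transaction shape, sketched here with the MLD downcall elided
 * (on error a brand-new filter is additionally purged):
 *
 *	// error = apply change at t1, then downcall to MLD;
 *	if (error) {
 *		im6f_rollback(imf);	// discard t1, keep t0
 *	} else {
 *		im6f_commit(imf);	// t0 := t1
 *	}
 *	im6f_reap(imf);			// free now-undefined source nodes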
2014 */ 2015 if (ssa->ss.ss_family != AF_UNSPEC) { 2016 /* Membership starts in IN mode */ 2017 if (is_new) { 2018 CTR1(KTR_MLD, "%s: new join w/source", __func__); 2019 im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE); 2020 } else { 2021 CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); 2022 } 2023 lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); 2024 if (lims == NULL) { 2025 CTR1(KTR_MLD, "%s: merge imf state failed", 2026 __func__); 2027 error = ENOMEM; 2028 goto out_im6o_free; 2029 } 2030 } else { 2031 /* No address specified; Membership starts in EX mode */ 2032 if (is_new) { 2033 CTR1(KTR_MLD, "%s: new join w/o source", __func__); 2034 im6f_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE); 2035 } 2036 } 2037 2038 /* 2039 * Begin state merge transaction at MLD layer. 2040 */ 2041 IN6_MULTI_LOCK(); 2042 2043 if (is_new) { 2044 error = in6_mc_join_locked(ifp, &gsa->sin6.sin6_addr, imf, 2045 &inm, 0); 2046 if (error) { 2047 IN6_MULTI_UNLOCK(); 2048 goto out_im6o_free; 2049 } 2050 imo->im6o_membership[idx] = inm; 2051 } else { 2052 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2053 error = in6m_merge(inm, imf); 2054 if (error) 2055 CTR1(KTR_MLD, "%s: failed to merge inm state", 2056 __func__); 2057 else { 2058 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2059 error = mld_change_state(inm, 0); 2060 if (error) 2061 CTR1(KTR_MLD, "%s: failed mld downcall", 2062 __func__); 2063 } 2064 } 2065 2066 IN6_MULTI_UNLOCK(); 2067 INP_WLOCK_ASSERT(inp); 2068 if (error) { 2069 im6f_rollback(imf); 2070 if (is_new) 2071 im6f_purge(imf); 2072 else 2073 im6f_reap(imf); 2074 } else { 2075 im6f_commit(imf); 2076 } 2077 2078 out_im6o_free: 2079 if (error && is_new) { 2080 imo->im6o_membership[idx] = NULL; 2081 --imo->im6o_num_memberships; 2082 } 2083 2084 out_in6p_locked: 2085 INP_WUNLOCK(inp); 2086 return (error); 2087 } 2088 2089 /* 2090 * Leave an IPv6 multicast group on an inpcb, possibly with a source. 2091 */ 2092 static int 2093 in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) 2094 { 2095 struct ipv6_mreq mreq; 2096 struct group_source_req gsr; 2097 sockunion_t *gsa, *ssa; 2098 struct ifnet *ifp; 2099 struct in6_mfilter *imf; 2100 struct ip6_moptions *imo; 2101 struct in6_msource *ims; 2102 struct in6_multi *inm; 2103 uint32_t ifindex; 2104 size_t idx; 2105 int error, is_final; 2106 #ifdef KTR 2107 char ip6tbuf[INET6_ADDRSTRLEN]; 2108 #endif 2109 2110 ifp = NULL; 2111 ifindex = 0; 2112 error = 0; 2113 is_final = 1; 2114 2115 memset(&gsr, 0, sizeof(struct group_source_req)); 2116 gsa = (sockunion_t *)&gsr.gsr_group; 2117 gsa->ss.ss_family = AF_UNSPEC; 2118 ssa = (sockunion_t *)&gsr.gsr_source; 2119 ssa->ss.ss_family = AF_UNSPEC; 2120 2121 /* 2122 * Chew everything passed in up into a struct group_source_req 2123 * as that is easier to process. 2124 * Note: Any embedded scope ID in the multicast group passed 2125 * in by userland is ignored, the interface index is the recommended 2126 * mechanism to specify an interface; see below. 
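 *
 * Illustrative userland counterpart (not part of this file) using the
 * protocol-independent form; the interface name and group address are
 * placeholders:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	static int
 *	leave_group(int s)
 *	{
 *		struct group_req gr;
 *		struct sockaddr_in6 *grp;
 *
 *		memset(&gr, 0, sizeof(gr));
 *		gr.gr_interface = if_nametoindex("em0");
 *		grp = (struct sockaddr_in6 *)&gr.gr_group;
 *		grp->sin6_family = AF_INET6;
 *		grp->sin6_len = sizeof(*grp);
 *		inet_pton(AF_INET6, "ff05::123", &grp->sin6_addr);
 *		return (setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_GROUP,
 *		    &gr, sizeof(gr)));
 *	}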
2127 */ 2128 switch (sopt->sopt_name) { 2129 case IPV6_LEAVE_GROUP: 2130 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), 2131 sizeof(struct ipv6_mreq)); 2132 if (error) 2133 return (error); 2134 gsa->sin6.sin6_family = AF_INET6; 2135 gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); 2136 gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; 2137 gsa->sin6.sin6_port = 0; 2138 gsa->sin6.sin6_scope_id = 0; 2139 ifindex = mreq.ipv6mr_interface; 2140 break; 2141 2142 case MCAST_LEAVE_GROUP: 2143 case MCAST_LEAVE_SOURCE_GROUP: 2144 if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2145 error = sooptcopyin(sopt, &gsr, 2146 sizeof(struct group_req), 2147 sizeof(struct group_req)); 2148 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2149 error = sooptcopyin(sopt, &gsr, 2150 sizeof(struct group_source_req), 2151 sizeof(struct group_source_req)); 2152 } 2153 if (error) 2154 return (error); 2155 2156 if (gsa->sin6.sin6_family != AF_INET6 || 2157 gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2158 return (EINVAL); 2159 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2160 if (ssa->sin6.sin6_family != AF_INET6 || 2161 ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) 2162 return (EINVAL); 2163 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) 2164 return (EINVAL); 2165 /* 2166 * TODO: Validate embedded scope ID in source 2167 * list entry against passed-in ifp, if and only 2168 * if source list filter entry is iface or node local. 2169 */ 2170 in6_clearscope(&ssa->sin6.sin6_addr); 2171 } 2172 gsa->sin6.sin6_port = 0; 2173 gsa->sin6.sin6_scope_id = 0; 2174 ifindex = gsr.gsr_interface; 2175 break; 2176 2177 default: 2178 CTR2(KTR_MLD, "%s: unknown sopt_name %d", 2179 __func__, sopt->sopt_name); 2180 return (EOPNOTSUPP); 2181 break; 2182 } 2183 2184 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2185 return (EINVAL); 2186 2187 /* 2188 * Validate interface index if provided. If no interface index 2189 * was provided separately, attempt to look the membership up 2190 * from the default scope as a last resort to disambiguate 2191 * the membership we are being asked to leave. 2192 * XXX SCOPE6 lock potentially taken here. 2193 */ 2194 if (ifindex != 0) { 2195 if (V_if_index < ifindex) 2196 return (EADDRNOTAVAIL); 2197 ifp = ifnet_byindex(ifindex); 2198 if (ifp == NULL) 2199 return (EADDRNOTAVAIL); 2200 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2201 } else { 2202 error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); 2203 if (error) 2204 return (EADDRNOTAVAIL); 2205 /* 2206 * Some badly behaved applications don't pass an ifindex 2207 * or a scope ID, which is an API violation. In this case, 2208 * perform a lookup as per a v6 join. 2209 * 2210 * XXX For now, stomp on zone ID for the corner case. 2211 * This is not the 'KAME way', but we need to see the ifp 2212 * directly until such time as this implementation is 2213 * refactored, assuming the scope IDs are the way to go. 2214 */ 2215 ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]); 2216 if (ifindex == 0) { 2217 CTR2(KTR_MLD, "%s: warning: no ifindex, looking up " 2218 "ifp for group %s.", __func__, 2219 ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr)); 2220 ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); 2221 } else { 2222 ifp = ifnet_byindex(ifindex); 2223 } 2224 if (ifp == NULL) 2225 return (EADDRNOTAVAIL); 2226 } 2227 2228 CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp); 2229 KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__)); 2230 2231 /* 2232 * Find the membership in the membership array. 
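 *
 * im6o_match_group() (defined earlier in this file) is a linear scan
 * of im6o_membership[] keyed on the interface and group address, and
 * yields -1 when no matching membership exists; conceptually it is
 * something like the simplified sketch below (the real lookup also
 * accepts a wildcard NULL ifp):
 *
 *	for (idx = 0; idx < imo->im6o_num_memberships; idx++) {
 *		inm = imo->im6o_membership[idx];
 *		if (inm->in6m_ifp == ifp && IN6_ARE_ADDR_EQUAL(
 *		    &inm->in6m_addr, &gsa->sin6.sin6_addr))
 *			break;		// found at idx
 *	}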
2233 */ 2234 imo = in6p_findmoptions(inp); 2235 idx = im6o_match_group(imo, ifp, &gsa->sa); 2236 if (idx == -1) { 2237 error = EADDRNOTAVAIL; 2238 goto out_in6p_locked; 2239 } 2240 inm = imo->im6o_membership[idx]; 2241 imf = &imo->im6o_mfilters[idx]; 2242 2243 if (ssa->ss.ss_family != AF_UNSPEC) 2244 is_final = 0; 2245 2246 /* 2247 * Begin state merge transaction at socket layer. 2248 */ 2249 INP_WLOCK_ASSERT(inp); 2250 2251 /* 2252 * If we were instructed only to leave a given source, do so. 2253 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2254 */ 2255 if (is_final) { 2256 im6f_leave(imf); 2257 } else { 2258 if (imf->im6f_st[0] == MCAST_EXCLUDE) { 2259 error = EADDRNOTAVAIL; 2260 goto out_in6p_locked; 2261 } 2262 ims = im6o_match_source(imo, idx, &ssa->sa); 2263 if (ims == NULL) { 2264 CTR3(KTR_MLD, "%s: source %p %spresent", __func__, 2265 ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), 2266 "not "); 2267 error = EADDRNOTAVAIL; 2268 goto out_in6p_locked; 2269 } 2270 CTR2(KTR_MLD, "%s: %s source", __func__, "block"); 2271 error = im6f_prune(imf, &ssa->sin6); 2272 if (error) { 2273 CTR1(KTR_MLD, "%s: merge imf state failed", 2274 __func__); 2275 goto out_in6p_locked; 2276 } 2277 } 2278 2279 /* 2280 * Begin state merge transaction at MLD layer. 2281 */ 2282 IN6_MULTI_LOCK(); 2283 2284 if (is_final) { 2285 /* 2286 * Give up the multicast address record to which 2287 * the membership points. 2288 */ 2289 (void)in6_mc_leave_locked(inm, imf); 2290 } else { 2291 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2292 error = in6m_merge(inm, imf); 2293 if (error) 2294 CTR1(KTR_MLD, "%s: failed to merge inm state", 2295 __func__); 2296 else { 2297 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2298 error = mld_change_state(inm, 0); 2299 if (error) 2300 CTR1(KTR_MLD, "%s: failed mld downcall", 2301 __func__); 2302 } 2303 } 2304 2305 IN6_MULTI_UNLOCK(); 2306 2307 if (error) 2308 im6f_rollback(imf); 2309 else 2310 im6f_commit(imf); 2311 2312 im6f_reap(imf); 2313 2314 if (is_final) { 2315 /* Remove the gap in the membership array. */ 2316 for (++idx; idx < imo->im6o_num_memberships; ++idx) { 2317 imo->im6o_membership[idx-1] = imo->im6o_membership[idx]; 2318 imo->im6o_mfilters[idx-1] = imo->im6o_mfilters[idx]; 2319 } 2320 imo->im6o_num_memberships--; 2321 } 2322 2323 out_in6p_locked: 2324 INP_WUNLOCK(inp); 2325 return (error); 2326 } 2327 2328 /* 2329 * Select the interface for transmitting IPv6 multicast datagrams. 2330 * 2331 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn 2332 * may be passed to this socket option. An address of in6addr_any or an 2333 * interface index of 0 is used to remove a previous selection. 2334 * When no interface is selected, one is chosen for every send. 2335 */ 2336 static int 2337 in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) 2338 { 2339 struct ifnet *ifp; 2340 struct ip6_moptions *imo; 2341 u_int ifindex; 2342 int error; 2343 2344 if (sopt->sopt_valsize != sizeof(u_int)) 2345 return (EINVAL); 2346 2347 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); 2348 if (error) 2349 return (error); 2350 if (V_if_index < ifindex) 2351 return (EINVAL); 2352 2353 ifp = ifnet_byindex(ifindex); 2354 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 2355 return (EADDRNOTAVAIL); 2356 2357 imo = in6p_findmoptions(inp); 2358 imo->im6o_multicast_ifp = ifp; 2359 INP_WUNLOCK(inp); 2360 2361 return (0); 2362 } 2363 2364 /* 2365 * Atomically set source filters on a socket for an IPv6 multicast group. 
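 *
 * This handler is reached via the IPV6_MSFILTER socket option, which
 * is what the setsourcefilter(3) libc wrapper issues.  A hedged
 * userland sketch (not part of this file), with placeholder interface
 * and addresses, and assuming the socket has already joined the group
 * on that interface (otherwise the lookup below fails with
 * EADDRNOTAVAIL):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	static int
 *	set_include_filter(int s)
 *	{
 *		struct sockaddr_in6 grp;
 *		struct sockaddr_storage srcs[1];
 *		struct sockaddr_in6 *src = (struct sockaddr_in6 *)&srcs[0];
 *
 *		memset(&grp, 0, sizeof(grp));
 *		grp.sin6_family = AF_INET6;
 *		grp.sin6_len = sizeof(grp);
 *		inet_pton(AF_INET6, "ff3e::8000:1", &grp.sin6_addr);
 *
 *		memset(srcs, 0, sizeof(srcs));
 *		src->sin6_family = AF_INET6;
 *		src->sin6_len = sizeof(*src);
 *		inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
 *
 *		// Accept only the listed source for this group.
 *		return (setsourcefilter(s, if_nametoindex("em0"),
 *		    (struct sockaddr *)&grp, sizeof(grp),
 *		    MCAST_INCLUDE, 1, srcs));
 *	}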
2366 * 2367 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 2368 */ 2369 static int 2370 in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) 2371 { 2372 struct __msfilterreq msfr; 2373 sockunion_t *gsa; 2374 struct ifnet *ifp; 2375 struct in6_mfilter *imf; 2376 struct ip6_moptions *imo; 2377 struct in6_multi *inm; 2378 size_t idx; 2379 int error; 2380 2381 error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 2382 sizeof(struct __msfilterreq)); 2383 if (error) 2384 return (error); 2385 2386 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) 2387 return (ENOBUFS); 2388 2389 if (msfr.msfr_fmode != MCAST_EXCLUDE && 2390 msfr.msfr_fmode != MCAST_INCLUDE) 2391 return (EINVAL); 2392 2393 if (msfr.msfr_group.ss_family != AF_INET6 || 2394 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) 2395 return (EINVAL); 2396 2397 gsa = (sockunion_t *)&msfr.msfr_group; 2398 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) 2399 return (EINVAL); 2400 2401 gsa->sin6.sin6_port = 0; /* ignore port */ 2402 2403 if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 2404 return (EADDRNOTAVAIL); 2405 ifp = ifnet_byindex(msfr.msfr_ifindex); 2406 if (ifp == NULL) 2407 return (EADDRNOTAVAIL); 2408 (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); 2409 2410 /* 2411 * Take the INP write lock. 2412 * Check if this socket is a member of this group. 2413 */ 2414 imo = in6p_findmoptions(inp); 2415 idx = im6o_match_group(imo, ifp, &gsa->sa); 2416 if (idx == -1 || imo->im6o_mfilters == NULL) { 2417 error = EADDRNOTAVAIL; 2418 goto out_in6p_locked; 2419 } 2420 inm = imo->im6o_membership[idx]; 2421 imf = &imo->im6o_mfilters[idx]; 2422 2423 /* 2424 * Begin state merge transaction at socket layer. 2425 */ 2426 INP_WLOCK_ASSERT(inp); 2427 2428 imf->im6f_st[1] = msfr.msfr_fmode; 2429 2430 /* 2431 * Apply any new source filters, if present. 2432 * Make a copy of the user-space source vector so 2433 * that we may copy them with a single copyin. This 2434 * allows us to deal with page faults up-front. 2435 */ 2436 if (msfr.msfr_nsrcs > 0) { 2437 struct in6_msource *lims; 2438 struct sockaddr_in6 *psin; 2439 struct sockaddr_storage *kss, *pkss; 2440 int i; 2441 2442 INP_WUNLOCK(inp); 2443 2444 CTR2(KTR_MLD, "%s: loading %lu source list entries", 2445 __func__, (unsigned long)msfr.msfr_nsrcs); 2446 kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 2447 M_TEMP, M_WAITOK); 2448 error = copyin(msfr.msfr_srcs, kss, 2449 sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 2450 if (error) { 2451 free(kss, M_TEMP); 2452 return (error); 2453 } 2454 2455 INP_WLOCK(inp); 2456 2457 /* 2458 * Mark all source filters as UNDEFINED at t1. 2459 * Restore new group filter mode, as im6f_leave() 2460 * will set it to INCLUDE. 2461 */ 2462 im6f_leave(imf); 2463 imf->im6f_st[1] = msfr.msfr_fmode; 2464 2465 /* 2466 * Update socket layer filters at t1, lazy-allocating 2467 * new entries. This saves a bunch of memory at the 2468 * cost of one RB_FIND() per source entry; duplicate 2469 * entries in the msfr_nsrcs vector are ignored. 2470 * If we encounter an error, rollback transaction. 2471 * 2472 * XXX This too could be replaced with a set-symmetric 2473 * difference like loop to avoid walking from root 2474 * every time, as the key space is common. 
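 *
 * The length of this vector was bounded above by in6_mcast_maxsocksrc
 * (rejected with ENOBUFS).  That limit is a loader tunable / sysctl,
 * so a filter-heavy application may need it raised; purely as an
 * illustration (the value shown is arbitrary):
 *
 *	# sysctl net.inet6.ip6.mcast.maxsocksrc=256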
2475 */ 2476 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2477 psin = (struct sockaddr_in6 *)pkss; 2478 if (psin->sin6_family != AF_INET6) { 2479 error = EAFNOSUPPORT; 2480 break; 2481 } 2482 if (psin->sin6_len != sizeof(struct sockaddr_in6)) { 2483 error = EINVAL; 2484 break; 2485 } 2486 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { 2487 error = EINVAL; 2488 break; 2489 } 2490 /* 2491 * TODO: Validate embedded scope ID in source 2492 * list entry against passed-in ifp, if and only 2493 * if source list filter entry is iface or node local. 2494 */ 2495 in6_clearscope(&psin->sin6_addr); 2496 error = im6f_get_source(imf, psin, &lims); 2497 if (error) 2498 break; 2499 lims->im6sl_st[1] = imf->im6f_st[1]; 2500 } 2501 free(kss, M_TEMP); 2502 } 2503 2504 if (error) 2505 goto out_im6f_rollback; 2506 2507 INP_WLOCK_ASSERT(inp); 2508 IN6_MULTI_LOCK(); 2509 2510 /* 2511 * Begin state merge transaction at MLD layer. 2512 */ 2513 CTR1(KTR_MLD, "%s: merge inm state", __func__); 2514 error = in6m_merge(inm, imf); 2515 if (error) 2516 CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); 2517 else { 2518 CTR1(KTR_MLD, "%s: doing mld downcall", __func__); 2519 error = mld_change_state(inm, 0); 2520 if (error) 2521 CTR1(KTR_MLD, "%s: failed mld downcall", __func__); 2522 } 2523 2524 IN6_MULTI_UNLOCK(); 2525 2526 out_im6f_rollback: 2527 if (error) 2528 im6f_rollback(imf); 2529 else 2530 im6f_commit(imf); 2531 2532 im6f_reap(imf); 2533 2534 out_in6p_locked: 2535 INP_WUNLOCK(inp); 2536 return (error); 2537 } 2538 2539 /* 2540 * Set the IP multicast options in response to user setsockopt(). 2541 * 2542 * Many of the socket options handled in this function duplicate the 2543 * functionality of socket options in the regular unicast API. However, 2544 * it is not possible to merge the duplicate code, because the idempotence 2545 * of the IPv6 multicast part of the BSD Sockets API must be preserved; 2546 * the effects of these options must be treated as separate and distinct. 2547 * 2548 * SMPng: XXX: Unlocked read of inp_socket believed OK. 2549 */ 2550 int 2551 ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2552 { 2553 struct ip6_moptions *im6o; 2554 int error; 2555 2556 error = 0; 2557 2558 /* 2559 * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 2560 * or is a divert socket, reject it. 2561 */ 2562 if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 2563 (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2564 inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) 2565 return (EOPNOTSUPP); 2566 2567 switch (sopt->sopt_name) { 2568 case IPV6_MULTICAST_IF: 2569 error = in6p_set_multicast_if(inp, sopt); 2570 break; 2571 2572 case IPV6_MULTICAST_HOPS: { 2573 int hlim; 2574 2575 if (sopt->sopt_valsize != sizeof(int)) { 2576 error = EINVAL; 2577 break; 2578 } 2579 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); 2580 if (error) 2581 break; 2582 if (hlim < -1 || hlim > 255) { 2583 error = EINVAL; 2584 break; 2585 } else if (hlim == -1) { 2586 hlim = V_ip6_defmcasthlim; 2587 } 2588 im6o = in6p_findmoptions(inp); 2589 im6o->im6o_multicast_hlim = hlim; 2590 INP_WUNLOCK(inp); 2591 break; 2592 } 2593 2594 case IPV6_MULTICAST_LOOP: { 2595 u_int loop; 2596 2597 /* 2598 * Set the loopback flag for outgoing multicast packets. 2599 * Must be zero or one. 
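 *
 * Illustrative userland use (not part of this file); note the option
 * value here is a u_int, unlike the u_char traditionally taken by
 * IPv4's IP_MULTICAST_LOOP:
 *
 *	u_int loop = 0;		// exactly sizeof(u_int); 0 or 1
 *	(void)setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_LOOP,
 *	    &loop, sizeof(loop));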
2600 */ 2601 if (sopt->sopt_valsize != sizeof(u_int)) { 2602 error = EINVAL; 2603 break; 2604 } 2605 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); 2606 if (error) 2607 break; 2608 if (loop > 1) { 2609 error = EINVAL; 2610 break; 2611 } 2612 im6o = in6p_findmoptions(inp); 2613 im6o->im6o_multicast_loop = loop; 2614 INP_WUNLOCK(inp); 2615 break; 2616 } 2617 2618 case IPV6_JOIN_GROUP: 2619 case MCAST_JOIN_GROUP: 2620 case MCAST_JOIN_SOURCE_GROUP: 2621 error = in6p_join_group(inp, sopt); 2622 break; 2623 2624 case IPV6_LEAVE_GROUP: 2625 case MCAST_LEAVE_GROUP: 2626 case MCAST_LEAVE_SOURCE_GROUP: 2627 error = in6p_leave_group(inp, sopt); 2628 break; 2629 2630 case MCAST_BLOCK_SOURCE: 2631 case MCAST_UNBLOCK_SOURCE: 2632 error = in6p_block_unblock_source(inp, sopt); 2633 break; 2634 2635 case IPV6_MSFILTER: 2636 error = in6p_set_source_filters(inp, sopt); 2637 break; 2638 2639 default: 2640 error = EOPNOTSUPP; 2641 break; 2642 } 2643 2644 INP_UNLOCK_ASSERT(inp); 2645 2646 return (error); 2647 } 2648 2649 /* 2650 * Expose MLD's multicast filter mode and source list(s) to userland, 2651 * keyed by (ifindex, group). 2652 * The filter mode is written out as a uint32_t, followed by 2653 * 0..n of struct in6_addr. 2654 * For use by ifmcstat(8). 2655 * SMPng: NOTE: unlocked read of ifindex space. 2656 */ 2657 static int 2658 sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) 2659 { 2660 struct in6_addr mcaddr; 2661 struct in6_addr src; 2662 struct ifnet *ifp; 2663 struct ifmultiaddr *ifma; 2664 struct in6_multi *inm; 2665 struct ip6_msource *ims; 2666 int *name; 2667 int retval; 2668 u_int namelen; 2669 uint32_t fmode, ifindex; 2670 #ifdef KTR 2671 char ip6tbuf[INET6_ADDRSTRLEN]; 2672 #endif 2673 2674 name = (int *)arg1; 2675 namelen = arg2; 2676 2677 if (req->newptr != NULL) 2678 return (EPERM); 2679 2680 /* int: ifindex + 4 * 32 bits of IPv6 address */ 2681 if (namelen != 5) 2682 return (EINVAL); 2683 2684 ifindex = name[0]; 2685 if (ifindex <= 0 || ifindex > V_if_index) { 2686 CTR2(KTR_MLD, "%s: ifindex %u out of range", 2687 __func__, ifindex); 2688 return (ENOENT); 2689 } 2690 2691 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); 2692 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { 2693 CTR2(KTR_MLD, "%s: group %s is not multicast", 2694 __func__, ip6_sprintf(ip6tbuf, &mcaddr)); 2695 return (EINVAL); 2696 } 2697 2698 ifp = ifnet_byindex(ifindex); 2699 if (ifp == NULL) { 2700 CTR2(KTR_MLD, "%s: no ifp for ifindex %u", 2701 __func__, ifindex); 2702 return (ENOENT); 2703 } 2704 /* 2705 * Internal MLD lookups require that scope/zone ID is set. 2706 */ 2707 (void)in6_setscope(&mcaddr, ifp, NULL); 2708 2709 retval = sysctl_wire_old_buffer(req, 2710 sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); 2711 if (retval) 2712 return (retval); 2713 2714 IN6_MULTI_LOCK(); 2715 2716 IF_ADDR_RLOCK(ifp); 2717 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2718 if (ifma->ifma_addr->sa_family != AF_INET6 || 2719 ifma->ifma_protospec == NULL) 2720 continue; 2721 inm = (struct in6_multi *)ifma->ifma_protospec; 2722 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) 2723 continue; 2724 fmode = inm->in6m_st[1].iss_fmode; 2725 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); 2726 if (retval != 0) 2727 break; 2728 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { 2729 CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); 2730 /* 2731 * Only copy-out sources which are in-mode. 
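 *
 * For reference, a consumer such as ifmcstat(8) reaches this handler
 * by appending the (ifindex, group) key to the OID.  A hedged sketch,
 * assuming the node is named net.inet6.ip6.mcast.filters (its
 * declaration is not shown here):
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME;
 *
 *	(void)sysctlnametomib("net.inet6.ip6.mcast.filters",
 *	    mib, &miblen);
 *	mib[miblen++] = ifindex;			// name[0]
 *	memcpy(&mib[miblen], &group, sizeof(struct in6_addr));
 *	miblen += sizeof(struct in6_addr) / sizeof(int);// name[1..4]
 *	// sysctl(mib, miblen, buf, &len, NULL, 0) then returns one
 *	// uint32_t filter mode followed by the in-mode sources.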
2732 */ 2733 if (fmode != im6s_get_mode(inm, ims, 1)) { 2734 CTR1(KTR_MLD, "%s: skip non-in-mode", 2735 __func__); 2736 continue; 2737 } 2738 src = ims->im6s_addr; 2739 retval = SYSCTL_OUT(req, &src, 2740 sizeof(struct in6_addr)); 2741 if (retval != 0) 2742 break; 2743 } 2744 } 2745 IF_ADDR_RUNLOCK(ifp); 2746 2747 IN6_MULTI_UNLOCK(); 2748 2749 return (retval); 2750 } 2751 2752 #ifdef KTR 2753 2754 static const char *in6m_modestrs[] = { "un", "in", "ex" }; 2755 2756 static const char * 2757 in6m_mode_str(const int mode) 2758 { 2759 2760 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) 2761 return (in6m_modestrs[mode]); 2762 return ("??"); 2763 } 2764 2765 static const char *in6m_statestrs[] = { 2766 "not-member", 2767 "silent", 2768 "idle", 2769 "lazy", 2770 "sleeping", 2771 "awakening", 2772 "query-pending", 2773 "sg-query-pending", 2774 "leaving" 2775 }; 2776 2777 static const char * 2778 in6m_state_str(const int state) 2779 { 2780 2781 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) 2782 return (in6m_statestrs[state]); 2783 return ("??"); 2784 } 2785 2786 /* 2787 * Dump an in6_multi structure to the console. 2788 */ 2789 void 2790 in6m_print(const struct in6_multi *inm) 2791 { 2792 int t; 2793 char ip6tbuf[INET6_ADDRSTRLEN]; 2794 2795 if ((ktr_mask & KTR_MLD) == 0) 2796 return; 2797 2798 printf("%s: --- begin in6m %p ---\n", __func__, inm); 2799 printf("addr %s ifp %p(%s) ifma %p\n", 2800 ip6_sprintf(ip6tbuf, &inm->in6m_addr), 2801 inm->in6m_ifp, 2802 if_name(inm->in6m_ifp), 2803 inm->in6m_ifma); 2804 printf("timer %u state %s refcount %u scq.len %u\n", 2805 inm->in6m_timer, 2806 in6m_state_str(inm->in6m_state), 2807 inm->in6m_refcount, 2808 inm->in6m_scq.ifq_len); 2809 printf("mli %p nsrc %lu sctimer %u scrv %u\n", 2810 inm->in6m_mli, 2811 inm->in6m_nsrc, 2812 inm->in6m_sctimer, 2813 inm->in6m_scrv); 2814 for (t = 0; t < 2; t++) { 2815 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, 2816 in6m_mode_str(inm->in6m_st[t].iss_fmode), 2817 inm->in6m_st[t].iss_asm, 2818 inm->in6m_st[t].iss_ex, 2819 inm->in6m_st[t].iss_in, 2820 inm->in6m_st[t].iss_rec); 2821 } 2822 printf("%s: --- end in6m %p ---\n", __func__, inm); 2823 } 2824 2825 #else /* !KTR */ 2826 2827 void 2828 in6m_print(const struct in6_multi *inm) 2829 { 2830 2831 } 2832 2833 #endif /* KTR */ 2834